diff --git a/README.md b/README.md index 105672a3ee9092b65866110640006800c0b4a45f..302c14e81afb3073c88d6588461d5a83fdf60c79 100644 --- a/README.md +++ b/README.md @@ -1,14 +1,14 @@ --- title: PanDA Panoramic Depth Estimation -emoji: 🐨 -colorFrom: gray -colorTo: yellow -sdk: gradio -sdk_version: 5.20.1 +emoji: 🔥 +colorFrom: green +colorTo: red +sdk: streamlit +sdk_version: 1.43.1 app_file: app.py pinned: false license: apache-2.0 -short_description: PanDA +short_description: PanDA, a panoramic monocular depth estimation method --- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..91c74a188f2d5b762e0461f40960e3e690957bff --- /dev/null +++ b/app.py @@ -0,0 +1,99 @@ +import glob +import gradio as gr +import matplotlib +import numpy as np +from PIL import Image +import torch +import torch.nn as nn +import tempfile +from gradio_imageslider import ImageSlider +import yaml +import os + +from networks.models import * +from depth_anything_utils import Resize, NormalizeImage, PrepareForNet + +os.environ["GRADIO_TEMP_DIR"] = "/hpc2hdd/home/zcao740/Documents/Github/PanDA/tmp/gradio" + +css = """ +#img-display-container { + max-height: 100vh; +} +#img-display-input { + max-height: 80vh; +} +#img-display-output { + max-height: 80vh; +} +#download { + height: 62px; +} +""" +DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu' +config = 'config/inference/panda_large.yaml' +with open(config, 'r') as f: + config = yaml.load(f, Loader=yaml.FullLoader) +state_dict = torch.load(os.path.join(config["load_weights_dir"], 'model.pth'), map_location="cpu") +model = make(config['model']) +if any(key.startswith('module') for key in state_dict.keys()): + model = nn.DataParallel(model) +model_state_dict = model.state_dict() +model.load_state_dict({k: v for k, v in state_dict.items() if k in model_state_dict}) +model = model.to(DEVICE).eval() + +title = "# PanDA" +description = """Official demo for **PanDA**. 
+Please refer to our [github](https://github.com/caozidong/PanDA) for more details.""" + +def predict_depth(image): + return model.infer_image(image) + +with gr.Blocks(css=css) as demo: + gr.Markdown(title) + gr.Markdown(description) + gr.Markdown("### Depth Prediction demo") + + with gr.Row(): + input_image = gr.Image(label="Input Image", type='numpy', elem_id='img-display-input') + depth_image_slider = ImageSlider(label="Depth Map with Slider View", elem_id='img-display-output', position=0.5) + submit = gr.Button(value="Compute Depth") + gray_depth_file = gr.File(label="Grayscale depth map", elem_id="download",) + raw_file = gr.File(label="16-bit raw output (can be considered as disparity)", elem_id="download",) + + cmap = matplotlib.colormaps.get_cmap('Spectral_r') + + def on_submit(image): + original_image = image.copy() + + h, w = image.shape[:2] + + depth = predict_depth(image[:, :, ::-1]) + + raw_depth = Image.fromarray(depth.astype('uint16')) + tmp_raw_depth = tempfile.NamedTemporaryFile(suffix='.png', delete=False) + raw_depth.save(tmp_raw_depth.name) + + depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0 + depth = depth.astype(np.uint8) + colored_depth = (cmap(depth)[:, :, :3] * 255).astype(np.uint8) + + gray_depth = Image.fromarray(depth) + tmp_gray_depth = tempfile.NamedTemporaryFile(suffix='.png', delete=False) + gray_depth.save(tmp_gray_depth.name) + + return [(original_image, colored_depth), tmp_gray_depth.name, tmp_raw_depth.name] + + submit.click(on_submit, inputs=[input_image], outputs=[depth_image_slider, gray_depth_file, raw_file]) + + example_files = glob.glob('erp_samples/*') + examples = gr.Examples( + examples=example_files, + inputs=[input_image], + outputs=[depth_image_slider, gray_depth_file, raw_file], + fn=on_submit + ) + + +if __name__ == '__main__': + port = 18001 + demo.queue().launch(port=port) \ No newline at end of file diff --git a/depth_anything_utils.py b/depth_anything_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c1911789cd7ab307d9bed14217811a90d795e614 --- /dev/null +++ b/depth_anything_utils.py @@ -0,0 +1,249 @@ +import os +import random +from PIL import Image, ImageOps, ImageFilter +import torch +from torchvision import transforms +import torch.nn.functional as F + +import numpy as np +import cv2 +import math + + +def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA): + """Rezise the sample to ensure the given size. Keeps aspect ratio. + + Args: + sample (dict): sample + size (tuple): image size + + Returns: + tuple: new size + """ + shape = list(sample["disparity"].shape) + + if shape[0] >= size[0] and shape[1] >= size[1]: + return sample + + scale = [0, 0] + scale[0] = size[0] / shape[0] + scale[1] = size[1] / shape[1] + + scale = max(scale) + + shape[0] = math.ceil(scale * shape[0]) + shape[1] = math.ceil(scale * shape[1]) + + # resize + sample["image"] = cv2.resize( + sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method + ) + + sample["disparity"] = cv2.resize( + sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST + ) + sample["mask"] = cv2.resize( + sample["mask"].astype(np.float32), + tuple(shape[::-1]), + interpolation=cv2.INTER_NEAREST, + ) + sample["mask"] = sample["mask"].astype(bool) + + return tuple(shape) + + +class Resize(object): + """Resize sample to given size (width, height). 
+ """ + + def __init__( + self, + width, + height, + resize_target=True, + keep_aspect_ratio=False, + ensure_multiple_of=1, + resize_method="lower_bound", + image_interpolation_method=cv2.INTER_AREA, + ): + """Init. + + Args: + width (int): desired output width + height (int): desired output height + resize_target (bool, optional): + True: Resize the full sample (image, mask, target). + False: Resize image only. + Defaults to True. + keep_aspect_ratio (bool, optional): + True: Keep the aspect ratio of the input sample. + Output sample might not have the given width and height, and + resize behaviour depends on the parameter 'resize_method'. + Defaults to False. + ensure_multiple_of (int, optional): + Output width and height is constrained to be multiple of this parameter. + Defaults to 1. + resize_method (str, optional): + "lower_bound": Output will be at least as large as the given size. + "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.) + "minimal": Scale as least as possible. (Output size might be smaller than given size.) + Defaults to "lower_bound". + """ + self.__width = width + self.__height = height + + self.__resize_target = resize_target + self.__keep_aspect_ratio = keep_aspect_ratio + self.__multiple_of = ensure_multiple_of + self.__resize_method = resize_method + self.__image_interpolation_method = image_interpolation_method + + def constrain_to_multiple_of(self, x, min_val=0, max_val=None): + y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int) + + if max_val is not None and y > max_val: + y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int) + + if y < min_val: + y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int) + + return y + + def get_size(self, width, height): + # determine new height and width + scale_height = self.__height / height + scale_width = self.__width / width + + if self.__keep_aspect_ratio: + if self.__resize_method == "lower_bound": + # scale such that output size is lower bound + if scale_width > scale_height: + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + elif self.__resize_method == "upper_bound": + # scale such that output size is upper bound + if scale_width < scale_height: + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + elif self.__resize_method == "minimal": + # scale as least as possbile + if abs(1 - scale_width) < abs(1 - scale_height): + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + else: + raise ValueError( + f"resize_method {self.__resize_method} not implemented" + ) + + if self.__resize_method == "lower_bound": + new_height = self.constrain_to_multiple_of( + scale_height * height, min_val=self.__height + ) + new_width = self.constrain_to_multiple_of( + scale_width * width, min_val=self.__width + ) + elif self.__resize_method == "upper_bound": + new_height = self.constrain_to_multiple_of( + scale_height * height, max_val=self.__height + ) + new_width = self.constrain_to_multiple_of( + scale_width * width, max_val=self.__width + ) + elif self.__resize_method == "minimal": + new_height = self.constrain_to_multiple_of(scale_height * height) + new_width = self.constrain_to_multiple_of(scale_width * width) + else: + raise ValueError(f"resize_method {self.__resize_method} not implemented") + + return (new_width, new_height) + + def __call__(self, sample): + width, height = self.get_size( + 
sample["image"].shape[1], sample["image"].shape[0] + ) + + # resize sample + sample["image"] = cv2.resize( + sample["image"], + (width, height), + interpolation=self.__image_interpolation_method, + ) + + if self.__resize_target: + if "disparity" in sample: + sample["disparity"] = cv2.resize( + sample["disparity"], + (width, height), + interpolation=cv2.INTER_NEAREST, + ) + + if "depth" in sample: + sample["depth"] = cv2.resize( + sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST + ) + + if "semseg_mask" in sample: + # sample["semseg_mask"] = cv2.resize( + # sample["semseg_mask"], (width, height), interpolation=cv2.INTER_NEAREST + # ) + sample["semseg_mask"] = F.interpolate(torch.from_numpy(sample["semseg_mask"]).float()[None, None, ...], (height, width), mode='nearest').numpy()[0, 0] + + if "mask" in sample: + sample["mask"] = cv2.resize( + sample["mask"].astype(np.float32), + (width, height), + interpolation=cv2.INTER_NEAREST, + ) + # sample["mask"] = sample["mask"].astype(bool) + + # print(sample['image'].shape, sample['depth'].shape) + return sample + + +class NormalizeImage(object): + """Normlize image by given mean and std. + """ + + def __init__(self, mean, std): + self.__mean = mean + self.__std = std + + def __call__(self, sample): + sample["image"] = (sample["image"] - self.__mean) / self.__std + + return sample + + +class PrepareForNet(object): + """Prepare sample for usage as network input. + """ + + def __init__(self): + pass + + def __call__(self, sample): + image = np.transpose(sample["image"], (2, 0, 1)) + sample["image"] = np.ascontiguousarray(image).astype(np.float32) + + if "mask" in sample: + sample["mask"] = sample["mask"].astype(np.float32) + sample["mask"] = np.ascontiguousarray(sample["mask"]) + + if "depth" in sample: + depth = sample["depth"].astype(np.float32) + sample["depth"] = np.ascontiguousarray(depth) + + if "semseg_mask" in sample: + sample["semseg_mask"] = sample["semseg_mask"].astype(np.float32) + sample["semseg_mask"] = np.ascontiguousarray(sample["semseg_mask"]) + + return sample \ No newline at end of file diff --git a/depth_anything_v2_metric/README.md b/depth_anything_v2_metric/README.md new file mode 100644 index 0000000000000000000000000000000000000000..fc84a2f050a63219dd1193b12b0651bd0ab0ced5 --- /dev/null +++ b/depth_anything_v2_metric/README.md @@ -0,0 +1,114 @@ +# Depth Anything V2 for Metric Depth Estimation + +![teaser](./assets/compare_zoedepth.png) + +We here provide a simple codebase to fine-tune our Depth Anything V2 pre-trained encoder for metric depth estimation. Built on our powerful encoder, we use a simple DPT head to regress the depth. We fine-tune our pre-trained encoder on synthetic Hypersim / Virtual KITTI datasets for indoor / outdoor metric depth estimation, respectively. + + +# Pre-trained Models + +We provide **six metric depth models** of three scales for indoor and outdoor scenes, respectively. 
+ +| Base Model | Params | Indoor (Hypersim) | Outdoor (Virtual KITTI 2) | +|:-|-:|:-:|:-:| +| Depth-Anything-V2-Small | 24.8M | [Download](https://huggingface.co/depth-anything/Depth-Anything-V2-Metric-Hypersim-Small/resolve/main/depth_anything_v2_metric_hypersim_vits.pth?download=true) | [Download](https://huggingface.co/depth-anything/Depth-Anything-V2-Metric-VKITTI-Small/resolve/main/depth_anything_v2_metric_vkitti_vits.pth?download=true) | +| Depth-Anything-V2-Base | 97.5M | [Download](https://huggingface.co/depth-anything/Depth-Anything-V2-Metric-Hypersim-Base/resolve/main/depth_anything_v2_metric_hypersim_vitb.pth?download=true) | [Download](https://huggingface.co/depth-anything/Depth-Anything-V2-Metric-VKITTI-Base/resolve/main/depth_anything_v2_metric_vkitti_vitb.pth?download=true) | +| Depth-Anything-V2-Large | 335.3M | [Download](https://huggingface.co/depth-anything/Depth-Anything-V2-Metric-Hypersim-Large/resolve/main/depth_anything_v2_metric_hypersim_vitl.pth?download=true) | [Download](https://huggingface.co/depth-anything/Depth-Anything-V2-Metric-VKITTI-Large/resolve/main/depth_anything_v2_metric_vkitti_vitl.pth?download=true) | + +*We recommend to first try our larger models (if computational cost is affordable) and the indoor version.* + +## Usage + +### Prepraration + +```bash +git clone https://github.com/DepthAnything/Depth-Anything-V2 +cd Depth-Anything-V2/metric_depth +pip install -r requirements.txt +``` + +Download the checkpoints listed [here](#pre-trained-models) and put them under the `checkpoints` directory. + +### Use our models +```python +import cv2 +import torch + +from depth_anything_v2.dpt import DepthAnythingV2 + +model_configs = { + 'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]}, + 'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]}, + 'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]} +} + +encoder = 'vitl' # or 'vits', 'vitb' +dataset = 'hypersim' # 'hypersim' for indoor model, 'vkitti' for outdoor model +max_depth = 20 # 20 for indoor model, 80 for outdoor model + +model = DepthAnythingV2(**{**model_configs[encoder], 'max_depth': max_depth}) +model.load_state_dict(torch.load(f'checkpoints/depth_anything_v2_metric_{dataset}_{encoder}.pth', map_location='cpu')) +model.eval() + +raw_img = cv2.imread('your/image/path') +depth = model.infer_image(raw_img) # HxW depth map in meters in numpy +``` + +### Running script on images + +Here, we take the `vitl` encoder as an example. You can also use `vitb` or `vits` encoders. + +```bash +# indoor scenes +python run.py \ + --encoder vitl \ + --load-from checkpoints/depth_anything_v2_metric_hypersim_vitl.pth \ + --max-depth 20 \ + --img-path --outdir [--input-size ] [--save-numpy] + +# outdoor scenes +python run.py \ + --encoder vitl \ + --load-from checkpoints/depth_anything_v2_metric_vkitti_vitl.pth \ + --max-depth 80 \ + --img-path --outdir [--input-size ] [--save-numpy] +``` + +### Project 2D images to point clouds: + +```bash +python depth_to_pointcloud.py \ + --encoder vitl \ + --load-from checkpoints/depth_anything_v2_metric_hypersim_vitl.pth \ + --max-depth 20 \ + --img-path --outdir +``` + +### Reproduce training + +Please first prepare the [Hypersim](https://github.com/apple/ml-hypersim) and [Virtual KITTI 2](https://europe.naverlabs.com/research/computer-vision/proxy-virtual-worlds-vkitti-2/) datasets. 
Then: + +```bash +bash dist_train.sh +``` + + +## Citation + +If you find this project useful, please consider citing: + +```bibtex +@article{depth_anything_v2, + title={Depth Anything V2}, + author={Yang, Lihe and Kang, Bingyi and Huang, Zilong and Zhao, Zhen and Xu, Xiaogang and Feng, Jiashi and Zhao, Hengshuang}, + journal={arXiv:2406.09414}, + year={2024} +} + +@inproceedings{depth_anything_v1, + title={Depth Anything: Unleashing the Power of Large-Scale Unlabeled Data}, + author={Yang, Lihe and Kang, Bingyi and Huang, Zilong and Xu, Xiaogang and Feng, Jiashi and Zhao, Hengshuang}, + booktitle={CVPR}, + year={2024} +} +``` diff --git a/depth_anything_v2_metric/depth_anything_v2/__pycache__/dinov2.cpython-310.pyc b/depth_anything_v2_metric/depth_anything_v2/__pycache__/dinov2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d60803f78d222100778dad4df6c00f7bfe788f4 Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/__pycache__/dinov2.cpython-310.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/__pycache__/dinov2.cpython-39.pyc b/depth_anything_v2_metric/depth_anything_v2/__pycache__/dinov2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c8d82e41114c5331f9ea66c322d4238c32c8d68 Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/__pycache__/dinov2.cpython-39.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/__pycache__/dpt.cpython-310.pyc b/depth_anything_v2_metric/depth_anything_v2/__pycache__/dpt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..faecd01394f41b13ef170eaac782711d9ee6faa2 Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/__pycache__/dpt.cpython-310.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/__pycache__/dpt.cpython-39.pyc b/depth_anything_v2_metric/depth_anything_v2/__pycache__/dpt.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e96a8830669591187c4f6857bb8a3c50a937be25 Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/__pycache__/dpt.cpython-39.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2.py b/depth_anything_v2_metric/depth_anything_v2/dinov2.py new file mode 100644 index 0000000000000000000000000000000000000000..ec4499a18330523aa3564b16be70e813de000c94 --- /dev/null +++ b/depth_anything_v2_metric/depth_anything_v2/dinov2.py @@ -0,0 +1,415 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the Apache License, Version 2.0 +# found in the LICENSE file in the root directory of this source tree. 
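For orientation before the backbone code that follows, here is a minimal, hedged sketch of how the `DinoVisionTransformer` defined in this file is typically queried for multi-level features. The import path is assumed from this diff's directory layout, the model is randomly initialized (no checkpoint is loaded), and the layer indices match the `vitl` entry that `DepthAnythingV2` in `dpt.py` (later in this diff) passes to `get_intermediate_layers`:

```python
# Hypothetical usage sketch; the import path below assumes this diff's layout.
import torch

from depth_anything_v2_metric.depth_anything_v2.dinov2 import DINOv2

backbone = DINOv2(model_name='vitl')   # ViT-L/14 wrapper: img_size=518, patch_size=14, random weights
x = torch.randn(1, 3, 518, 518)        # height and width must be multiples of the 14-pixel patch size

features = backbone.get_intermediate_layers(
    x,
    n=[4, 11, 17, 23],                 # the 'vitl' layer indices used by DepthAnythingV2
    return_class_token=True,
)
patch_tokens, cls_token = features[-1]
print(patch_tokens.shape, cls_token.shape)   # torch.Size([1, 1369, 1024]) torch.Size([1, 1024])
```

Because the `DINOv2` factory at the bottom of this file builds the model with `block_chunks=0`, the non-chunked path is taken and `get_intermediate_layers` simply indexes into `self.blocks`.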
+ +# References: +# https://github.com/facebookresearch/dino/blob/main/vision_transformer.py +# https://github.com/rwightman/pytorch-image-models/tree/master/timm/models/vision_transformer.py + +from functools import partial +import math +import logging +from typing import Sequence, Tuple, Union, Callable + +import torch +import torch.nn as nn +import torch.utils.checkpoint +from torch.nn.init import trunc_normal_ + +from .dinov2_layers import Mlp, PatchEmbed, SwiGLUFFNFused, MemEffAttention, NestedTensorBlock as Block + + +logger = logging.getLogger("dinov2") + + +def named_apply(fn: Callable, module: nn.Module, name="", depth_first=True, include_root=False) -> nn.Module: + if not depth_first and include_root: + fn(module=module, name=name) + for child_name, child_module in module.named_children(): + child_name = ".".join((name, child_name)) if name else child_name + named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True) + if depth_first and include_root: + fn(module=module, name=name) + return module + + +class BlockChunk(nn.ModuleList): + def forward(self, x): + for b in self: + x = b(x) + return x + + +class DinoVisionTransformer(nn.Module): + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4.0, + qkv_bias=True, + ffn_bias=True, + proj_bias=True, + drop_path_rate=0.0, + drop_path_uniform=False, + init_values=None, # for layerscale: None or 0 => no layerscale + embed_layer=PatchEmbed, + act_layer=nn.GELU, + block_fn=Block, + ffn_layer="mlp", + block_chunks=1, + num_register_tokens=0, + interpolate_antialias=False, + interpolate_offset=0.1, + ): + """ + Args: + img_size (int, tuple): input image size + patch_size (int, tuple): patch size + in_chans (int): number of input channels + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + proj_bias (bool): enable bias for proj in attn if True + ffn_bias (bool): enable bias for ffn if True + drop_path_rate (float): stochastic depth rate + drop_path_uniform (bool): apply uniform drop rate across blocks + weight_init (str): weight init scheme + init_values (float): layer-scale init values + embed_layer (nn.Module): patch embedding layer + act_layer (nn.Module): MLP activation layer + block_fn (nn.Module): transformer block class + ffn_layer (str): "mlp", "swiglu", "swiglufused" or "identity" + block_chunks: (int) split block sequence into block_chunks units for FSDP wrap + num_register_tokens: (int) number of extra cls tokens (so-called "registers") + interpolate_antialias: (str) flag to apply anti-aliasing when interpolating positional embeddings + interpolate_offset: (float) work-around offset to apply when interpolating positional embeddings + """ + super().__init__() + norm_layer = partial(nn.LayerNorm, eps=1e-6) + + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.num_tokens = 1 + self.n_blocks = depth + self.num_heads = num_heads + self.patch_size = patch_size + self.num_register_tokens = num_register_tokens + self.interpolate_antialias = interpolate_antialias + self.interpolate_offset = interpolate_offset + + self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + + self.cls_token = 
nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim)) + assert num_register_tokens >= 0 + self.register_tokens = ( + nn.Parameter(torch.zeros(1, num_register_tokens, embed_dim)) if num_register_tokens else None + ) + + if drop_path_uniform is True: + dpr = [drop_path_rate] * depth + else: + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + + if ffn_layer == "mlp": + logger.info("using MLP layer as FFN") + ffn_layer = Mlp + elif ffn_layer == "swiglufused" or ffn_layer == "swiglu": + logger.info("using SwiGLU layer as FFN") + ffn_layer = SwiGLUFFNFused + elif ffn_layer == "identity": + logger.info("using Identity layer as FFN") + + def f(*args, **kwargs): + return nn.Identity() + + ffn_layer = f + else: + raise NotImplementedError + + blocks_list = [ + block_fn( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + proj_bias=proj_bias, + ffn_bias=ffn_bias, + drop_path=dpr[i], + norm_layer=norm_layer, + act_layer=act_layer, + ffn_layer=ffn_layer, + init_values=init_values, + ) + for i in range(depth) + ] + if block_chunks > 0: + self.chunked_blocks = True + chunked_blocks = [] + chunksize = depth // block_chunks + for i in range(0, depth, chunksize): + # this is to keep the block index consistent if we chunk the block list + chunked_blocks.append([nn.Identity()] * i + blocks_list[i : i + chunksize]) + self.blocks = nn.ModuleList([BlockChunk(p) for p in chunked_blocks]) + else: + self.chunked_blocks = False + self.blocks = nn.ModuleList(blocks_list) + + self.norm = norm_layer(embed_dim) + self.head = nn.Identity() + + self.mask_token = nn.Parameter(torch.zeros(1, embed_dim)) + + self.init_weights() + + def init_weights(self): + trunc_normal_(self.pos_embed, std=0.02) + nn.init.normal_(self.cls_token, std=1e-6) + if self.register_tokens is not None: + nn.init.normal_(self.register_tokens, std=1e-6) + named_apply(init_weights_vit_timm, self) + + def interpolate_pos_encoding(self, x, w, h): + previous_dtype = x.dtype + npatch = x.shape[1] - 1 + N = self.pos_embed.shape[1] - 1 + if npatch == N and w == h: + return self.pos_embed + pos_embed = self.pos_embed.float() + class_pos_embed = pos_embed[:, 0] + patch_pos_embed = pos_embed[:, 1:] + dim = x.shape[-1] + w0 = w // self.patch_size + h0 = h // self.patch_size + # we add a small number to avoid floating point error in the interpolation + # see discussion at https://github.com/facebookresearch/dino/issues/8 + # DINOv2 with register modify the interpolate_offset from 0.1 to 0.0 + w0, h0 = w0 + self.interpolate_offset, h0 + self.interpolate_offset + # w0, h0 = w0 + 0.1, h0 + 0.1 + + sqrt_N = math.sqrt(N) + sx, sy = float(w0) / sqrt_N, float(h0) / sqrt_N + patch_pos_embed = nn.functional.interpolate( + patch_pos_embed.reshape(1, int(sqrt_N), int(sqrt_N), dim).permute(0, 3, 1, 2), + scale_factor=(sx, sy), + # (int(w0), int(h0)), # to solve the upsampling shape issue + mode="bicubic", + antialias=self.interpolate_antialias + ) + + assert int(w0) == patch_pos_embed.shape[-2] + assert int(h0) == patch_pos_embed.shape[-1] + patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) + return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1).to(previous_dtype) + + def prepare_tokens_with_masks(self, x, masks=None): + B, nc, w, h = x.shape + x = self.patch_embed(x) + if masks is not None: + x = torch.where(masks.unsqueeze(-1), self.mask_token.to(x.dtype).unsqueeze(0), x) + 
+ x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) + x = x + self.interpolate_pos_encoding(x, w, h) + + if self.register_tokens is not None: + x = torch.cat( + ( + x[:, :1], + self.register_tokens.expand(x.shape[0], -1, -1), + x[:, 1:], + ), + dim=1, + ) + + return x + + def forward_features_list(self, x_list, masks_list): + x = [self.prepare_tokens_with_masks(x, masks) for x, masks in zip(x_list, masks_list)] + for blk in self.blocks: + x = blk(x) + + all_x = x + output = [] + for x, masks in zip(all_x, masks_list): + x_norm = self.norm(x) + output.append( + { + "x_norm_clstoken": x_norm[:, 0], + "x_norm_regtokens": x_norm[:, 1 : self.num_register_tokens + 1], + "x_norm_patchtokens": x_norm[:, self.num_register_tokens + 1 :], + "x_prenorm": x, + "masks": masks, + } + ) + return output + + def forward_features(self, x, masks=None): + if isinstance(x, list): + return self.forward_features_list(x, masks) + + x = self.prepare_tokens_with_masks(x, masks) + + for blk in self.blocks: + x = blk(x) + + x_norm = self.norm(x) + return { + "x_norm_clstoken": x_norm[:, 0], + "x_norm_regtokens": x_norm[:, 1 : self.num_register_tokens + 1], + "x_norm_patchtokens": x_norm[:, self.num_register_tokens + 1 :], + "x_prenorm": x, + "masks": masks, + } + + def _get_intermediate_layers_not_chunked(self, x, n=1): + x = self.prepare_tokens_with_masks(x) + # If n is an int, take the n last blocks. If it's a list, take them + output, total_block_len = [], len(self.blocks) + blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n + for i, blk in enumerate(self.blocks): + x = blk(x) + if i in blocks_to_take: + output.append(x) + assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found" + return output + + def _get_intermediate_layers_chunked(self, x, n=1): + x = self.prepare_tokens_with_masks(x) + output, i, total_block_len = [], 0, len(self.blocks[-1]) + # If n is an int, take the n last blocks. 
If it's a list, take them + blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n + for block_chunk in self.blocks: + for blk in block_chunk[i:]: # Passing the nn.Identity() + x = blk(x) + if i in blocks_to_take: + output.append(x) + i += 1 + assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found" + return output + + def get_intermediate_layers( + self, + x: torch.Tensor, + n: Union[int, Sequence] = 1, # Layers or n last layers to take + reshape: bool = False, + return_class_token: bool = False, + norm=True + ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]]]: + if self.chunked_blocks: + outputs = self._get_intermediate_layers_chunked(x, n) + else: + outputs = self._get_intermediate_layers_not_chunked(x, n) + if norm: + outputs = [self.norm(out) for out in outputs] + class_tokens = [out[:, 0] for out in outputs] + outputs = [out[:, 1 + self.num_register_tokens:] for out in outputs] + if reshape: + B, _, w, h = x.shape + outputs = [ + out.reshape(B, w // self.patch_size, h // self.patch_size, -1).permute(0, 3, 1, 2).contiguous() + for out in outputs + ] + if return_class_token: + return tuple(zip(outputs, class_tokens)) + return tuple(outputs) + + def forward(self, *args, is_training=False, **kwargs): + ret = self.forward_features(*args, **kwargs) + if is_training: + return ret + else: + return self.head(ret["x_norm_clstoken"]) + + +def init_weights_vit_timm(module: nn.Module, name: str = ""): + """ViT weight initialization, original timm impl (for reproducibility)""" + if isinstance(module, nn.Linear): + trunc_normal_(module.weight, std=0.02) + if module.bias is not None: + nn.init.zeros_(module.bias) + + +def vit_small(patch_size=16, num_register_tokens=0, **kwargs): + model = DinoVisionTransformer( + patch_size=patch_size, + embed_dim=384, + depth=12, + num_heads=6, + mlp_ratio=4, + block_fn=partial(Block, attn_class=MemEffAttention), + num_register_tokens=num_register_tokens, + **kwargs, + ) + return model + + +def vit_base(patch_size=16, num_register_tokens=0, **kwargs): + model = DinoVisionTransformer( + patch_size=patch_size, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + block_fn=partial(Block, attn_class=MemEffAttention), + num_register_tokens=num_register_tokens, + **kwargs, + ) + return model + + +def vit_large(patch_size=16, num_register_tokens=0, **kwargs): + model = DinoVisionTransformer( + patch_size=patch_size, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4, + block_fn=partial(Block, attn_class=MemEffAttention), + num_register_tokens=num_register_tokens, + **kwargs, + ) + return model + + +def vit_giant2(patch_size=16, num_register_tokens=0, **kwargs): + """ + Close to ViT-giant, with embed-dim 1536 and 24 heads => embed-dim per head 64 + """ + model = DinoVisionTransformer( + patch_size=patch_size, + embed_dim=1536, + depth=40, + num_heads=24, + mlp_ratio=4, + block_fn=partial(Block, attn_class=MemEffAttention), + num_register_tokens=num_register_tokens, + **kwargs, + ) + return model + + +def DINOv2(model_name): + model_zoo = { + "vits": vit_small, + "vitb": vit_base, + "vitl": vit_large, + "vitg": vit_giant2 + } + + return model_zoo[model_name]( + img_size=518, + patch_size=14, + init_values=1.0, + ffn_layer="mlp" if model_name != "vitg" else "swiglufused", + block_chunks=0, + num_register_tokens=0, + interpolate_antialias=False, + interpolate_offset=0.1 + ) \ No newline at end of file diff --git 
a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__init__.py b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8120f4bc83066cb3f825ce32daa3b437f88486f1 --- /dev/null +++ b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from .mlp import Mlp +from .patch_embed import PatchEmbed +from .swiglu_ffn import SwiGLUFFN, SwiGLUFFNFused +from .block import NestedTensorBlock +from .attention import MemEffAttention diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/__init__.cpython-310.pyc b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93556c23675b325cd16650711f8f60b1b0997708 Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/__init__.cpython-310.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/__init__.cpython-39.pyc b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4aa75eb24404ede44372b4b9669b241985e58d86 Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/__init__.cpython-39.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/attention.cpython-310.pyc b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/attention.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..257a004eff694e9ff9ad843c9b329da84bc0d35b Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/attention.cpython-310.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/attention.cpython-39.pyc b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/attention.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3435b2ce29a2ed5cb14c78a6401a185649978b0f Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/attention.cpython-39.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/block.cpython-310.pyc b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/block.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75b7a032e12abb48c5b27e7f50da4d806fa87ff2 Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/block.cpython-310.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/block.cpython-39.pyc b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/block.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7afe7f59fa199ae6da1d61e20f6b05f247a9eae8 Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/block.cpython-39.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/drop_path.cpython-310.pyc 
b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/drop_path.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9753f19bb6ac0921e4bb19524bed1c10d229531f Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/drop_path.cpython-310.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/drop_path.cpython-39.pyc b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/drop_path.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0028035765e3089d7d32df41def27d3e65e8ec90 Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/drop_path.cpython-39.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/layer_scale.cpython-310.pyc b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/layer_scale.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b2f7760ca2828d1c97c341e913ab3cc4558da7a Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/layer_scale.cpython-310.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/layer_scale.cpython-39.pyc b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/layer_scale.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e0f8d22dcfd700301fc4efbacaaf42a1e23a58b Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/layer_scale.cpython-39.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/mlp.cpython-310.pyc b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/mlp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e9b697967e85aad5770eb0cbbc06215ca307669 Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/mlp.cpython-310.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/mlp.cpython-39.pyc b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/mlp.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efa2a5cc08030a5c616c3a51f928ab89dc1b6e09 Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/mlp.cpython-39.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/patch_embed.cpython-310.pyc b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/patch_embed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff8e25107e3cbb70699a4a85921ae24f549197c1 Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/patch_embed.cpython-310.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/patch_embed.cpython-39.pyc b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/patch_embed.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7897f5e03418805e5af92810ec138cec48bf152 Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/patch_embed.cpython-39.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/swiglu_ffn.cpython-310.pyc 
b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/swiglu_ffn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4b29d6315fbfc4376d7670e7e46e6e75d6cd7e5 Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/swiglu_ffn.cpython-310.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/swiglu_ffn.cpython-39.pyc b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/swiglu_ffn.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce40efde7fb2496d76ec708cdf003ba9465df658 Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/__pycache__/swiglu_ffn.cpython-39.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/attention.py b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/attention.py new file mode 100644 index 0000000000000000000000000000000000000000..815a2bf53dbec496f6a184ed7d03bcecb7124262 --- /dev/null +++ b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/attention.py @@ -0,0 +1,83 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +# References: +# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py +# https://github.com/rwightman/pytorch-image-models/tree/master/timm/models/vision_transformer.py + +import logging + +from torch import Tensor +from torch import nn + + +logger = logging.getLogger("dinov2") + + +try: + from xformers.ops import memory_efficient_attention, unbind, fmha + + XFORMERS_AVAILABLE = True +except ImportError: + logger.warning("xFormers not available") + XFORMERS_AVAILABLE = False + + +class Attention(nn.Module): + def __init__( + self, + dim: int, + num_heads: int = 8, + qkv_bias: bool = False, + proj_bias: bool = True, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + ) -> None: + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim**-0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim, bias=proj_bias) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x: Tensor) -> Tensor: + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + + q, k, v = qkv[0] * self.scale, qkv[1], qkv[2] + attn = q @ k.transpose(-2, -1) + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class MemEffAttention(Attention): + def forward(self, x: Tensor, attn_bias=None) -> Tensor: + if not XFORMERS_AVAILABLE: + assert attn_bias is None, "xFormers is required for nested tensors usage" + return super().forward(x) + + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads) + + q, k, v = unbind(qkv, 2) + + x = memory_efficient_attention(q, k, v, attn_bias=attn_bias) + x = x.reshape([B, N, C]) + + x = self.proj(x) + x = self.proj_drop(x) + return x + + \ No newline at end of file diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/block.py b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/block.py new file mode 100644 index 
0000000000000000000000000000000000000000..25488f57cc0ad3c692f86b62555f6668e2a66db1 --- /dev/null +++ b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/block.py @@ -0,0 +1,252 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +# References: +# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py +# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/patch_embed.py + +import logging +from typing import Callable, List, Any, Tuple, Dict + +import torch +from torch import nn, Tensor + +from .attention import Attention, MemEffAttention +from .drop_path import DropPath +from .layer_scale import LayerScale +from .mlp import Mlp + + +logger = logging.getLogger("dinov2") + + +try: + from xformers.ops import fmha + from xformers.ops import scaled_index_add, index_select_cat + + XFORMERS_AVAILABLE = True +except ImportError: + logger.warning("xFormers not available") + XFORMERS_AVAILABLE = False + + +class Block(nn.Module): + def __init__( + self, + dim: int, + num_heads: int, + mlp_ratio: float = 4.0, + qkv_bias: bool = False, + proj_bias: bool = True, + ffn_bias: bool = True, + drop: float = 0.0, + attn_drop: float = 0.0, + init_values=None, + drop_path: float = 0.0, + act_layer: Callable[..., nn.Module] = nn.GELU, + norm_layer: Callable[..., nn.Module] = nn.LayerNorm, + attn_class: Callable[..., nn.Module] = Attention, + ffn_layer: Callable[..., nn.Module] = Mlp, + ) -> None: + super().__init__() + # print(f"biases: qkv: {qkv_bias}, proj: {proj_bias}, ffn: {ffn_bias}") + self.norm1 = norm_layer(dim) + self.attn = attn_class( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + proj_bias=proj_bias, + attn_drop=attn_drop, + proj_drop=drop, + ) + self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() + self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = ffn_layer( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + bias=ffn_bias, + ) + self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() + self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + self.sample_drop_ratio = drop_path + + def forward(self, x: Tensor) -> Tensor: + def attn_residual_func(x: Tensor) -> Tensor: + return self.ls1(self.attn(self.norm1(x))) + + def ffn_residual_func(x: Tensor) -> Tensor: + return self.ls2(self.mlp(self.norm2(x))) + + if self.training and self.sample_drop_ratio > 0.1: + # the overhead is compensated only for a drop path rate larger than 0.1 + x = drop_add_residual_stochastic_depth( + x, + residual_func=attn_residual_func, + sample_drop_ratio=self.sample_drop_ratio, + ) + x = drop_add_residual_stochastic_depth( + x, + residual_func=ffn_residual_func, + sample_drop_ratio=self.sample_drop_ratio, + ) + elif self.training and self.sample_drop_ratio > 0.0: + x = x + self.drop_path1(attn_residual_func(x)) + x = x + self.drop_path1(ffn_residual_func(x)) # FIXME: drop_path2 + else: + x = x + attn_residual_func(x) + x = x + ffn_residual_func(x) + return x + + +def drop_add_residual_stochastic_depth( + x: Tensor, + residual_func: Callable[[Tensor], Tensor], + sample_drop_ratio: float = 0.0, +) -> Tensor: + # 1) extract subset using permutation + b, n, d = x.shape + sample_subset_size = 
max(int(b * (1 - sample_drop_ratio)), 1) + brange = (torch.randperm(b, device=x.device))[:sample_subset_size] + x_subset = x[brange] + + # 2) apply residual_func to get residual + residual = residual_func(x_subset) + + x_flat = x.flatten(1) + residual = residual.flatten(1) + + residual_scale_factor = b / sample_subset_size + + # 3) add the residual + x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor) + return x_plus_residual.view_as(x) + + +def get_branges_scales(x, sample_drop_ratio=0.0): + b, n, d = x.shape + sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1) + brange = (torch.randperm(b, device=x.device))[:sample_subset_size] + residual_scale_factor = b / sample_subset_size + return brange, residual_scale_factor + + +def add_residual(x, brange, residual, residual_scale_factor, scaling_vector=None): + if scaling_vector is None: + x_flat = x.flatten(1) + residual = residual.flatten(1) + x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor) + else: + x_plus_residual = scaled_index_add( + x, brange, residual.to(dtype=x.dtype), scaling=scaling_vector, alpha=residual_scale_factor + ) + return x_plus_residual + + +attn_bias_cache: Dict[Tuple, Any] = {} + + +def get_attn_bias_and_cat(x_list, branges=None): + """ + this will perform the index select, cat the tensors, and provide the attn_bias from cache + """ + batch_sizes = [b.shape[0] for b in branges] if branges is not None else [x.shape[0] for x in x_list] + all_shapes = tuple((b, x.shape[1]) for b, x in zip(batch_sizes, x_list)) + if all_shapes not in attn_bias_cache.keys(): + seqlens = [] + for b, x in zip(batch_sizes, x_list): + for _ in range(b): + seqlens.append(x.shape[1]) + attn_bias = fmha.BlockDiagonalMask.from_seqlens(seqlens) + attn_bias._batch_sizes = batch_sizes + attn_bias_cache[all_shapes] = attn_bias + + if branges is not None: + cat_tensors = index_select_cat([x.flatten(1) for x in x_list], branges).view(1, -1, x_list[0].shape[-1]) + else: + tensors_bs1 = tuple(x.reshape([1, -1, *x.shape[2:]]) for x in x_list) + cat_tensors = torch.cat(tensors_bs1, dim=1) + + return attn_bias_cache[all_shapes], cat_tensors + + +def drop_add_residual_stochastic_depth_list( + x_list: List[Tensor], + residual_func: Callable[[Tensor, Any], Tensor], + sample_drop_ratio: float = 0.0, + scaling_vector=None, +) -> Tensor: + # 1) generate random set of indices for dropping samples in the batch + branges_scales = [get_branges_scales(x, sample_drop_ratio=sample_drop_ratio) for x in x_list] + branges = [s[0] for s in branges_scales] + residual_scale_factors = [s[1] for s in branges_scales] + + # 2) get attention bias and index+concat the tensors + attn_bias, x_cat = get_attn_bias_and_cat(x_list, branges) + + # 3) apply residual_func to get residual, and split the result + residual_list = attn_bias.split(residual_func(x_cat, attn_bias=attn_bias)) # type: ignore + + outputs = [] + for x, brange, residual, residual_scale_factor in zip(x_list, branges, residual_list, residual_scale_factors): + outputs.append(add_residual(x, brange, residual, residual_scale_factor, scaling_vector).view_as(x)) + return outputs + + +class NestedTensorBlock(Block): + def forward_nested(self, x_list: List[Tensor]) -> List[Tensor]: + """ + x_list contains a list of tensors to nest together and run + """ + assert isinstance(self.attn, MemEffAttention) + + if self.training and self.sample_drop_ratio > 0.0: + + def attn_residual_func(x: Tensor, 
attn_bias=None) -> Tensor: + return self.attn(self.norm1(x), attn_bias=attn_bias) + + def ffn_residual_func(x: Tensor, attn_bias=None) -> Tensor: + return self.mlp(self.norm2(x)) + + x_list = drop_add_residual_stochastic_depth_list( + x_list, + residual_func=attn_residual_func, + sample_drop_ratio=self.sample_drop_ratio, + scaling_vector=self.ls1.gamma if isinstance(self.ls1, LayerScale) else None, + ) + x_list = drop_add_residual_stochastic_depth_list( + x_list, + residual_func=ffn_residual_func, + sample_drop_ratio=self.sample_drop_ratio, + scaling_vector=self.ls2.gamma if isinstance(self.ls1, LayerScale) else None, + ) + return x_list + else: + + def attn_residual_func(x: Tensor, attn_bias=None) -> Tensor: + return self.ls1(self.attn(self.norm1(x), attn_bias=attn_bias)) + + def ffn_residual_func(x: Tensor, attn_bias=None) -> Tensor: + return self.ls2(self.mlp(self.norm2(x))) + + attn_bias, x = get_attn_bias_and_cat(x_list) + x = x + attn_residual_func(x, attn_bias=attn_bias) + x = x + ffn_residual_func(x) + return attn_bias.split(x) + + def forward(self, x_or_x_list): + if isinstance(x_or_x_list, Tensor): + return super().forward(x_or_x_list) + elif isinstance(x_or_x_list, list): + assert XFORMERS_AVAILABLE, "Please install xFormers for nested tensors usage" + return self.forward_nested(x_or_x_list) + else: + raise AssertionError diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/drop_path.py b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/drop_path.py new file mode 100644 index 0000000000000000000000000000000000000000..af05625984dd14682cc96a63bf0c97bab1f123b1 --- /dev/null +++ b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/drop_path.py @@ -0,0 +1,35 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +# References: +# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py +# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/drop.py + + +from torch import nn + + +def drop_path(x, drop_prob: float = 0.0, training: bool = False): + if drop_prob == 0.0 or not training: + return x + keep_prob = 1 - drop_prob + shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = x.new_empty(shape).bernoulli_(keep_prob) + if keep_prob > 0.0: + random_tensor.div_(keep_prob) + output = x * random_tensor + return output + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" + + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/layer_scale.py b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/layer_scale.py new file mode 100644 index 0000000000000000000000000000000000000000..ca5daa52bd81d3581adeb2198ea5b7dba2a3aea1 --- /dev/null +++ b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/layer_scale.py @@ -0,0 +1,28 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
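As a quick aside on the module defined below: LayerScale is a learned per-channel scale applied to each residual branch, and `dinov2.py` in this diff constructs its blocks with `init_values=1.0`, which enables it. A tiny, hedged sketch of the computation:

```python
# Minimal sketch of the LayerScale computation (per-channel scaling of a residual branch).
import torch

dim = 1024
gamma = torch.nn.Parameter(1e-5 * torch.ones(dim))  # the module's only parameter, here with init_values=1e-5
x = torch.randn(2, 1370, dim)                        # (batch, tokens, channels)
out = x * gamma                                      # broadcasts over batch and token dimensions
print(out.shape)                                     # torch.Size([2, 1370, 1024])
```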
+ +# Modified from: https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/vision_transformer.py#L103-L110 + +from typing import Union + +import torch +from torch import Tensor +from torch import nn + + +class LayerScale(nn.Module): + def __init__( + self, + dim: int, + init_values: Union[float, Tensor] = 1e-5, + inplace: bool = False, + ) -> None: + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x: Tensor) -> Tensor: + return x.mul_(self.gamma) if self.inplace else x * self.gamma diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/mlp.py b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/mlp.py new file mode 100644 index 0000000000000000000000000000000000000000..5e4b315f972f9a9f54aef1e4ef4e81b52976f018 --- /dev/null +++ b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/mlp.py @@ -0,0 +1,41 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +# References: +# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py +# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/mlp.py + + +from typing import Callable, Optional + +from torch import Tensor, nn + + +class Mlp(nn.Module): + def __init__( + self, + in_features: int, + hidden_features: Optional[int] = None, + out_features: Optional[int] = None, + act_layer: Callable[..., nn.Module] = nn.GELU, + drop: float = 0.0, + bias: bool = True, + ) -> None: + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features, bias=bias) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features, bias=bias) + self.drop = nn.Dropout(drop) + + def forward(self, x: Tensor) -> Tensor: + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/patch_embed.py b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/patch_embed.py new file mode 100644 index 0000000000000000000000000000000000000000..574abe41175568d700a389b8b96d1ba554914779 --- /dev/null +++ b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/patch_embed.py @@ -0,0 +1,89 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +# References: +# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py +# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/patch_embed.py + +from typing import Callable, Optional, Tuple, Union + +from torch import Tensor +import torch.nn as nn + + +def make_2tuple(x): + if isinstance(x, tuple): + assert len(x) == 2 + return x + + assert isinstance(x, int) + return (x, x) + + +class PatchEmbed(nn.Module): + """ + 2D image to patch embedding: (B,C,H,W) -> (B,N,D) + + Args: + img_size: Image size. + patch_size: Patch token size. + in_chans: Number of input image channels. + embed_dim: Number of linear projection output channels. + norm_layer: Normalization layer. 
+ """ + + def __init__( + self, + img_size: Union[int, Tuple[int, int]] = 224, + patch_size: Union[int, Tuple[int, int]] = 16, + in_chans: int = 3, + embed_dim: int = 768, + norm_layer: Optional[Callable] = None, + flatten_embedding: bool = True, + ) -> None: + super().__init__() + + image_HW = make_2tuple(img_size) + patch_HW = make_2tuple(patch_size) + patch_grid_size = ( + image_HW[0] // patch_HW[0], + image_HW[1] // patch_HW[1], + ) + + self.img_size = image_HW + self.patch_size = patch_HW + self.patches_resolution = patch_grid_size + self.num_patches = patch_grid_size[0] * patch_grid_size[1] + + self.in_chans = in_chans + self.embed_dim = embed_dim + + self.flatten_embedding = flatten_embedding + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_HW, stride=patch_HW) + self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() + + def forward(self, x: Tensor) -> Tensor: + _, _, H, W = x.shape + patch_H, patch_W = self.patch_size + + assert H % patch_H == 0, f"Input image height {H} is not a multiple of patch height {patch_H}" + assert W % patch_W == 0, f"Input image width {W} is not a multiple of patch width: {patch_W}" + + x = self.proj(x) # B C H W + H, W = x.size(2), x.size(3) + x = x.flatten(2).transpose(1, 2) # B HW C + x = self.norm(x) + if not self.flatten_embedding: + x = x.reshape(-1, H, W, self.embed_dim) # B H W C + return x + + def flops(self) -> float: + Ho, Wo = self.patches_resolution + flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1]) + if self.norm is not None: + flops += Ho * Wo * self.embed_dim + return flops diff --git a/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/swiglu_ffn.py b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/swiglu_ffn.py new file mode 100644 index 0000000000000000000000000000000000000000..b3324b266fb0a50ccf8c3a0ede2ae10ac4dfa03e --- /dev/null +++ b/depth_anything_v2_metric/depth_anything_v2/dinov2_layers/swiglu_ffn.py @@ -0,0 +1,63 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +from typing import Callable, Optional + +from torch import Tensor, nn +import torch.nn.functional as F + + +class SwiGLUFFN(nn.Module): + def __init__( + self, + in_features: int, + hidden_features: Optional[int] = None, + out_features: Optional[int] = None, + act_layer: Callable[..., nn.Module] = None, + drop: float = 0.0, + bias: bool = True, + ) -> None: + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.w12 = nn.Linear(in_features, 2 * hidden_features, bias=bias) + self.w3 = nn.Linear(hidden_features, out_features, bias=bias) + + def forward(self, x: Tensor) -> Tensor: + x12 = self.w12(x) + x1, x2 = x12.chunk(2, dim=-1) + hidden = F.silu(x1) * x2 + return self.w3(hidden) + + +try: + from xformers.ops import SwiGLU + + XFORMERS_AVAILABLE = True +except ImportError: + SwiGLU = SwiGLUFFN + XFORMERS_AVAILABLE = False + + +class SwiGLUFFNFused(SwiGLU): + def __init__( + self, + in_features: int, + hidden_features: Optional[int] = None, + out_features: Optional[int] = None, + act_layer: Callable[..., nn.Module] = None, + drop: float = 0.0, + bias: bool = True, + ) -> None: + out_features = out_features or in_features + hidden_features = hidden_features or in_features + hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8 + super().__init__( + in_features=in_features, + hidden_features=hidden_features, + out_features=out_features, + bias=bias, + ) diff --git a/depth_anything_v2_metric/depth_anything_v2/dpt.py b/depth_anything_v2_metric/depth_anything_v2/dpt.py new file mode 100644 index 0000000000000000000000000000000000000000..833447e19e0f4c0037d49dffed1713ce1c062956 --- /dev/null +++ b/depth_anything_v2_metric/depth_anything_v2/dpt.py @@ -0,0 +1,222 @@ +import cv2 +import torch +import torch.nn as nn +import torch.nn.functional as F +from torchvision.transforms import Compose + +from .dinov2 import DINOv2 +from .util.blocks import FeatureFusionBlock, _make_scratch +from .util.transform import Resize, NormalizeImage, PrepareForNet + + +def _make_fusion_block(features, use_bn, size=None): + return FeatureFusionBlock( + features, + nn.ReLU(False), + deconv=False, + bn=use_bn, + expand=False, + align_corners=True, + size=size, + ) + + +class ConvBlock(nn.Module): + def __init__(self, in_feature, out_feature): + super().__init__() + + self.conv_block = nn.Sequential( + nn.Conv2d(in_feature, out_feature, kernel_size=3, stride=1, padding=1), + nn.BatchNorm2d(out_feature), + nn.ReLU(True) + ) + + def forward(self, x): + return self.conv_block(x) + + +class DPTHead(nn.Module): + def __init__( + self, + in_channels, + features=256, + use_bn=False, + out_channels=[256, 512, 1024, 1024], + use_clstoken=False + ): + super(DPTHead, self).__init__() + + self.use_clstoken = use_clstoken + + self.projects = nn.ModuleList([ + nn.Conv2d( + in_channels=in_channels, + out_channels=out_channel, + kernel_size=1, + stride=1, + padding=0, + ) for out_channel in out_channels + ]) + + self.resize_layers = nn.ModuleList([ + nn.ConvTranspose2d( + in_channels=out_channels[0], + out_channels=out_channels[0], + kernel_size=4, + stride=4, + padding=0), + nn.ConvTranspose2d( + in_channels=out_channels[1], + out_channels=out_channels[1], + kernel_size=2, + stride=2, + padding=0), + nn.Identity(), + nn.Conv2d( + in_channels=out_channels[3], + out_channels=out_channels[3], + kernel_size=3, + stride=2, + padding=1) + ]) + + if use_clstoken: + self.readout_projects = nn.ModuleList() + for _ in range(len(self.projects)): + 
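+                # one readout MLP per tapped transformer stage; it fuses the class
+                # token back into every patch token before the 1x1 projection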
self.readout_projects.append( + nn.Sequential( + nn.Linear(2 * in_channels, in_channels), + nn.GELU())) + + self.scratch = _make_scratch( + out_channels, + features, + groups=1, + expand=False, + ) + + self.scratch.stem_transpose = None + + self.scratch.refinenet1 = _make_fusion_block(features, use_bn) + self.scratch.refinenet2 = _make_fusion_block(features, use_bn) + self.scratch.refinenet3 = _make_fusion_block(features, use_bn) + self.scratch.refinenet4 = _make_fusion_block(features, use_bn) + + head_features_1 = features + head_features_2 = 32 + + self.scratch.output_conv1 = nn.Conv2d(head_features_1, head_features_1 // 2, kernel_size=3, stride=1, padding=1) + self.scratch.output_conv2 = nn.Sequential( + nn.Conv2d(head_features_1 // 2, head_features_2, kernel_size=3, stride=1, padding=1), + nn.ReLU(True), + nn.Conv2d(head_features_2, 1, kernel_size=1, stride=1, padding=0), + nn.Sigmoid() + ) + + def forward(self, out_features, patch_h, patch_w): + out = [] + for i, x in enumerate(out_features): + if self.use_clstoken: + x, cls_token = x[0], x[1] + readout = cls_token.unsqueeze(1).expand_as(x) + x = self.readout_projects[i](torch.cat((x, readout), -1)) + else: + x = x[0] + + x = x.permute(0, 2, 1).reshape((x.shape[0], x.shape[-1], patch_h, patch_w)) + + x = self.projects[i](x) + x = self.resize_layers[i](x) + + out.append(x) + + layer_1, layer_2, layer_3, layer_4 = out + + layer_1_rn = self.scratch.layer1_rn(layer_1) + layer_2_rn = self.scratch.layer2_rn(layer_2) + layer_3_rn = self.scratch.layer3_rn(layer_3) + layer_4_rn = self.scratch.layer4_rn(layer_4) + + path_4 = self.scratch.refinenet4(layer_4_rn, size=layer_3_rn.shape[2:]) + path_3 = self.scratch.refinenet3(path_4, layer_3_rn, size=layer_2_rn.shape[2:]) + path_2 = self.scratch.refinenet2(path_3, layer_2_rn, size=layer_1_rn.shape[2:]) + path_1 = self.scratch.refinenet1(path_2, layer_1_rn) + + out = self.scratch.output_conv1(path_1) + out = F.interpolate(out, (int(patch_h * 14), int(patch_w * 14)), mode="bilinear", align_corners=True) + out = self.scratch.output_conv2(out) + + return out + + +class DepthAnythingV2(nn.Module): + def __init__( + self, + encoder='vitl', + features=256, + out_channels=[256, 512, 1024, 1024], + use_bn=False, + use_clstoken=False, + max_depth=20.0 + ): + super(DepthAnythingV2, self).__init__() + + self.intermediate_layer_idx = { + 'vits': [2, 5, 8, 11], + 'vitb': [2, 5, 8, 11], + 'vitl': [4, 11, 17, 23], + 'vitg': [9, 19, 29, 39] + } + + self.max_depth = max_depth + + self.encoder = encoder + self.pretrained = DINOv2(model_name=encoder) + + self.depth_head = DPTHead(self.pretrained.embed_dim, features, use_bn, out_channels=out_channels, use_clstoken=use_clstoken) + + def forward(self, x): + patch_h, patch_w = x.shape[-2] // 14, x.shape[-1] // 14 + + features = self.pretrained.get_intermediate_layers(x, self.intermediate_layer_idx[self.encoder], return_class_token=True) + + depth = self.depth_head(features, patch_h, patch_w) * self.max_depth + + return depth.squeeze(1) + + @torch.no_grad() + def infer_image(self, raw_image, input_size=518): + image, (h, w) = self.image2tensor(raw_image, input_size) + print(image.shape) + depth = self.forward(image) + print(depth.shape) + depth = F.interpolate(depth[:, None], (h, w), mode="bilinear", align_corners=True)[0, 0] + + return depth.cpu().numpy() + + def image2tensor(self, raw_image, input_size=518): + transform = Compose([ + Resize( + width=input_size, + height=input_size, + resize_target=False, + keep_aspect_ratio=True, + ensure_multiple_of=14, + 
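+                # 14 is the DINOv2 patch size, so the resized H and W stay divisible by the patch grid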
resize_method='lower_bound', + image_interpolation_method=cv2.INTER_CUBIC, + ), + NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + PrepareForNet(), + ]) + + h, w = raw_image.shape[:2] + + image = cv2.cvtColor(raw_image, cv2.COLOR_BGR2RGB) / 255.0 + + image = transform({'image': image})['image'] + image = torch.from_numpy(image).unsqueeze(0) + + DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu' + image = image.to(DEVICE) + + return image, (h, w) diff --git a/depth_anything_v2_metric/depth_anything_v2/util/__pycache__/blocks.cpython-310.pyc b/depth_anything_v2_metric/depth_anything_v2/util/__pycache__/blocks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d9c9fcb06c094bae19ee523407a97de7c741850 Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/util/__pycache__/blocks.cpython-310.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/util/__pycache__/blocks.cpython-39.pyc b/depth_anything_v2_metric/depth_anything_v2/util/__pycache__/blocks.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a1097f8aa42845fcab2aedc7cf69c374400314b Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/util/__pycache__/blocks.cpython-39.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/util/__pycache__/transform.cpython-310.pyc b/depth_anything_v2_metric/depth_anything_v2/util/__pycache__/transform.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8af9a1a6004bb32817ddb1803618c5c8853dd714 Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/util/__pycache__/transform.cpython-310.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/util/__pycache__/transform.cpython-39.pyc b/depth_anything_v2_metric/depth_anything_v2/util/__pycache__/transform.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ea34d41d37d3eb2f42435f7d419fc3a2b0e90c0 Binary files /dev/null and b/depth_anything_v2_metric/depth_anything_v2/util/__pycache__/transform.cpython-39.pyc differ diff --git a/depth_anything_v2_metric/depth_anything_v2/util/blocks.py b/depth_anything_v2_metric/depth_anything_v2/util/blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..382ea183a40264056142afffc201c992a2b01d37 --- /dev/null +++ b/depth_anything_v2_metric/depth_anything_v2/util/blocks.py @@ -0,0 +1,148 @@ +import torch.nn as nn + + +def _make_scratch(in_shape, out_shape, groups=1, expand=False): + scratch = nn.Module() + + out_shape1 = out_shape + out_shape2 = out_shape + out_shape3 = out_shape + if len(in_shape) >= 4: + out_shape4 = out_shape + + if expand: + out_shape1 = out_shape + out_shape2 = out_shape * 2 + out_shape3 = out_shape * 4 + if len(in_shape) >= 4: + out_shape4 = out_shape * 8 + + scratch.layer1_rn = nn.Conv2d(in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups) + scratch.layer2_rn = nn.Conv2d(in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups) + scratch.layer3_rn = nn.Conv2d(in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups) + if len(in_shape) >= 4: + scratch.layer4_rn = nn.Conv2d(in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups) + + return scratch + + +class ResidualConvUnit(nn.Module): + """Residual convolution module. 
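+
+    Pre-activation residual block: activation -> 3x3 conv (optional BatchNorm), applied twice, followed by a skip connection back to the input.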
+ """ + + def __init__(self, features, activation, bn): + """Init. + + Args: + features (int): number of features + """ + super().__init__() + + self.bn = bn + + self.groups=1 + + self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups) + + self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups) + + if self.bn == True: + self.bn1 = nn.BatchNorm2d(features) + self.bn2 = nn.BatchNorm2d(features) + + self.activation = activation + + self.skip_add = nn.quantized.FloatFunctional() + + def forward(self, x): + """Forward pass. + + Args: + x (tensor): input + + Returns: + tensor: output + """ + + out = self.activation(x) + out = self.conv1(out) + if self.bn == True: + out = self.bn1(out) + + out = self.activation(out) + out = self.conv2(out) + if self.bn == True: + out = self.bn2(out) + + if self.groups > 1: + out = self.conv_merge(out) + + return self.skip_add.add(out, x) + + +class FeatureFusionBlock(nn.Module): + """Feature fusion block. + """ + + def __init__( + self, + features, + activation, + deconv=False, + bn=False, + expand=False, + align_corners=True, + size=None + ): + """Init. + + Args: + features (int): number of features + """ + super(FeatureFusionBlock, self).__init__() + + self.deconv = deconv + self.align_corners = align_corners + + self.groups=1 + + self.expand = expand + out_features = features + if self.expand == True: + out_features = features // 2 + + self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1) + + self.resConfUnit1 = ResidualConvUnit(features, activation, bn) + self.resConfUnit2 = ResidualConvUnit(features, activation, bn) + + self.skip_add = nn.quantized.FloatFunctional() + + self.size=size + + def forward(self, *xs, size=None): + """Forward pass. + + Returns: + tensor: output + """ + output = xs[0] + + if len(xs) == 2: + res = self.resConfUnit1(xs[1]) + output = self.skip_add.add(output, res) + + output = self.resConfUnit2(output) + + if (size is None) and (self.size is None): + modifier = {"scale_factor": 2} + elif size is None: + modifier = {"size": self.size} + else: + modifier = {"size": size} + + output = nn.functional.interpolate(output, **modifier, mode="bilinear", align_corners=self.align_corners) + + output = self.out_conv(output) + + return output diff --git a/depth_anything_v2_metric/depth_anything_v2/util/transform.py b/depth_anything_v2_metric/depth_anything_v2/util/transform.py new file mode 100644 index 0000000000000000000000000000000000000000..b14aacd44ea086b01725a9ca68bb49eadcf37d73 --- /dev/null +++ b/depth_anything_v2_metric/depth_anything_v2/util/transform.py @@ -0,0 +1,158 @@ +import numpy as np +import cv2 + + +class Resize(object): + """Resize sample to given size (width, height). + """ + + def __init__( + self, + width, + height, + resize_target=True, + keep_aspect_ratio=False, + ensure_multiple_of=1, + resize_method="lower_bound", + image_interpolation_method=cv2.INTER_AREA, + ): + """Init. + + Args: + width (int): desired output width + height (int): desired output height + resize_target (bool, optional): + True: Resize the full sample (image, mask, target). + False: Resize image only. + Defaults to True. + keep_aspect_ratio (bool, optional): + True: Keep the aspect ratio of the input sample. + Output sample might not have the given width and height, and + resize behaviour depends on the parameter 'resize_method'. + Defaults to False. 
+ ensure_multiple_of (int, optional): + Output width and height is constrained to be multiple of this parameter. + Defaults to 1. + resize_method (str, optional): + "lower_bound": Output will be at least as large as the given size. + "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.) + "minimal": Scale as least as possible. (Output size might be smaller than given size.) + Defaults to "lower_bound". + """ + self.__width = width + self.__height = height + + self.__resize_target = resize_target + self.__keep_aspect_ratio = keep_aspect_ratio + self.__multiple_of = ensure_multiple_of + self.__resize_method = resize_method + self.__image_interpolation_method = image_interpolation_method + + def constrain_to_multiple_of(self, x, min_val=0, max_val=None): + y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int) + + if max_val is not None and y > max_val: + y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int) + + if y < min_val: + y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int) + + return y + + def get_size(self, width, height): + # determine new height and width + scale_height = self.__height / height + scale_width = self.__width / width + + if self.__keep_aspect_ratio: + if self.__resize_method == "lower_bound": + # scale such that output size is lower bound + if scale_width > scale_height: + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + elif self.__resize_method == "upper_bound": + # scale such that output size is upper bound + if scale_width < scale_height: + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + elif self.__resize_method == "minimal": + # scale as least as possbile + if abs(1 - scale_width) < abs(1 - scale_height): + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + else: + raise ValueError(f"resize_method {self.__resize_method} not implemented") + + if self.__resize_method == "lower_bound": + new_height = self.constrain_to_multiple_of(scale_height * height, min_val=self.__height) + new_width = self.constrain_to_multiple_of(scale_width * width, min_val=self.__width) + elif self.__resize_method == "upper_bound": + new_height = self.constrain_to_multiple_of(scale_height * height, max_val=self.__height) + new_width = self.constrain_to_multiple_of(scale_width * width, max_val=self.__width) + elif self.__resize_method == "minimal": + new_height = self.constrain_to_multiple_of(scale_height * height) + new_width = self.constrain_to_multiple_of(scale_width * width) + else: + raise ValueError(f"resize_method {self.__resize_method} not implemented") + + return (new_width, new_height) + + def __call__(self, sample): + width, height = self.get_size(sample["image"].shape[1], sample["image"].shape[0]) + + # resize sample + sample["image"] = cv2.resize(sample["image"], (width, height), interpolation=self.__image_interpolation_method) + + if self.__resize_target: + if "depth" in sample: + sample["depth"] = cv2.resize(sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST) + + if "mask" in sample: + sample["mask"] = cv2.resize(sample["mask"].astype(np.float32), (width, height), interpolation=cv2.INTER_NEAREST) + + return sample + + +class NormalizeImage(object): + """Normlize image by given mean and std. 
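+
+    Expects sample["image"] as a float array scaled to [0, 1] and applies (image - mean) / std per channel.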
+ """ + + def __init__(self, mean, std): + self.__mean = mean + self.__std = std + + def __call__(self, sample): + sample["image"] = (sample["image"] - self.__mean) / self.__std + + return sample + + +class PrepareForNet(object): + """Prepare sample for usage as network input. + """ + + def __init__(self): + pass + + def __call__(self, sample): + image = np.transpose(sample["image"], (2, 0, 1)) + sample["image"] = np.ascontiguousarray(image).astype(np.float32) + + if "depth" in sample: + depth = sample["depth"].astype(np.float32) + sample["depth"] = np.ascontiguousarray(depth) + + if "mask" in sample: + sample["mask"] = sample["mask"].astype(np.float32) + sample["mask"] = np.ascontiguousarray(sample["mask"]) + + return sample \ No newline at end of file diff --git a/depth_anything_v2_metric/depth_to_pointcloud.py b/depth_anything_v2_metric/depth_to_pointcloud.py new file mode 100644 index 0000000000000000000000000000000000000000..770fe60698724327f1071c66d685b4a3d8ce7ca8 --- /dev/null +++ b/depth_anything_v2_metric/depth_to_pointcloud.py @@ -0,0 +1,114 @@ +""" +Born out of Depth Anything V1 Issue 36 +Make sure you have the necessary libraries installed. +Code by @1ssb + +This script processes a set of images to generate depth maps and corresponding point clouds. +The resulting point clouds are saved in the specified output directory. + +Usage: + python script.py --encoder vitl --load-from path_to_model --max-depth 20 --img-path path_to_images --outdir output_directory --focal-length-x 470.4 --focal-length-y 470.4 + +Arguments: + --encoder: Model encoder to use. Choices are ['vits', 'vitb', 'vitl', 'vitg']. + --load-from: Path to the pre-trained model weights. + --max-depth: Maximum depth value for the depth map. + --img-path: Path to the input image or directory containing images. + --outdir: Directory to save the output point clouds. + --focal-length-x: Focal length along the x-axis. + --focal-length-y: Focal length along the y-axis. 
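+
+The back-projection assumes a pinhole camera with the principal point at the image
+center: for a pixel (u, v) with predicted depth Z, the 3D point is
+X = (u - W/2) * Z / fx, Y = (v - H/2) * Z / fy.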
+""" + +import argparse +import cv2 +import glob +import numpy as np +import open3d as o3d +import os +from PIL import Image +import torch + +from depth_anything_v2.dpt import DepthAnythingV2 + + +def main(): + # Parse command-line arguments + parser = argparse.ArgumentParser(description='Generate depth maps and point clouds from images.') + parser.add_argument('--encoder', default='vitl', type=str, choices=['vits', 'vitb', 'vitl', 'vitg'], + help='Model encoder to use.') + parser.add_argument('--load-from', default='', type=str, required=True, + help='Path to the pre-trained model weights.') + parser.add_argument('--max-depth', default=20, type=float, + help='Maximum depth value for the depth map.') + parser.add_argument('--img-path', type=str, required=True, + help='Path to the input image or directory containing images.') + parser.add_argument('--outdir', type=str, default='./vis_pointcloud', + help='Directory to save the output point clouds.') + parser.add_argument('--focal-length-x', default=470.4, type=float, + help='Focal length along the x-axis.') + parser.add_argument('--focal-length-y', default=470.4, type=float, + help='Focal length along the y-axis.') + + args = parser.parse_args() + + # Determine the device to use (CUDA, MPS, or CPU) + DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu' + + # Model configuration based on the chosen encoder + model_configs = { + 'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]}, + 'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]}, + 'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]}, + 'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]} + } + + # Initialize the DepthAnythingV2 model with the specified configuration + depth_anything = DepthAnythingV2(**{**model_configs[args.encoder], 'max_depth': args.max_depth}) + depth_anything.load_state_dict(torch.load(args.load_from, map_location='cpu')) + depth_anything = depth_anything.to(DEVICE).eval() + + # Get the list of image files to process + if os.path.isfile(args.img_path): + if args.img_path.endswith('txt'): + with open(args.img_path, 'r') as f: + filenames = f.read().splitlines() + else: + filenames = [args.img_path] + else: + filenames = glob.glob(os.path.join(args.img_path, '**/*'), recursive=True) + + # Create the output directory if it doesn't exist + os.makedirs(args.outdir, exist_ok=True) + + # Process each image file + for k, filename in enumerate(filenames): + print(f'Processing {k+1}/{len(filenames)}: {filename}') + + # Load the image + color_image = Image.open(filename).convert('RGB') + width, height = color_image.size + + # Read the image using OpenCV + image = cv2.imread(filename) + pred = depth_anything.infer_image(image, height) + + # Resize depth prediction to match the original image size + resized_pred = Image.fromarray(pred).resize((width, height), Image.NEAREST) + + # Generate mesh grid and calculate point cloud coordinates + x, y = np.meshgrid(np.arange(width), np.arange(height)) + x = (x - width / 2) / args.focal_length_x + y = (y - height / 2) / args.focal_length_y + z = np.array(resized_pred) + points = np.stack((np.multiply(x, z), np.multiply(y, z), z), axis=-1).reshape(-1, 3) + colors = np.array(color_image).reshape(-1, 3) / 255.0 + + # Create the point cloud and save it to the output directory + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(points) + pcd.colors 
= o3d.utility.Vector3dVector(colors) + o3d.io.write_point_cloud(os.path.join(args.outdir, os.path.splitext(os.path.basename(filename))[0] + ".ply"), pcd) + + +if __name__ == '__main__': + main() diff --git a/depth_anything_v2_metric/dist_train.sh b/depth_anything_v2_metric/dist_train.sh new file mode 100644 index 0000000000000000000000000000000000000000..afa750ad3c3e05fca741bcd66a5f6fea1dab46ac --- /dev/null +++ b/depth_anything_v2_metric/dist_train.sh @@ -0,0 +1,26 @@ +#!/bin/bash +now=$(date +"%Y%m%d_%H%M%S") + +epoch=120 +bs=4 +gpus=8 +lr=0.000005 +encoder=vitl +dataset=hypersim # vkitti +img_size=518 +min_depth=0.001 +max_depth=20 # 80 for virtual kitti +pretrained_from=../checkpoints/depth_anything_v2_${encoder}.pth +save_path=exp/hypersim # exp/vkitti + +mkdir -p $save_path + +python3 -m torch.distributed.launch \ + --nproc_per_node=$gpus \ + --nnodes 1 \ + --node_rank=0 \ + --master_addr=localhost \ + --master_port=20596 \ + train.py --epoch $epoch --encoder $encoder --bs $bs --lr $lr --save-path $save_path --dataset $dataset \ + --img-size $img_size --min-depth $min_depth --max-depth $max_depth --pretrained-from $pretrained_from \ + --port 20596 2>&1 | tee -a $save_path/$now.log diff --git a/depth_anything_v2_metric/requirements.txt b/depth_anything_v2_metric/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..082330f721c6e704a360cd7706b968d5a93b312f --- /dev/null +++ b/depth_anything_v2_metric/requirements.txt @@ -0,0 +1,5 @@ +matplotlib +opencv-python +open3d +torch +torchvision diff --git a/depth_anything_v2_metric/run.py b/depth_anything_v2_metric/run.py new file mode 100644 index 0000000000000000000000000000000000000000..63d46f54099d2039f80379fec694a717e2965bed --- /dev/null +++ b/depth_anything_v2_metric/run.py @@ -0,0 +1,81 @@ +import argparse +import cv2 +import glob +import matplotlib +import numpy as np +import os +import torch + +from depth_anything_v2.dpt import DepthAnythingV2 + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Depth Anything V2 Metric Depth Estimation') + + parser.add_argument('--img-path', type=str) + parser.add_argument('--input-size', type=int, default=518) + parser.add_argument('--outdir', type=str, default='./vis_depth') + + parser.add_argument('--encoder', type=str, default='vitl', choices=['vits', 'vitb', 'vitl', 'vitg']) + parser.add_argument('--load-from', type=str, default='checkpoints/depth_anything_v2_metric_hypersim_vitl.pth') + parser.add_argument('--max-depth', type=float, default=20) + + parser.add_argument('--save-numpy', dest='save_numpy', action='store_true', help='save the model raw output') + parser.add_argument('--pred-only', dest='pred_only', action='store_true', help='only display the prediction') + parser.add_argument('--grayscale', dest='grayscale', action='store_true', help='do not apply colorful palette') + + args = parser.parse_args() + + DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu' + + model_configs = { + 'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]}, + 'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]}, + 'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]}, + 'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]} + } + + depth_anything = DepthAnythingV2(**{**model_configs[args.encoder], 'max_depth': args.max_depth}) + depth_anything.load_state_dict(torch.load(args.load_from, 
map_location='cpu')) + depth_anything = depth_anything.to(DEVICE).eval() + + if os.path.isfile(args.img_path): + if args.img_path.endswith('txt'): + with open(args.img_path, 'r') as f: + filenames = f.read().splitlines() + else: + filenames = [args.img_path] + else: + filenames = glob.glob(os.path.join(args.img_path, '**/*'), recursive=True) + + os.makedirs(args.outdir, exist_ok=True) + + cmap = matplotlib.colormaps.get_cmap('Spectral') + + for k, filename in enumerate(filenames): + print(f'Progress {k+1}/{len(filenames)}: {filename}') + + raw_image = cv2.imread(filename) + + depth = depth_anything.infer_image(raw_image, args.input_size) + + if args.save_numpy: + output_path = os.path.join(args.outdir, os.path.splitext(os.path.basename(filename))[0] + '_raw_depth_meter.npy') + np.save(output_path, depth) + + depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0 + depth = depth.astype(np.uint8) + + if args.grayscale: + depth = np.repeat(depth[..., np.newaxis], 3, axis=-1) + else: + depth = (cmap(depth)[:, :, :3] * 255)[:, :, ::-1].astype(np.uint8) + + output_path = os.path.join(args.outdir, os.path.splitext(os.path.basename(filename))[0] + '.png') + if args.pred_only: + cv2.imwrite(output_path, depth) + else: + split_region = np.ones((raw_image.shape[0], 50, 3), dtype=np.uint8) * 255 + combined_result = cv2.hconcat([raw_image, split_region, depth]) + + cv2.imwrite(output_path, combined_result) \ No newline at end of file diff --git a/depth_anything_v2_metric/train.py b/depth_anything_v2_metric/train.py new file mode 100644 index 0000000000000000000000000000000000000000..9b304275c5bd22f63fa19e54c427b3da6a730ea9 --- /dev/null +++ b/depth_anything_v2_metric/train.py @@ -0,0 +1,212 @@ +import argparse +import logging +import os +import pprint +import random + +import warnings +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.distributed as dist +from torch.utils.data import DataLoader +from torch.optim import AdamW +import torch.nn.functional as F +from torch.utils.tensorboard import SummaryWriter + +from dataset.hypersim import Hypersim +from dataset.kitti import KITTI +from dataset.vkitti2 import VKITTI2 +from depth_anything_v2.dpt import DepthAnythingV2 +from util.dist_helper import setup_distributed +from util.loss import SiLogLoss +from util.metric import eval_depth +from util.utils import init_log + + +parser = argparse.ArgumentParser(description='Depth Anything V2 for Metric Depth Estimation') + +parser.add_argument('--encoder', default='vitl', choices=['vits', 'vitb', 'vitl', 'vitg']) +parser.add_argument('--dataset', default='hypersim', choices=['hypersim', 'vkitti']) +parser.add_argument('--img-size', default=518, type=int) +parser.add_argument('--min-depth', default=0.001, type=float) +parser.add_argument('--max-depth', default=20, type=float) +parser.add_argument('--epochs', default=40, type=int) +parser.add_argument('--bs', default=2, type=int) +parser.add_argument('--lr', default=0.000005, type=float) +parser.add_argument('--pretrained-from', type=str) +parser.add_argument('--save-path', type=str, required=True) +parser.add_argument('--local-rank', default=0, type=int) +parser.add_argument('--port', default=None, type=int) + + +def main(): + args = parser.parse_args() + + warnings.simplefilter('ignore', np.RankWarning) + + logger = init_log('global', logging.INFO) + logger.propagate = 0 + + rank, world_size = setup_distributed(port=args.port) + + if rank == 0: + all_args = {**vars(args), 'ngpus': world_size} + 
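+        # only the rank-0 process prints the full config and writes TensorBoard summaries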
logger.info('{}\n'.format(pprint.pformat(all_args))) + writer = SummaryWriter(args.save_path) + + cudnn.enabled = True + cudnn.benchmark = True + + size = (args.img_size, args.img_size) + if args.dataset == 'hypersim': + trainset = Hypersim('dataset/splits/hypersim/train.txt', 'train', size=size) + elif args.dataset == 'vkitti': + trainset = VKITTI2('dataset/splits/vkitti2/train.txt', 'train', size=size) + else: + raise NotImplementedError + trainsampler = torch.utils.data.distributed.DistributedSampler(trainset) + trainloader = DataLoader(trainset, batch_size=args.bs, pin_memory=True, num_workers=4, drop_last=True, sampler=trainsampler) + + if args.dataset == 'hypersim': + valset = Hypersim('dataset/splits/hypersim/val.txt', 'val', size=size) + elif args.dataset == 'vkitti': + valset = KITTI('dataset/splits/kitti/val.txt', 'val', size=size) + else: + raise NotImplementedError + valsampler = torch.utils.data.distributed.DistributedSampler(valset) + valloader = DataLoader(valset, batch_size=1, pin_memory=True, num_workers=4, drop_last=True, sampler=valsampler) + + local_rank = int(os.environ["LOCAL_RANK"]) + + model_configs = { + 'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]}, + 'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]}, + 'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]}, + 'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]} + } + model = DepthAnythingV2(**{**model_configs[args.encoder], 'max_depth': args.max_depth}) + + if args.pretrained_from: + model.load_state_dict({k: v for k, v in torch.load(args.pretrained_from, map_location='cpu').items() if 'pretrained' in k}, strict=False) + + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + model.cuda(local_rank) + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], broadcast_buffers=False, + output_device=local_rank, find_unused_parameters=True) + + criterion = SiLogLoss().cuda(local_rank) + + optimizer = AdamW([{'params': [param for name, param in model.named_parameters() if 'pretrained' in name], 'lr': args.lr}, + {'params': [param for name, param in model.named_parameters() if 'pretrained' not in name], 'lr': args.lr * 10.0}], + lr=args.lr, betas=(0.9, 0.999), weight_decay=0.01) + + total_iters = args.epochs * len(trainloader) + + previous_best = {'d1': 0, 'd2': 0, 'd3': 0, 'abs_rel': 100, 'sq_rel': 100, 'rmse': 100, 'rmse_log': 100, 'log10': 100, 'silog': 100} + + for epoch in range(args.epochs): + if rank == 0: + logger.info('===========> Epoch: {:}/{:}, d1: {:.3f}, d2: {:.3f}, d3: {:.3f}'.format(epoch, args.epochs, previous_best['d1'], previous_best['d2'], previous_best['d3'])) + logger.info('===========> Epoch: {:}/{:}, abs_rel: {:.3f}, sq_rel: {:.3f}, rmse: {:.3f}, rmse_log: {:.3f}, ' + 'log10: {:.3f}, silog: {:.3f}'.format( + epoch, args.epochs, previous_best['abs_rel'], previous_best['sq_rel'], previous_best['rmse'], + previous_best['rmse_log'], previous_best['log10'], previous_best['silog'])) + + trainloader.sampler.set_epoch(epoch + 1) + + model.train() + total_loss = 0 + + for i, sample in enumerate(trainloader): + optimizer.zero_grad() + + img, depth, valid_mask = sample['image'].cuda(), sample['depth'].cuda(), sample['valid_mask'].cuda() + + if random.random() < 0.5: + img = img.flip(-1) + depth = depth.flip(-1) + valid_mask = valid_mask.flip(-1) + + pred = model(img) + + loss = criterion(pred, depth, (valid_mask == 1) & (depth >= args.min_depth) 
& (depth <= args.max_depth)) + + loss.backward() + optimizer.step() + + total_loss += loss.item() + + iters = epoch * len(trainloader) + i + + lr = args.lr * (1 - iters / total_iters) ** 0.9 + + optimizer.param_groups[0]["lr"] = lr + optimizer.param_groups[1]["lr"] = lr * 10.0 + + if rank == 0: + writer.add_scalar('train/loss', loss.item(), iters) + + if rank == 0 and i % 100 == 0: + logger.info('Iter: {}/{}, LR: {:.7f}, Loss: {:.3f}'.format(i, len(trainloader), optimizer.param_groups[0]['lr'], loss.item())) + + model.eval() + + results = {'d1': torch.tensor([0.0]).cuda(), 'd2': torch.tensor([0.0]).cuda(), 'd3': torch.tensor([0.0]).cuda(), + 'abs_rel': torch.tensor([0.0]).cuda(), 'sq_rel': torch.tensor([0.0]).cuda(), 'rmse': torch.tensor([0.0]).cuda(), + 'rmse_log': torch.tensor([0.0]).cuda(), 'log10': torch.tensor([0.0]).cuda(), 'silog': torch.tensor([0.0]).cuda()} + nsamples = torch.tensor([0.0]).cuda() + + for i, sample in enumerate(valloader): + + img, depth, valid_mask = sample['image'].cuda().float(), sample['depth'].cuda()[0], sample['valid_mask'].cuda()[0] + + with torch.no_grad(): + pred = model(img) + pred = F.interpolate(pred[:, None], depth.shape[-2:], mode='bilinear', align_corners=True)[0, 0] + + valid_mask = (valid_mask == 1) & (depth >= args.min_depth) & (depth <= args.max_depth) + + if valid_mask.sum() < 10: + continue + + cur_results = eval_depth(pred[valid_mask], depth[valid_mask]) + + for k in results.keys(): + results[k] += cur_results[k] + nsamples += 1 + + torch.distributed.barrier() + + for k in results.keys(): + dist.reduce(results[k], dst=0) + dist.reduce(nsamples, dst=0) + + if rank == 0: + logger.info('==========================================================================================') + logger.info('{:>8}, {:>8}, {:>8}, {:>8}, {:>8}, {:>8}, {:>8}, {:>8}, {:>8}'.format(*tuple(results.keys()))) + logger.info('{:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}'.format(*tuple([(v / nsamples).item() for v in results.values()]))) + logger.info('==========================================================================================') + print() + + for name, metric in results.items(): + writer.add_scalar(f'eval/{name}', (metric / nsamples).item(), epoch) + + for k in results.keys(): + if k in ['d1', 'd2', 'd3']: + previous_best[k] = max(previous_best[k], (results[k] / nsamples).item()) + else: + previous_best[k] = min(previous_best[k], (results[k] / nsamples).item()) + + if rank == 0: + checkpoint = { + 'model': model.state_dict(), + 'optimizer': optimizer.state_dict(), + 'epoch': epoch, + 'previous_best': previous_best, + } + torch.save(checkpoint, os.path.join(args.save_path, 'latest.pth')) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/depth_anything_v2_metric/util/dist_helper.py b/depth_anything_v2_metric/util/dist_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..7b6eb432b4988638ac9549a82fbaebf968fe9c61 --- /dev/null +++ b/depth_anything_v2_metric/util/dist_helper.py @@ -0,0 +1,41 @@ +import os +import subprocess + +import torch +import torch.distributed as dist + + +def setup_distributed(backend="nccl", port=None): + """AdaHessian Optimizer + Lifted from https://github.com/BIGBALLON/distribuuuu/blob/master/distribuuuu/utils.py + Originally licensed MIT, Copyright (c) 2020 Wei Li + """ + num_gpus = torch.cuda.device_count() + + if "SLURM_JOB_ID" in os.environ: + rank = int(os.environ["SLURM_PROCID"]) + world_size = int(os.environ["SLURM_NTASKS"]) + node_list = 
os.environ["SLURM_NODELIST"] + addr = subprocess.getoutput(f"scontrol show hostname {node_list} | head -n1") + # specify master port + if port is not None: + os.environ["MASTER_PORT"] = str(port) + elif "MASTER_PORT" not in os.environ: + os.environ["MASTER_PORT"] = "10685" + if "MASTER_ADDR" not in os.environ: + os.environ["MASTER_ADDR"] = addr + os.environ["WORLD_SIZE"] = str(world_size) + os.environ["LOCAL_RANK"] = str(rank % num_gpus) + os.environ["RANK"] = str(rank) + else: + rank = int(os.environ["RANK"]) + world_size = int(os.environ["WORLD_SIZE"]) + + torch.cuda.set_device(rank % num_gpus) + + dist.init_process_group( + backend=backend, + world_size=world_size, + rank=rank, + ) + return rank, world_size diff --git a/depth_anything_v2_metric/util/loss.py b/depth_anything_v2_metric/util/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..2ae5b304effd46661e93ea23127d1115c36b5265 --- /dev/null +++ b/depth_anything_v2_metric/util/loss.py @@ -0,0 +1,16 @@ +import torch +from torch import nn + + +class SiLogLoss(nn.Module): + def __init__(self, lambd=0.5): + super().__init__() + self.lambd = lambd + + def forward(self, pred, target, valid_mask): + valid_mask = valid_mask.detach() + diff_log = torch.log(target[valid_mask]) - torch.log(pred[valid_mask]) + loss = torch.sqrt(torch.pow(diff_log, 2).mean() - + self.lambd * torch.pow(diff_log.mean(), 2)) + + return loss diff --git a/depth_anything_v2_metric/util/metric.py b/depth_anything_v2_metric/util/metric.py new file mode 100644 index 0000000000000000000000000000000000000000..8638cf25875c753cb62c3977af1417c221237dce --- /dev/null +++ b/depth_anything_v2_metric/util/metric.py @@ -0,0 +1,26 @@ +import torch + + +def eval_depth(pred, target): + assert pred.shape == target.shape + + thresh = torch.max((target / pred), (pred / target)) + + d1 = torch.sum(thresh < 1.25).float() / len(thresh) + d2 = torch.sum(thresh < 1.25 ** 2).float() / len(thresh) + d3 = torch.sum(thresh < 1.25 ** 3).float() / len(thresh) + + diff = pred - target + diff_log = torch.log(pred) - torch.log(target) + + abs_rel = torch.mean(torch.abs(diff) / target) + sq_rel = torch.mean(torch.pow(diff, 2) / target) + + rmse = torch.sqrt(torch.mean(torch.pow(diff, 2))) + rmse_log = torch.sqrt(torch.mean(torch.pow(diff_log , 2))) + + log10 = torch.mean(torch.abs(torch.log10(pred) - torch.log10(target))) + silog = torch.sqrt(torch.pow(diff_log, 2).mean() - 0.5 * torch.pow(diff_log.mean(), 2)) + + return {'d1': d1.item(), 'd2': d2.item(), 'd3': d3.item(), 'abs_rel': abs_rel.item(), 'sq_rel': sq_rel.item(), + 'rmse': rmse.item(), 'rmse_log': rmse_log.item(), 'log10':log10.item(), 'silog':silog.item()} \ No newline at end of file diff --git a/depth_anything_v2_metric/util/utils.py b/depth_anything_v2_metric/util/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e89b994538c5123075605fb6130022867f37c99b --- /dev/null +++ b/depth_anything_v2_metric/util/utils.py @@ -0,0 +1,26 @@ +import os +import re +import numpy as np +import logging + +logs = set() + + +def init_log(name, level=logging.INFO): + if (name, level) in logs: + return + logs.add((name, level)) + logger = logging.getLogger(name) + logger.setLevel(level) + ch = logging.StreamHandler() + ch.setLevel(level) + if "SLURM_PROCID" in os.environ: + rank = int(os.environ["SLURM_PROCID"]) + logger.addFilter(lambda record: rank == 0) + else: + rank = 0 + format_str = "[%(asctime)s][%(levelname)8s] %(message)s" + formatter = logging.Formatter(format_str) + ch.setFormatter(formatter) + 
logger.addHandler(ch) + return logger diff --git a/networks/__init__.py b/networks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7ba02a15b3299e5ae0fc566891ce3f6187b56dba --- /dev/null +++ b/networks/__init__.py @@ -0,0 +1 @@ +from .panda import PanDA \ No newline at end of file diff --git a/networks/__pycache__/Teacher_Model_v2.cpython-39.pyc b/networks/__pycache__/Teacher_Model_v2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c315e8bab5f4ea14850d1b225673fe641db2009b Binary files /dev/null and b/networks/__pycache__/Teacher_Model_v2.cpython-39.pyc differ diff --git a/networks/__pycache__/__init__.cpython-310.pyc b/networks/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d7aa1cadb36ddcfff2a723d94f23e52e0e64575 Binary files /dev/null and b/networks/__pycache__/__init__.cpython-310.pyc differ diff --git a/networks/__pycache__/__init__.cpython-39.pyc b/networks/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62970b2396224a3ffe0182a3ed567ef320d5811a Binary files /dev/null and b/networks/__pycache__/__init__.cpython-39.pyc differ diff --git a/networks/__pycache__/blocks.cpython-39.pyc b/networks/__pycache__/blocks.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9c0c8e4ce634db91ca7b6280159c83435def683 Binary files /dev/null and b/networks/__pycache__/blocks.cpython-39.pyc differ diff --git a/networks/__pycache__/dpt.cpython-39.pyc b/networks/__pycache__/dpt.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4cfff11bc95f7e76df90151b02d9e728d1cd688d Binary files /dev/null and b/networks/__pycache__/dpt.cpython-39.pyc differ diff --git a/networks/__pycache__/models.cpython-310.pyc b/networks/__pycache__/models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62795722068e95c9afc36671fa743345d400a7b5 Binary files /dev/null and b/networks/__pycache__/models.cpython-310.pyc differ diff --git a/networks/__pycache__/models.cpython-39.pyc b/networks/__pycache__/models.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46d99209f8934521a71f694a0978269ec9ddcbbd Binary files /dev/null and b/networks/__pycache__/models.cpython-39.pyc differ diff --git a/networks/__pycache__/panda.cpython-310.pyc b/networks/__pycache__/panda.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d77d26cbbb13a6b8671819f9b89c8bd038901eda Binary files /dev/null and b/networks/__pycache__/panda.cpython-310.pyc differ diff --git a/networks/__pycache__/panda.cpython-39.pyc b/networks/__pycache__/panda.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96497063237967fd17955ed904d9ddb3fca7a22e Binary files /dev/null and b/networks/__pycache__/panda.cpython-39.pyc differ diff --git a/networks/__pycache__/projection_utils.cpython-39.pyc b/networks/__pycache__/projection_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85a6fb106cbccdca47166b071c3cc2ba594f3ebf Binary files /dev/null and b/networks/__pycache__/projection_utils.cpython-39.pyc differ diff --git a/networks/__pycache__/utils.cpython-310.pyc b/networks/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec8a6e54b23906b65430f7a515cb6e5f4fcf894c Binary files /dev/null and 
b/networks/__pycache__/utils.cpython-310.pyc differ diff --git a/networks/__pycache__/utils.cpython-39.pyc b/networks/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1ffbff677fb427abf5e4a2cb668a16d17f652de Binary files /dev/null and b/networks/__pycache__/utils.cpython-39.pyc differ diff --git a/networks/blocks.py b/networks/blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..38dbcfeffc0c38ef51bcb20dfd347e50b2a60616 --- /dev/null +++ b/networks/blocks.py @@ -0,0 +1,153 @@ +import torch.nn as nn + + +def _make_scratch(in_shape, out_shape, groups=1, expand=False): + scratch = nn.Module() + + out_shape1 = out_shape + out_shape2 = out_shape + out_shape3 = out_shape + if len(in_shape) >= 4: + out_shape4 = out_shape + + if expand: + out_shape1 = out_shape + out_shape2 = out_shape*2 + out_shape3 = out_shape*4 + if len(in_shape) >= 4: + out_shape4 = out_shape*8 + + scratch.layer1_rn = nn.Conv2d( + in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + scratch.layer2_rn = nn.Conv2d( + in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + scratch.layer3_rn = nn.Conv2d( + in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + if len(in_shape) >= 4: + scratch.layer4_rn = nn.Conv2d( + in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + + return scratch + + +class ResidualConvUnit(nn.Module): + """Residual convolution module. + """ + + def __init__(self, features, activation, bn): + """Init. + + Args: + features (int): number of features + """ + super().__init__() + + self.bn = bn + + self.groups=1 + + self.conv1 = nn.Conv2d( + features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups + ) + + self.conv2 = nn.Conv2d( + features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups + ) + + if self.bn==True: + self.bn1 = nn.BatchNorm2d(features) + self.bn2 = nn.BatchNorm2d(features) + + self.activation = activation + + self.skip_add = nn.quantized.FloatFunctional() + + def forward(self, x): + """Forward pass. + + Args: + x (tensor): input + + Returns: + tensor: output + """ + + out = self.activation(x) + out = self.conv1(out) + if self.bn==True: + out = self.bn1(out) + + out = self.activation(out) + out = self.conv2(out) + if self.bn==True: + out = self.bn2(out) + + if self.groups > 1: + out = self.conv_merge(out) + + return self.skip_add.add(out, x) + + +class FeatureFusionBlock(nn.Module): + """Feature fusion block. + """ + + def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True, size=None): + """Init. + + Args: + features (int): number of features + """ + super(FeatureFusionBlock, self).__init__() + + self.deconv = deconv + self.align_corners = align_corners + + self.groups=1 + + self.expand = expand + out_features = features + if self.expand==True: + out_features = features//2 + + self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1) + + self.resConfUnit1 = ResidualConvUnit(features, activation, bn) + self.resConfUnit2 = ResidualConvUnit(features, activation, bn) + + self.skip_add = nn.quantized.FloatFunctional() + + self.size=size + + def forward(self, *xs, size=None): + """Forward pass. 
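+
+        Args:
+            *xs: one or two feature maps; when two are given, the second is refined
+                by resConfUnit1 and added to the first before fusion.
+            size: optional target spatial size for the bilinear upsampling step.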
+ + Returns: + tensor: output + """ + output = xs[0] + + if len(xs) == 2: + res = self.resConfUnit1(xs[1]) + output = self.skip_add.add(output, res) + + output = self.resConfUnit2(output) + + if (size is None) and (self.size is None): + modifier = {"scale_factor": 2} + elif size is None: + modifier = {"size": self.size} + else: + modifier = {"size": size} + + output = nn.functional.interpolate( + output, **modifier, mode="bilinear", align_corners=self.align_corners + ) + + output = self.out_conv(output) + + return output diff --git a/networks/dpt.py b/networks/dpt.py new file mode 100644 index 0000000000000000000000000000000000000000..f6487d84d7705ade993da83f96644042d42e98ec --- /dev/null +++ b/networks/dpt.py @@ -0,0 +1,195 @@ +import argparse +import torch +import torch.nn as nn +import torch.nn.functional as F +from huggingface_hub import PyTorchModelHubMixin, hf_hub_download + +from .blocks import FeatureFusionBlock, _make_scratch + +from argparse import Namespace +from .models import register + + +def _make_fusion_block(features, use_bn, size = None): + return FeatureFusionBlock( + features, + nn.ReLU(False), + deconv=False, + bn=use_bn, + expand=False, + align_corners=True, + size=size, + ) + + +class DPTHead(nn.Module): + def __init__(self, nclass, in_channels, features=256, use_bn=False, out_channels=[256, 512, 1024, 1024], use_clstoken=False): + super(DPTHead, self).__init__() + + self.nclass = nclass + self.use_clstoken = use_clstoken + + self.projects = nn.ModuleList([ + nn.Conv2d( + in_channels=in_channels, + out_channels=out_channel, + kernel_size=1, + stride=1, + padding=0, + ) for out_channel in out_channels + ]) + + self.resize_layers = nn.ModuleList([ + nn.ConvTranspose2d( + in_channels=out_channels[0], + out_channels=out_channels[0], + kernel_size=4, + stride=4, + padding=0), + nn.ConvTranspose2d( + in_channels=out_channels[1], + out_channels=out_channels[1], + kernel_size=2, + stride=2, + padding=0), + nn.Identity(), + nn.Conv2d( + in_channels=out_channels[3], + out_channels=out_channels[3], + kernel_size=3, + stride=2, + padding=1) + ]) + + if use_clstoken: + self.readout_projects = nn.ModuleList() + for _ in range(len(self.projects)): + self.readout_projects.append( + nn.Sequential( + nn.Linear(2 * in_channels, in_channels), + nn.GELU())) + + self.scratch = _make_scratch( + out_channels, + features, + groups=1, + expand=False, + ) + + self.scratch.stem_transpose = None + + self.scratch.refinenet1 = _make_fusion_block(features, use_bn) + self.scratch.refinenet2 = _make_fusion_block(features, use_bn) + self.scratch.refinenet3 = _make_fusion_block(features, use_bn) + self.scratch.refinenet4 = _make_fusion_block(features, use_bn) + + head_features_1 = features + head_features_2 = 32 + + if nclass > 1: + self.scratch.output_conv = nn.Sequential( + nn.Conv2d(head_features_1, head_features_1, kernel_size=3, stride=1, padding=1), + nn.ReLU(True), + nn.Conv2d(head_features_1, nclass, kernel_size=1, stride=1, padding=0), + ) + else: + self.scratch.output_conv1 = nn.Conv2d(head_features_1, head_features_1 // 2, kernel_size=3, stride=1, padding=1) + + self.scratch.output_conv2 = nn.Sequential( + nn.Conv2d(head_features_1 // 2, head_features_2, kernel_size=3, stride=1, padding=1), + nn.ReLU(True), + nn.Conv2d(head_features_2, 1, kernel_size=1, stride=1, padding=0), + nn.ReLU(True), + nn.Identity(), + ) + + def forward(self, out_features, patch_h, patch_w): + out = [] + for i, x in enumerate(out_features): + if self.use_clstoken: + x, cls_token = x[0], x[1] + readout = 
cls_token.unsqueeze(1).expand_as(x) + x = self.readout_projects[i](torch.cat((x, readout), -1)) + else: + x = x[0] + + x = x.permute(0, 2, 1).reshape((x.shape[0], x.shape[-1], patch_h, patch_w)) + + x = self.projects[i](x) + x = self.resize_layers[i](x) + + out.append(x) + + layer_1, layer_2, layer_3, layer_4 = out + + layer_1_rn = self.scratch.layer1_rn(layer_1) + layer_2_rn = self.scratch.layer2_rn(layer_2) + layer_3_rn = self.scratch.layer3_rn(layer_3) + layer_4_rn = self.scratch.layer4_rn(layer_4) + + path_4 = self.scratch.refinenet4(layer_4_rn, size=layer_3_rn.shape[2:]) + path_3 = self.scratch.refinenet3(path_4, layer_3_rn, size=layer_2_rn.shape[2:]) + path_2 = self.scratch.refinenet2(path_3, layer_2_rn, size=layer_1_rn.shape[2:]) + path_1 = self.scratch.refinenet1(path_2, layer_1_rn) + + out_feats = [path_4, path_3, path_2, path_1] + + out = self.scratch.output_conv1(path_1) + out = F.interpolate(out, (int(patch_h * 14), int(patch_w * 14)), mode="bilinear", align_corners=True) + # out_feats = out + out = self.scratch.output_conv2(out) + + # return out, out_feats + return out + + +class DPT_DINOv2(nn.Module): + def __init__(self, encoder='vitl', features=256, out_channels=[256, 512, 1024, 1024], use_bn=False, use_clstoken=False, localhub=True): + super(DPT_DINOv2, self).__init__() + + assert encoder in ['vits', 'vitb', 'vitl'] + + # in case the Internet connection is not stable, please load the DINOv2 locally + if localhub: + self.pretrained = torch.hub.load('torchhub/facebookresearch_dinov2_main', 'dinov2_{:}14'.format(encoder), source='local', pretrained=False) + else: + self.pretrained = torch.hub.load('facebookresearch/dinov2', 'dinov2_{:}14'.format(encoder)) + + dim = self.pretrained.blocks[0].attn.qkv.in_features + + self.depth_head = DPTHead(1, dim, features, use_bn, out_channels=out_channels, use_clstoken=use_clstoken) + + def forward(self, x): + h, w = x.shape[-2:] + + features = self.pretrained.get_intermediate_layers(x, 4, return_class_token=True) + + patch_h, patch_w = h // 14, w // 14 + + # depth, depth_feats = self.depth_head(features, patch_h, patch_w) + depth = self.depth_head(features, patch_h, patch_w) + depth = F.interpolate(depth, size=(h, w), mode="bilinear", align_corners=True) + depth = F.relu(depth) + + # return depth, depth_feats + return depth, features + + +class DepthAnything(DPT_DINOv2, PyTorchModelHubMixin): + def __init__(self, config): + super().__init__(**config) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + "--encoder", + default="vits", + type=str, + choices=["vits", "vitb", "vitl"], + ) + args = parser.parse_args() + + model = DepthAnything.from_pretrained("LiheYoung/depth_anything_{:}14".format(args.encoder)) + + print(model) + \ No newline at end of file diff --git a/networks/models.py b/networks/models.py new file mode 100644 index 0000000000000000000000000000000000000000..136cce9202106162c9e0d0816d89a9ff9b9ccb2f --- /dev/null +++ b/networks/models.py @@ -0,0 +1,20 @@ +import copy + +models = {} + +def register(name): + def decorator(cls): + models[name] = cls + return cls + return decorator + +def make(model_spec, args=None, load_sd=False): + if args is not None: + model_args = copy.deepcopy(model_spec['args']) + model_args.update(args) + else: + model_args = model_spec['args'] + model = models[model_spec['name']](**model_args) + if load_sd: + model.load_state_dict(model_spec['sd']) + return model \ No newline at end of file diff --git a/networks/panda.py b/networks/panda.py new file mode 100644 
index 0000000000000000000000000000000000000000..f5249d98e1565b3ccca1449c3390c105b26500a4 --- /dev/null +++ b/networks/panda.py @@ -0,0 +1,122 @@ +import torch +import numpy as np +from einops import rearrange +import torch.nn as nn +import torch.nn.functional as F +from torchvision.transforms import Compose +import cv2 + +from depth_anything_v2_metric.depth_anything_v2.dpt import DepthAnythingV2 +from .utils import LoRA_Depth_Anything_v2 + +from argparse import Namespace +from .models import register +from depth_anything_utils import Resize, NormalizeImage, PrepareForNet + +class PanDA(nn.Module): + def __init__(self, args): + """ + PanDA model for depth estimation + """ + super().__init__() + + midas_model_type = args.midas_model_type + fine_tune_type = args.fine_tune_type + min_depth = args.min_depth + self.max_depth = args.max_depth + lora = args.lora + train_decoder = args.train_decoder + lora_rank = args.lora_rank + + # Pre-defined setting of the model + model_configs = { + 'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]}, + 'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]}, + 'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]}, + 'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]} + } + + # Load the pretrained model of depth anything + depth_anything = DepthAnythingV2(**{**model_configs[midas_model_type], 'max_depth': 1.0}) + if fine_tune_type == 'none': + depth_anything.load_state_dict(torch.load(f'/hpc2hdd/home/zcao740/Documents/360Depth/Semi-supervision/checkpoints/depth_anything_v2_{midas_model_type}.pth')) + elif fine_tune_type == 'hypersim': + depth_anything.load_state_dict(torch.load(f'/hpc2hdd/home/zcao740/Documents/360Depth/Semi-supervision/checkpoints/depth_anything_v2_metric_hypersim_{midas_model_type}.pth')) + elif fine_tune_type == 'vkitti': + depth_anything.load_state_dict(torch.load(f'checkpoints/depth_anything_v2_metric_vkitti_{midas_model_type}.pth')) + elif fine_tune_type == "backbone": + depth_anything.load_state_dict(torch.load(f'checkpoints/depth_anything_v2_{midas_model_type}.pth')) + elif fine_tune_type == "inference": + pass + + # Apply LoRA to the model for erp branch + if lora: + self.core = depth_anything + LoRA_Depth_Anything_v2(depth_anything, r=lora_rank) + if not train_decoder: + for param in self.core.depth_head.parameters(): + param.requires_grad = False + else: + self.core = depth_anything + + def forward(self, image): + if image.dim() == 3: + image = image.unsqueeze(0) + + # Forward of erp image + erp_pred = self.core(image) + erp_pred = erp_pred.unsqueeze(1) + + outputs = {} + outputs["pred_depth"] = erp_pred * self.max_depth + + return outputs + + @torch.no_grad() + def infer_image(self, raw_image, input_size=518): + image, (h, w) = self.image2tensor(raw_image, input_size) + + depth = self.forward(image)["pred_depth"] + + depth = F.interpolate(depth, (h, w), mode="bilinear", align_corners=True)[0, 0] + + return depth.cpu().numpy() + + def image2tensor(self, raw_image, input_size=518): + transform = Compose([ + Resize( + width=input_size * 2, + height=input_size, + resize_target=False, + keep_aspect_ratio=True, + ensure_multiple_of=14, + resize_method='lower_bound', + image_interpolation_method=cv2.INTER_CUBIC, + ), + NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + PrepareForNet(), + ]) + + h, w = raw_image.shape[:2] + + image = cv2.cvtColor(raw_image, cv2.COLOR_BGR2RGB) / 255.0 + + image = 
transform({'image': image})['image'] + image = torch.from_numpy(image).unsqueeze(0) + + DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu' + image = image.to(DEVICE) + + return image, (h, w) + +@register('panda') +def make_model(midas_model_type='vits', fine_tune_type='none', min_depth=0.1, max_depth=10.0, lora=True, train_decoder=True, lora_rank=4): + args = Namespace() + args.midas_model_type = midas_model_type + args.fine_tune_type = fine_tune_type + args.min_depth = min_depth + args.max_depth = max_depth + args.lora = lora + args.train_decoder = train_decoder + args.lora_rank = lora_rank + return PanDA(args) \ No newline at end of file diff --git a/networks/projection_utils.py b/networks/projection_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..93b01190da8b061f7c1bc7dbf03a62e1f91e4289 --- /dev/null +++ b/networks/projection_utils.py @@ -0,0 +1,478 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from scipy.ndimage import map_coordinates +import cv2 +import math +from os import makedirs +from os.path import join, exists + +# Based on https://github.com/sunset1995/py360convert +class Equirec2Cube: + def __init__(self, equ_h, equ_w, face_w): + ''' + equ_h: int, height of the equirectangular image + equ_w: int, width of the equirectangular image + face_w: int, the length of each face of the cubemap + ''' + + self.equ_h = equ_h + self.equ_w = equ_w + self.face_w = face_w + + self._xyzcube() + self._xyz2coor() + + # For convert R-distance to Z-depth for CubeMaps + cosmap = 1 / np.sqrt((2 * self.grid[..., 0]) ** 2 + (2 * self.grid[..., 1]) ** 2 + 1) + self.cosmaps = np.concatenate(6 * [cosmap], axis=1)[..., np.newaxis] + + def _xyzcube(self): + ''' + Compute the xyz cordinates of the unit cube in [F R B L U D] format. 
+ ''' + self.xyz = np.zeros((self.face_w, self.face_w * 6, 3), np.float32) + rng = np.linspace(-0.5, 0.5, num=self.face_w, dtype=np.float32) + self.grid = np.stack(np.meshgrid(rng, -rng), -1) + + # Front face (z = 0.5) + self.xyz[:, 0 * self.face_w:1 * self.face_w, [0, 1]] = self.grid + self.xyz[:, 0 * self.face_w:1 * self.face_w, 2] = 0.5 + + # Right face (x = 0.5) + self.xyz[:, 1 * self.face_w:2 * self.face_w, [2, 1]] = self.grid[:, ::-1] + self.xyz[:, 1 * self.face_w:2 * self.face_w, 0] = 0.5 + + # Back face (z = -0.5) + self.xyz[:, 2 * self.face_w:3 * self.face_w, [0, 1]] = self.grid[:, ::-1] + self.xyz[:, 2 * self.face_w:3 * self.face_w, 2] = -0.5 + + # Left face (x = -0.5) + self.xyz[:, 3 * self.face_w:4 * self.face_w, [2, 1]] = self.grid + self.xyz[:, 3 * self.face_w:4 * self.face_w, 0] = -0.5 + + # Up face (y = 0.5) + self.xyz[:, 4 * self.face_w:5 * self.face_w, [0, 2]] = self.grid[::-1, :] + self.xyz[:, 4 * self.face_w:5 * self.face_w, 1] = 0.5 + + # Down face (y = -0.5) + self.xyz[:, 5 * self.face_w:6 * self.face_w, [0, 2]] = self.grid + self.xyz[:, 5 * self.face_w:6 * self.face_w, 1] = -0.5 + + def _xyz2coor(self): + + # x, y, z to longitude and latitude + x, y, z = np.split(self.xyz, 3, axis=-1) + lon = np.arctan2(x, z) + c = np.sqrt(x ** 2 + z ** 2) + lat = np.arctan2(y, c) + + # longitude and latitude to equirectangular coordinate + self.coor_x = (lon / (2 * np.pi) + 0.5) * self.equ_w - 0.5 + self.coor_y = (-lat / np.pi + 0.5) * self.equ_h - 0.5 + + def sample_equirec(self, e_img, order=0): + pad_u = np.roll(e_img[[0]], self.equ_w // 2, 1) + pad_d = np.roll(e_img[[-1]], self.equ_w // 2, 1) + e_img = np.concatenate([e_img, pad_d, pad_u], 0) + # pad_l = e_img[:, [0]] + # pad_r = e_img[:, [-1]] + # e_img = np.concatenate([e_img, pad_l, pad_r], 1) + + return map_coordinates(e_img, [self.coor_y, self.coor_x], + order=order, mode='wrap')[..., 0] + + def run(self, equ_img, equ_dep=None): + + h, w = equ_img.shape[:2] + if h != self.equ_h or w != self.equ_w: + equ_img = cv2.resize(equ_img, (self.equ_w, self.equ_h)) + if equ_dep is not None: + equ_dep = cv2.resize(equ_dep, (self.equ_w, self.equ_h), interpolation=cv2.INTER_NEAREST) + + cube_img = np.stack([self.sample_equirec(equ_img[..., i], order=1) + for i in range(equ_img.shape[2])], axis=-1) + + if equ_dep is not None: + cube_dep = np.stack([self.sample_equirec(equ_dep[..., i], order=0) + for i in range(equ_dep.shape[2])], axis=-1) + cube_dep = cube_dep * self.cosmaps + + if equ_dep is not None: + return cube_img, cube_dep + else: + return cube_img + +# Based on https://github.com/sunset1995/py360convert +class Cube2Equirec(nn.Module): + def __init__(self, face_w, equ_h, equ_w): + super(Cube2Equirec, self).__init__() + ''' + face_w: int, the length of each face of the cubemap + equ_h: int, height of the equirectangular image + equ_w: int, width of the equirectangular image + ''' + + self.face_w = face_w + self.equ_h = equ_h + self.equ_w = equ_w + + + # Get face id to each pixel: 0F 1R 2B 3L 4U 5D + self._equirect_facetype() + self._equirect_faceuv() + + + def _equirect_facetype(self): + ''' + 0F 1R 2B 3L 4U 5D + ''' + tp = np.roll(np.arange(4).repeat(self.equ_w // 4)[None, :].repeat(self.equ_h, 0), 3 * self.equ_w // 8, 1) + + # Prepare ceil mask + mask = np.zeros((self.equ_h, self.equ_w // 4), bool) + idx = np.linspace(-np.pi, np.pi, self.equ_w // 4) / 4 + idx = self.equ_h // 2 - np.round(np.arctan(np.cos(idx)) * self.equ_h / np.pi).astype(int) + for i, j in enumerate(idx): + mask[:j, i] = 1 + mask = np.roll(np.concatenate([mask] * 
4, 1), 3 * self.equ_w // 8, 1) + + tp[mask] = 4 + tp[np.flip(mask, 0)] = 5 + + self.tp = tp + self.mask = mask + + def _equirect_faceuv(self): + + lon = ((np.linspace(0, self.equ_w -1, num=self.equ_w, dtype=np.float32 ) +0.5 ) /self.equ_w - 0.5 ) * 2 *np.pi + lat = -((np.linspace(0, self.equ_h -1, num=self.equ_h, dtype=np.float32 ) +0.5 ) /self.equ_h -0.5) * np.pi + + lon, lat = np.meshgrid(lon, lat) + + coor_u = np.zeros((self.equ_h, self.equ_w), dtype=np.float32) + coor_v = np.zeros((self.equ_h, self.equ_w), dtype=np.float32) + + for i in range(4): + mask = (self.tp == i) + coor_u[mask] = 0.5 * np.tan(lon[mask] - np.pi * i / 2) + coor_v[mask] = -0.5 * np.tan(lat[mask]) / np.cos(lon[mask] - np.pi * i / 2) + + mask = (self.tp == 4) + c = 0.5 * np.tan(np.pi / 2 - lat[mask]) + coor_u[mask] = c * np.sin(lon[mask]) + coor_v[mask] = c * np.cos(lon[mask]) + + mask = (self.tp == 5) + c = 0.5 * np.tan(np.pi / 2 - np.abs(lat[mask])) + coor_u[mask] = c * np.sin(lon[mask]) + coor_v[mask] = -c * np.cos(lon[mask]) + + # Final renormalize + coor_u = (np.clip(coor_u, -0.5, 0.5)) * 2 + coor_v = (np.clip(coor_v, -0.5, 0.5)) * 2 + + # Convert to torch tensor + self.tp = torch.from_numpy(self.tp.astype(np.float32) / 2.5 - 1) + self.coor_u = torch.from_numpy(coor_u) + self.coor_v = torch.from_numpy(coor_v) + + sample_grid = torch.stack([self.coor_u, self.coor_v, self.tp], dim=-1).view(1, 1, self.equ_h, self.equ_w, 3) + self.sample_grid = nn.Parameter(sample_grid, requires_grad=False) + + def forward(self, cube_feat): + + bs, ch, h, w = cube_feat.shape + assert h == self.face_w and w // 6 == self.face_w + + cube_feat = cube_feat.view(bs, ch, 1, h, w) + cube_feat = torch.cat(torch.split(cube_feat, self.face_w, dim=-1), dim=2) + + cube_feat = cube_feat.view([bs, ch, 6, self.face_w, self.face_w]) + sample_grid = torch.cat(bs * [self.sample_grid], dim=0) + equi_feat = F.grid_sample(cube_feat, sample_grid, padding_mode="border", align_corners=True) + + return equi_feat.squeeze(2) + +# generate patches in a closed-form +# the transformation and equation is referred from http://blog.nitishmutha.com/equirectangular/360degree/2017/06/12/How-to-project-Equirectangular-image-to-rectilinear-view.html +def pair(t): + return t if isinstance(t, tuple) else (t, t) + +def uv2xyz(uv): + xyz = np.zeros((*uv.shape[:-1], 3), dtype = np.float32) + xyz[..., 0] = np.multiply(np.cos(uv[..., 1]), np.sin(uv[..., 0])) + xyz[..., 1] = np.multiply(np.cos(uv[..., 1]), np.cos(uv[..., 0])) + xyz[..., 2] = np.sin(uv[..., 1]) + return xyz + +def equi2pers(erp_img, fov, nrows, patch_size): + bs, _, erp_h, erp_w = erp_img.shape + height, width = pair(patch_size) + fov_h, fov_w = pair(fov) + FOV = torch.tensor([fov_w/360.0, fov_h/180.0], dtype=torch.float32) + + PI = math.pi + PI_2 = math.pi * 0.5 + PI2 = math.pi * 2 + yy, xx = torch.meshgrid(torch.linspace(0, 1, height), torch.linspace(0, 1, width)) + screen_points = torch.stack([xx.flatten(), yy.flatten()], -1) + + if nrows==4: + num_rows = 4 + num_cols = [3, 6, 6, 3] + phi_centers = [-67.5, -22.5, 22.5, 67.5] + if nrows==6: + num_rows = 6 + num_cols = [3, 8, 12, 12, 8, 3] + phi_centers = [-75.2, -45.93, -15.72, 15.72, 45.93, 75.2] + if nrows==3: + num_rows = 3 + num_cols = [3, 4, 3] + phi_centers = [-60, 0, 60] + if nrows==5: + num_rows = 5 + num_cols = [3, 6, 8, 6, 3] + phi_centers = [-72.2, -36.1, 0, 36.1, 72.2] + + phi_interval = 180 // num_rows + all_combos = [] + erp_mask = [] + for i, n_cols in enumerate(num_cols): + for j in np.arange(n_cols): + theta_interval = 360 / n_cols + 
theta_center = j * theta_interval + theta_interval / 2 + + center = [theta_center, phi_centers[i]] + all_combos.append(center) + up = phi_centers[i] + phi_interval / 2 + down = phi_centers[i] - phi_interval / 2 + left = theta_center - theta_interval / 2 + right = theta_center + theta_interval / 2 + up = int((up + 90) / 180 * erp_h) + down = int((down + 90) / 180 * erp_h) + left = int(left / 360 * erp_w) + right = int(right / 360 * erp_w) + mask = np.zeros((erp_h, erp_w), dtype=int) + mask[down:up, left:right] = 1 + erp_mask.append(mask) + all_combos = np.vstack(all_combos) + shifts = np.arange(all_combos.shape[0]) * width + shifts = torch.from_numpy(shifts).float() + erp_mask = np.stack(erp_mask) + erp_mask = torch.from_numpy(erp_mask).float() + num_patch = all_combos.shape[0] + + center_point = torch.from_numpy(all_combos).float() # -180 to 180, -90 to 90 + center_point[:, 0] = (center_point[:, 0]) / 360 #0 to 1 + center_point[:, 1] = (center_point[:, 1] + 90) / 180 #0 to 1 + + cp = center_point * 2 - 1 + center_p = cp.clone() + cp[:, 0] = cp[:, 0] * PI + cp[:, 1] = cp[:, 1] * PI_2 + cp = cp.unsqueeze(1) + convertedCoord = screen_points * 2 - 1 + convertedCoord[:, 0] = convertedCoord[:, 0] * PI + convertedCoord[:, 1] = convertedCoord[:, 1] * PI_2 + convertedCoord = convertedCoord * (torch.ones(screen_points.shape, dtype=torch.float32) * FOV) + convertedCoord = convertedCoord.unsqueeze(0).repeat(cp.shape[0], 1, 1) + + x = convertedCoord[:, :, 0] + y = convertedCoord[:, :, 1] + + rou = torch.sqrt(x ** 2 + y ** 2) + c = torch.atan(rou) + sin_c = torch.sin(c) + cos_c = torch.cos(c) + lat = torch.asin(cos_c * torch.sin(cp[:, :, 1]) + (y * sin_c * torch.cos(cp[:, :, 1])) / rou) + lon = cp[:, :, 0] + torch.atan2(x * sin_c, rou * torch.cos(cp[:, :, 1]) * cos_c - y * torch.sin(cp[:, :, 1]) * sin_c) + lat_new = lat / PI_2 + lon_new = lon / PI + lon_new[lon_new > 1] -= 2 + lon_new[lon_new<-1] += 2 + + lon_new = lon_new.view(1, num_patch, height, width).permute(0, 2, 1, 3).contiguous().view(height, num_patch*width) + lat_new = lat_new.view(1, num_patch, height, width).permute(0, 2, 1, 3).contiguous().view(height, num_patch*width) + grid = torch.stack([lon_new, lat_new], -1) + grid = grid.unsqueeze(0).repeat(bs, 1, 1, 1).to(erp_img.device) + pers = F.grid_sample(erp_img, grid, mode='bilinear', padding_mode='border', align_corners=True) + pers = F.unfold(pers, kernel_size=(height, width), stride=(height, width)) + pers = pers.reshape(bs, -1, height, width, num_patch) + + grid_tmp = torch.stack([lon, lat], -1) + xyz = uv2xyz(grid_tmp) + xyz = xyz.reshape(num_patch, height, width, 3).transpose(0, 3, 1, 2) + xyz = torch.from_numpy(xyz).to(pers.device).contiguous() + + uv = grid[0, ...].reshape(height, width, num_patch, 2).permute(2, 3, 0, 1) + uv = uv.contiguous() + return pers, xyz, uv, center_p + +def pers2equi(pers_img, fov, nrows, patch_size, erp_size, layer_name): + bs = pers_img.shape[0] + channel = pers_img.shape[1] + device=pers_img.device + height, width = pair(patch_size) + fov_h, fov_w = pair(fov) + erp_h, erp_w = pair(erp_size) + n_patch = pers_img.shape[-1] + grid_dir = './grid' + if not exists(grid_dir): + makedirs(grid_dir) + grid_file = join(grid_dir, layer_name + '.pth') + + if not exists(grid_file): + FOV = torch.tensor([fov_w/360.0, fov_h/180.0], dtype=torch.float32) + + PI = math.pi + PI_2 = math.pi * 0.5 + PI2 = math.pi * 2 + + if nrows==4: + num_rows = 4 + num_cols = [3, 6, 6, 3] + phi_centers = [-67.5, -22.5, 22.5, 67.5] + if nrows==6: + num_rows = 6 + num_cols = [3, 8, 12, 12, 8, 
3] + phi_centers = [-75.2, -45.93, -15.72, 15.72, 45.93, 75.2] + if nrows==3: + num_rows = 3 + num_cols = [3, 4, 3] + phi_centers = [-59.6, 0, 59.6] + if nrows==5: + num_rows = 5 + num_cols = [3, 6, 8, 6, 3] + phi_centers = [-72.2, -36.1, 0, 36.1, 72.2] + phi_interval = 180 // num_rows + all_combos = [] + + for i, n_cols in enumerate(num_cols): + for j in np.arange(n_cols): + theta_interval = 360 / n_cols + theta_center = j * theta_interval + theta_interval / 2 + + center = [theta_center, phi_centers[i]] + all_combos.append(center) + + + all_combos = np.vstack(all_combos) + n_patch = all_combos.shape[0] + + center_point = torch.from_numpy(all_combos).float() # -180 to 180, -90 to 90 + center_point[:, 0] = (center_point[:, 0]) / 360 #0 to 1 + center_point[:, 1] = (center_point[:, 1] + 90) / 180 #0 to 1 + + cp = center_point * 2 - 1 + cp[:, 0] = cp[:, 0] * PI + cp[:, 1] = cp[:, 1] * PI_2 + cp = cp.unsqueeze(1) + + lat_grid, lon_grid = torch.meshgrid(torch.linspace(-PI_2, PI_2, erp_h), torch.linspace(-PI, PI, erp_w)) + lon_grid = lon_grid.float().reshape(1, -1)#.repeat(num_rows*num_cols, 1) + lat_grid = lat_grid.float().reshape(1, -1)#.repeat(num_rows*num_cols, 1) + cos_c = torch.sin(cp[..., 1]) * torch.sin(lat_grid) + torch.cos(cp[..., 1]) * torch.cos(lat_grid) * torch.cos(lon_grid - cp[..., 0]) + new_x = (torch.cos(lat_grid) * torch.sin(lon_grid - cp[..., 0])) / cos_c + new_y = (torch.cos(cp[..., 1])*torch.sin(lat_grid) - torch.sin(cp[...,1])*torch.cos(lat_grid)*torch.cos(lon_grid-cp[...,0])) / cos_c + new_x = new_x / FOV[0] / PI # -1 to 1 + new_y = new_y / FOV[1] / PI_2 + cos_c_mask = cos_c.reshape(n_patch, erp_h, erp_w) + cos_c_mask = torch.where(cos_c_mask > 0, 1, 0) + + w_list = torch.zeros((n_patch, erp_h, erp_w, 4), dtype=torch.float32) + + new_x_patch = (new_x + 1) * 0.5 * height + new_y_patch = (new_y + 1) * 0.5 * width + new_x_patch = new_x_patch.reshape(n_patch, erp_h, erp_w) + new_y_patch = new_y_patch.reshape(n_patch, erp_h, erp_w) + mask = torch.where((new_x_patch < width) & (new_x_patch > 0) & (new_y_patch < height) & (new_y_patch > 0), 1, 0) + mask *= cos_c_mask + + x0 = torch.floor(new_x_patch).type(torch.int64) + x1 = x0 + 1 + y0 = torch.floor(new_y_patch).type(torch.int64) + y1 = y0 + 1 + + x0 = torch.clamp(x0, 0, width-1) + x1 = torch.clamp(x1, 0, width-1) + y0 = torch.clamp(y0, 0, height-1) + y1 = torch.clamp(y1, 0, height-1) + + wa = (x1.type(torch.float32)-new_x_patch) * (y1.type(torch.float32)-new_y_patch) + wb = (x1.type(torch.float32)-new_x_patch) * (new_y_patch-y0.type(torch.float32)) + wc = (new_x_patch-x0.type(torch.float32)) * (y1.type(torch.float32)-new_y_patch) + wd = (new_x_patch-x0.type(torch.float32)) * (new_y_patch-y0.type(torch.float32)) + + wa = wa * mask.expand_as(wa) + wb = wb * mask.expand_as(wb) + wc = wc * mask.expand_as(wc) + wd = wd * mask.expand_as(wd) + + w_list[..., 0] = wa + w_list[..., 1] = wb + w_list[..., 2] = wc + w_list[..., 3] = wd + + + save_file = {'x0':x0, 'y0':y0, 'x1':x1, 'y1':y1, 'w_list': w_list, 'mask':mask} + torch.save(save_file, grid_file) + else: + # the online merge really takes time + # pre-calculate the grid for once and use it during training + load_file = torch.load(grid_file) + #print('load_file') + x0 = load_file['x0'] + y0 = load_file['y0'] + x1 = load_file['x1'] + y1 = load_file['y1'] + w_list = load_file['w_list'] + mask = load_file['mask'] + + w_list = w_list.to(device) + mask = mask.to(device) + z = torch.arange(n_patch) + z = z.reshape(n_patch, 1, 1) + Ia = pers_img[:, :, y0, x0, z] + Ib = pers_img[:, :, y1, x0, 
z] + Ic = pers_img[:, :, y0, x1, z] + Id = pers_img[:, :, y1, x1, z] + output_a = Ia * mask.expand_as(Ia) + output_b = Ib * mask.expand_as(Ib) + output_c = Ic * mask.expand_as(Ic) + output_d = Id * mask.expand_as(Id) + + output_a = output_a.permute(0, 1, 3, 4, 2) + output_b = output_b.permute(0, 1, 3, 4, 2) + output_c = output_c.permute(0, 1, 3, 4, 2) + output_d = output_d.permute(0, 1, 3, 4, 2) + w_list = w_list.permute(1, 2, 0, 3) + w_list = w_list.flatten(2) + w_list *= torch.gt(w_list, 1e-5).type(torch.float32) + w_list = F.normalize(w_list, p=1, dim=-1).reshape(erp_h, erp_w, n_patch, 4) + w_list = w_list.unsqueeze(0).unsqueeze(0) + output = output_a * w_list[..., 0] + output_b * w_list[..., 1] + \ + output_c * w_list[..., 2] + output_d * w_list[..., 3] + img_erp = output.sum(-1) + + return img_erp + +def img2windows(img, H_sp, W_sp): + """ + img: B C H W + """ + B, C, H, W = img.shape + img_reshape = img.view(B, C, H // H_sp, H_sp, W // W_sp, W_sp) + img_perm = img_reshape.permute(0, 2, 4, 3, 5, 1).contiguous().reshape(-1, H_sp, W_sp, C) + return img_perm + +def windows2img(img_splits_hw, H_sp, W_sp, H, W): + """ + img_splits_hw: B' H W C + """ + B = int(img_splits_hw.shape[0] / (H * W / H_sp / W_sp)) + + img = img_splits_hw.view(B, H // H_sp, W // W_sp, H_sp, W_sp, -1) + img = img.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return img \ No newline at end of file diff --git a/networks/utils.py b/networks/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..88e2c7872391c0b13128cfb3eb78543a2a6c87b2 --- /dev/null +++ b/networks/utils.py @@ -0,0 +1,197 @@ +import math +import copy +import random +import torch +import numpy as np +import torch.nn as nn +import torch.nn.functional as F + +from safetensors.torch import save_file +from safetensors import safe_open +from torch.nn.parameter import Parameter + +from depth_anything_v2_metric.depth_anything_v2.dpt import DepthAnythingV2 + +class _LoRA_qkv(nn.Module): + """In Sam it is implemented as + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) + """ + + def __init__( + self, + qkv: nn.Module, + linear_a_q: nn.Module, + linear_b_q: nn.Module, + linear_a_v: nn.Module, + linear_b_v: nn.Module, + ): + super().__init__() + self.qkv = qkv + self.linear_a_q = linear_a_q + self.linear_b_q = linear_b_q + self.linear_a_v = linear_a_v + self.linear_b_v = linear_b_v + self.dim = qkv.in_features + self.w_identity = torch.eye(qkv.in_features) + + def forward(self, x): + qkv = self.qkv(x) # B,N,3*org_C + new_q = self.linear_b_q(self.linear_a_q(x)) + new_v = self.linear_b_v(self.linear_a_v(x)) + + qkv[:, :, : self.dim] += new_q + qkv[:, :, -self.dim:] += new_v + return qkv + +class LoRA(nn.Module): + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + def save_fc_parameters(self, filename: str) -> None: + r"""Only safetensors is supported now. + + pip install safetensor if you do not have one installed yet. + """ + assert filename.endswith(".safetensors") + _in = self.lora_vit.head.in_features + _out = self.lora_vit.head.out_features + fc_tensors = {f"fc_{_in}in_{_out}out": self.lora_vit.head.weight} + save_file(fc_tensors, filename) + + def load_fc_parameters(self, filename: str) -> None: + r"""Only safetensors is supported now. + + pip install safetensor if you do not have one installed yet. 
+ """ + + assert filename.endswith(".safetensors") + _in = self.lora_vit.head.in_features + _out = self.lora_vit.head.out_features + with safe_open(filename, framework="pt") as f: + saved_key = f"fc_{_in}in_{_out}out" + try: + saved_tensor = f.get_tensor(saved_key) + self.lora_vit.head.weight = Parameter(saved_tensor) + except ValueError: + print("this fc weight is not for this model") + + def save_lora_parameters(self, filename: str) -> None: + r"""Only safetensors is supported now. + + pip install safetensor if you do not have one installed yet. + + save both lora and fc parameters. + """ + + assert filename.endswith(".safetensors") + + num_layer = len(self.w_As) # actually, it is half + a_tensors = {f"w_a_{i:03d}": self.w_As[i].weight for i in range(num_layer)} + b_tensors = {f"w_b_{i:03d}": self.w_Bs[i].weight for i in range(num_layer)} + + _in = self.lora_vit.head.in_features + _out = self.lora_vit.head.out_features + fc_tensors = {f"fc_{_in}in_{_out}out": self.lora_vit.head.weight} + + merged_dict = {**a_tensors, **b_tensors, **fc_tensors} + save_file(merged_dict, filename) + + def load_lora_parameters(self, filename: str) -> None: + r"""Only safetensors is supported now. + + pip install safetensor if you do not have one installed yet.\ + + load both lora and fc parameters. + """ + + assert filename.endswith(".safetensors") + + with safe_open(filename, framework="pt") as f: + for i, w_A_linear in enumerate(self.w_As): + saved_key = f"w_a_{i:03d}" + saved_tensor = f.get_tensor(saved_key) + w_A_linear.weight = Parameter(saved_tensor) + + for i, w_B_linear in enumerate(self.w_Bs): + saved_key = f"w_b_{i:03d}" + saved_tensor = f.get_tensor(saved_key) + w_B_linear.weight = Parameter(saved_tensor) + + _in = self.lora_vit.head.in_features + _out = self.lora_vit.head.out_features + saved_key = f"fc_{_in}in_{_out}out" + try: + saved_tensor = f.get_tensor(saved_key) + self.lora_vit.head.weight = Parameter(saved_tensor) + except ValueError: + print("this fc weight is not for this model") + + def reset_parameters(self) -> None: + for w_A in self.w_As: + nn.init.kaiming_uniform_(w_A.weight, a=math.sqrt(5)) + for w_B in self.w_Bs: + nn.init.zeros_(w_B.weight) + +class LoRA_Depth_Anything_v2(LoRA): + """Applies low-rank adaptation to a Depth Anything model's image encoder. + + Args: + sam_model: a vision transformer model, see base_vit.py + r: rank of LoRA + num_classes: how many classes the model output, default to the vit model + lora_layer: which layer we apply LoRA. 
+ + Examples:: + >>> model = ViT('B_16_imagenet1k') + >>> lora_model = LoRA_ViT(model, r=4) + >>> preds = lora_model(img) + >>> print(preds.shape) + torch.Size([1, 1000]) + """ + + def __init__(self, da_model: DepthAnythingV2, r: int, lora_layer=None): + super(LoRA_Depth_Anything_v2, self).__init__() + + assert r > 0 + # base_vit_dim = sam_model.image_encoder.patch_embed.proj.out_channels + # dim = base_vit_dim + if lora_layer: + self.lora_layer = lora_layer + else: + self.lora_layer = list(range(len(da_model.pretrained.blocks))) + # create for storage, then we can init them or load weights + self.w_As = [] # These are linear layers + self.w_Bs = [] + + # lets freeze first + for param in da_model.pretrained.parameters(): + param.requires_grad = False + + # Here, we do the surgery + for t_layer_i, blk in enumerate(da_model.pretrained.blocks): + # If we only want few lora layer instead of all + if t_layer_i not in self.lora_layer: + continue + w_qkv_linear = blk.attn.qkv + self.dim = w_qkv_linear.in_features + w_a_linear_q = nn.Linear(self.dim, r, bias=False) + w_b_linear_q = nn.Linear(r, self.dim, bias=False) + w_a_linear_v = nn.Linear(self.dim, r, bias=False) + w_b_linear_v = nn.Linear(r, self.dim, bias=False) + self.w_As.append(w_a_linear_q) + self.w_Bs.append(w_b_linear_q) + self.w_As.append(w_a_linear_v) + self.w_Bs.append(w_b_linear_v) + blk.attn.qkv = _LoRA_qkv( + w_qkv_linear, + w_a_linear_q, + w_b_linear_q, + w_a_linear_v, + w_b_linear_v, + ) + self.reset_parameters() + + self.lora_vit = da_model \ No newline at end of file
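The registry in networks/models.py together with the `@register('panda')` factory in networks/panda.py is enough to build the model outside the demo app. Below is a minimal sketch, not part of the diff: the inline `model_spec`, the checkpoint path, and the sample image path are placeholders, and `strict=False` is only a hedge against key mismatches in saved weights.

```python
import cv2
import torch

from networks.models import make
import networks.panda  # noqa: F401  (importing the module registers the 'panda' entry)

# Hypothetical inline spec; a real run would read these fields from a YAML config.
model_spec = {
    'name': 'panda',
    'args': {
        'midas_model_type': 'vitl',
        'fine_tune_type': 'inference',  # skip the Depth Anything checkpoints; PanDA weights are loaded below
        'max_depth': 10.0,
        'lora': True,
        'lora_rank': 4,
    },
}
model = make(model_spec)

# Placeholder checkpoint path; 'module.' prefixes appear when the weights were saved under DataParallel.
state_dict = torch.load('path/to/model.pth', map_location='cpu')
state_dict = {k.replace('module.', '', 1): v for k, v in state_dict.items()}
model.load_state_dict(state_dict, strict=False)

DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'
model = model.to(DEVICE).eval()

# infer_image expects a BGR uint8 array (it converts to RGB and normalizes internally)
# and returns a numpy depth map resized back to the input resolution.
bgr = cv2.imread('path/to/erp_sample.png')
depth = model.infer_image(bgr, input_size=518)
print(depth.shape, float(depth.min()), float(depth.max()))
```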
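For the LoRA wrapper itself, the sketch below shows what `LoRA_Depth_Anything_v2` does to the encoder. The `vits` settings are copied from `model_configs` in networks/panda.py; constructing `DepthAnythingV2` here without loading a checkpoint is an assumption made purely to inspect the surgery.

```python
from depth_anything_v2_metric.depth_anything_v2.dpt import DepthAnythingV2
from networks.utils import LoRA_Depth_Anything_v2, _LoRA_qkv

# 'vits' configuration as listed in networks/panda.py; no pretrained weights loaded.
da = DepthAnythingV2(encoder='vits', features=64, out_channels=[48, 96, 192, 384], max_depth=1.0)
LoRA_Depth_Anything_v2(da, r=4)  # the surgery happens in place on da.pretrained

# Every encoder block now routes attention through the _LoRA_qkv wrapper:
# the frozen qkv output plus rank-4 updates B_q(A_q(x)) on q and B_v(A_v(x)) on v.
assert all(isinstance(blk.attn.qkv, _LoRA_qkv) for blk in da.pretrained.blocks)

# Inside the encoder only the new A/B matrices are trainable; B is zero-initialised,
# so the adapted encoder starts out numerically identical to the frozen backbone.
trainable = [n for n, p in da.pretrained.named_parameters() if p.requires_grad]
print(len(trainable), trainable[:4])
```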
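The projection helpers `equi2pers` and `pers2equi` are defined but not exercised anywhere in this diff; a round-trip sketch follows. The patch count for `nrows=4` (3+6+6+3 = 18) and the tensor shapes in the comments are inferred from the code above and should be treated as assumptions.

```python
import torch

from networks.projection_utils import equi2pers, pers2equi

erp = torch.rand(1, 3, 504, 1008)            # (B, C, H, W) equirectangular image
fov, nrows, patch = (80, 80), 4, (126, 126)  # nrows=4 -> 3 + 6 + 6 + 3 = 18 patches

# Split the ERP image into perspective patches plus their ray directions and UV grids.
pers, xyz, uv, centers = equi2pers(erp, fov, nrows, patch)
print(pers.shape)    # expected (1, 3, 126, 126, 18)

# Merge the patches back; the bilinear sampling grid is cached under ./grid/<layer_name>.pth
# the first time a given layer_name is used.
merged = pers2equi(pers, fov, nrows, patch, (504, 1008), layer_name='demo_126x126')
print(merged.shape)  # expected (1, 3, 504, 1008)
```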