diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..009d8bc31347663eec153806f3c47e38c9c80f6f 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +*.whl filter=lfs diff=lfs merge=lfs -text +# submodules/dust3r/checkpoints/DUSt3R_ViTLarge_BaseDecoder_512_dpt.pth filter=lfs diff=lfs merge=lfs -text +# wheel/diff_gaussian_rasterization-0.0.0-cp310-cp310-linux_x86_64.whl filter=lfs diff=lfs merge=lfs -text diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..fdf34e3e42c566f51525e46bba8ff90700389b63 --- /dev/null +++ b/.gitignore @@ -0,0 +1,23 @@ +/.idea/ +/work_dirs* +.vscode/ +/tmp +/data +# /checkpoints +*.so +*.patch +__pycache__/ +*.egg-info/ +/viz* +/submit* +build/ +*.pyd +/cache* +*.stl +# *.pth +/venv/ +.nk8s +*.mp4 +.vs +/exp/ +/dev/ diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..0b919995945f788ed53ce4c70f850c93325a0a5c --- /dev/null +++ b/app.py @@ -0,0 +1,154 @@ +# built upon InstantSplat https://huggingface.co/spaces/kairunwen/InstantSplat/blob/main/app.py +import os, subprocess, shlex, sys, gc +import numpy as np +import shutil +import argparse +import gradio as gr +import uuid +import glob +import re + +import spaces + +subprocess.run(shlex.split("pip install wheel/diff_gaussian_rasterization-0.0.0-cp310-cp310-linux_x86_64.whl")) +subprocess.run(shlex.split("pip install wheel/simple_knn-0.0.0-cp310-cp310-linux_x86_64.whl")) +# subprocess.run(shlex.split("pip install wheel/curope-0.0.0-cp310-cp310-linux_x86_64.whl")) +GRADIO_CACHE_FOLDER = './gradio_cache_folder' + + +def get_dust3r_args_parser(): + parser = argparse.ArgumentParser() + parser.add_argument("--image_size", type=int, default=512, choices=[512, 224], help="image size") + parser.add_argument("--model_path", type=str, default="submodules/dust3r/checkpoints/DUSt3R_ViTLarge_BaseDecoder_512_dpt.pth", help="path to the model weights") + parser.add_argument("--device", type=str, default='cuda', help="pytorch device") + parser.add_argument("--batch_size", type=int, default=1) + parser.add_argument("--schedule", type=str, default='linear') + parser.add_argument("--lr", type=float, default=0.01) + parser.add_argument("--niter", type=int, default=300) + parser.add_argument("--focal_avg", type=bool, default=True) + parser.add_argument("--n_views", type=int, default=3) + parser.add_argument("--base_path", type=str, default=GRADIO_CACHE_FOLDER) + return parser + + +def natural_sort(l): + convert = lambda text: int(text) if text.isdigit() else text.lower() + alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key.split('/')[-1])] + return sorted(l, key=alphanum_key) + +def cmd(command): + print(command) + os.system(command) + +@spaces.GPU(duration=150) +def process(inputfiles, input_path='demo'): + if inputfiles: + frames = natural_sort(inputfiles) + else: + frames = natural_sort(glob.glob('./assets/example/' + input_path + '/*')) + if len(frames) > 20: + stride = int(np.ceil(len(frames) / 20)) + frames = frames[::stride] + + # Create a temporary directory to store the selected frames + temp_dir = os.path.join(GRADIO_CACHE_FOLDER, str(uuid.uuid4())) + os.makedirs(temp_dir, exist_ok=True) + + # Copy the selected frames to the temporary directory + for i, frame in 
enumerate(frames): + shutil.copy(frame, f"{temp_dir}/{i:04d}.{frame.split('.')[-1]}") + + imgs_path = temp_dir + output_path = f'./results/{input_path}/output' + cmd(f"python dynamic_predictor/launch.py --mode=eval_pose_custom \ + --pretrained=Kai422kx/das3r \ + --dir_path={imgs_path} \ + --output_dir={output_path} \ + --use_pred_mask ") + + cmd(f"python utils/rearrange.py --output_dir={output_path}") + output_path = f'{output_path}_rearranged' + + print(output_path) + cmd(f"python train_gui.py -s {output_path} -m {output_path} --iter 2000") + cmd(f"python render.py -s {output_path} -m {output_path} --iter 2000 --get_video") + + output_video_path = f"{output_path}/rendered.mp4" + output_ply_path = f"{output_path}/point_cloud/iteration_2000/point_cloud.ply" + return output_video_path, output_ply_path, output_ply_path + + + +_TITLE = '''DAS3R''' +_DESCRIPTION = ''' +
+ DAS3R: Dynamics-Aware Gaussian Splatting for Static Scene Reconstruction
+ (arXiv badge)
+ + +* Official demo of [DAS3R: Dynamics-Aware Gaussian Splatting for Static Scene Reconstruction](https://kai422.github.io/DAS3R/). +* You can explore the sample results by clicking the sequence names at the bottom of the page. +* Due to GPU memory and time constraints, the number of processed frames is capped at 20 and GS training is limited to 2000 iterations. We apply uniform sampling when the number of input frames exceeds 20. +* This Gradio demo is built upon InstantSplat, which can be found at [https://huggingface.co/spaces/kairunwen/InstantSplat](https://huggingface.co/spaces/kairunwen/InstantSplat). + +''' + +block = gr.Blocks().queue() +with block: + with gr.Row(): + with gr.Column(scale=1): + # gr.Markdown('# ' + _TITLE) + gr.Markdown(_DESCRIPTION) + + with gr.Row(variant='panel'): + with gr.Tab("Input"): + inputfiles = gr.File(file_count="multiple", label="images") + input_path = gr.Textbox(visible=False, label="example_path") + button_gen = gr.Button("RUN") + + with gr.Row(variant='panel'): + with gr.Tab("Output"): + with gr.Column(scale=2): + with gr.Group(): + output_model = gr.Model3D( + label="3D Dense Model in Gaussian Splat format (may take a while to visualize)", + interactive=False, + camera_position=[0.5, 0.5, 1], # slight offset for a better view of the model + ) + gr.Markdown( + """ +
+   Use the left mouse button to rotate, the scroll wheel to zoom, and the right mouse button to move. +
+ """ + ) + output_file = gr.File(label="ply") + with gr.Column(scale=1): + output_video = gr.Video(label="video") + + button_gen.click(process, inputs=[inputfiles], outputs=[output_video, output_file, output_model]) + + gr.Examples( + examples=[ + "davis-dog", + # "sintel-market_2", + ], + inputs=[input_path], + outputs=[output_video, output_file, output_model], + fn=lambda x: process(inputfiles=None, input_path=x), + cache_examples=True, + label='Sparse-view Examples' + ) +block.launch(server_name="0.0.0.0", share=False) \ No newline at end of file diff --git a/app_wrapper.py b/app_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..be1af7756d476cbd12c07cf0c901bb1efaab1dd7 --- /dev/null +++ b/app_wrapper.py @@ -0,0 +1,19 @@ +import gradio as gr +import os +# import spaces + +hf_token = os.getenv("instantsplat_token") + +# gr.load("kairunwen/tmp", hf_token=token, src="spaces").launch() + + + +import shlex +import subprocess + +from huggingface_hub import HfApi + +api = HfApi() +api.snapshot_download(repo_id="kairunwen/tmp", repo_type="space", local_dir=".", token=hf_token) +subprocess.run(shlex.split("pip install -r requirements.txt")) +subprocess.run(shlex.split("python app.py")) diff --git a/arguments/__init__.py b/arguments/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..9ee5acb1717345919959219c9fac89a31b5c8591 --- /dev/null +++ b/arguments/__init__.py @@ -0,0 +1,112 @@ +# +# Copyright (C) 2023, Inria +# GRAPHDECO research group, https://team.inria.fr/graphdeco +# All rights reserved. +# +# This software is free for non-commercial, research and evaluation use +# under the terms of the LICENSE.md file. +# +# For inquiries contact george.drettakis@inria.fr +# + +from argparse import ArgumentParser, Namespace +import sys +import os + +class GroupParams: + pass + +class ParamGroup: + def __init__(self, parser: ArgumentParser, name : str, fill_none = False): + group = parser.add_argument_group(name) + for key, value in vars(self).items(): + shorthand = False + if key.startswith("_"): + shorthand = True + key = key[1:] + t = type(value) + value = value if not fill_none else None + if shorthand: + if t == bool: + group.add_argument("--" + key, ("-" + key[0:1]), default=value, action="store_true") + else: + group.add_argument("--" + key, ("-" + key[0:1]), default=value, type=t) + else: + if t == bool: + group.add_argument("--" + key, default=value, action="store_true") + else: + group.add_argument("--" + key, default=value, type=t) + + def extract(self, args): + group = GroupParams() + for arg in vars(args).items(): + if arg[0] in vars(self) or ("_" + arg[0]) in vars(self): + setattr(group, arg[0], arg[1]) + return group + +class ModelParams(ParamGroup): + def __init__(self, parser, sentinel=False): + self.sh_degree = 3 + self._source_path = "" + self._model_path = "" + self._images = "images" + self._resolution = -1 + self._white_background = False + self.data_device = "cuda" + self.eval = False + super().__init__(parser, "Loading Parameters", sentinel) + + def extract(self, args): + g = super().extract(args) + g.source_path = os.path.abspath(g.source_path) + return g + +class PipelineParams(ParamGroup): + def __init__(self, parser): + self.convert_SHs_python = False + self.compute_cov3D_python = False + self.debug = False + super().__init__(parser, "Pipeline Parameters") + +class OptimizationParams(ParamGroup): + def __init__(self, parser): + self.iterations = 30_000 + self.position_lr_init = 0.00016 + self.position_lr_final = 
0.0000016 + self.position_lr_delay_mult = 0.01 + self.position_lr_max_steps = 30_000 + self.feature_lr = 0.0025 + self.opacity_lr = 0.05 + self.scaling_lr = 0.005 + self.rotation_lr = 0.001 + self.percent_dense = 0.01 + self.lambda_dssim = 0.2 + self.densification_interval = 100 + self.opacity_reset_interval = 3000 + self.densify_from_iter = 500 + self.densify_until_iter = 15_000 + self.densify_grad_threshold = 0.0002 + self.random_background = False + super().__init__(parser, "Optimization Parameters") + +def get_combined_args(parser : ArgumentParser): + cmdline_string = sys.argv[1:] + cfgfile_string = "Namespace()" + args_cmdline = parser.parse_args(cmdline_string) + + try: + cfgfilepath = os.path.join(args_cmdline.model_path, "cfg_args") + print("Looking for config file in", cfgfilepath) + with open(cfgfilepath) as cfg_file: + print("Config file found: {}".format(cfgfilepath)) + cfgfile_string = cfg_file.read() + except TypeError: + print("Config file not found") + pass + args_cfgfile = eval(cfgfile_string) + + merged_dict = vars(args_cfgfile).copy() + for k,v in vars(args_cmdline).items(): + if v is not None: + merged_dict[k] = v + return Namespace(**merged_dict) diff --git a/assets/example/davis-dog/00000.jpg b/assets/example/davis-dog/00000.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a80ffa8856d21e0e0ce248aaa28f1d0c0b34e552 Binary files /dev/null and b/assets/example/davis-dog/00000.jpg differ diff --git a/assets/example/davis-dog/00001.jpg b/assets/example/davis-dog/00001.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b5043f31ee12e6d787058445ef84e610ac1c497f Binary files /dev/null and b/assets/example/davis-dog/00001.jpg differ diff --git a/assets/example/davis-dog/00002.jpg b/assets/example/davis-dog/00002.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d903ae0d308b92af6fbfae6e3bd4a60eb59094d2 Binary files /dev/null and b/assets/example/davis-dog/00002.jpg differ diff --git a/assets/example/davis-dog/00003.jpg b/assets/example/davis-dog/00003.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3a03a653b661b3642cae3252d5cff8ce34d62424 Binary files /dev/null and b/assets/example/davis-dog/00003.jpg differ diff --git a/assets/example/davis-dog/00004.jpg b/assets/example/davis-dog/00004.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e070d2fd3521bb9faefbb3224b009a69924c0c7f Binary files /dev/null and b/assets/example/davis-dog/00004.jpg differ diff --git a/assets/example/davis-dog/00005.jpg b/assets/example/davis-dog/00005.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1a3c068a167483ff572a9dfe594bec572f0716b9 Binary files /dev/null and b/assets/example/davis-dog/00005.jpg differ diff --git a/assets/example/davis-dog/00006.jpg b/assets/example/davis-dog/00006.jpg new file mode 100644 index 0000000000000000000000000000000000000000..45d4cee6499a17d42e2cb802a072f7dcf56484d3 Binary files /dev/null and b/assets/example/davis-dog/00006.jpg differ diff --git a/assets/example/davis-dog/00007.jpg b/assets/example/davis-dog/00007.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2c55dcc000b853d186a6ee8248b0a209948b53b2 Binary files /dev/null and b/assets/example/davis-dog/00007.jpg differ diff --git a/assets/example/davis-dog/00008.jpg b/assets/example/davis-dog/00008.jpg new file mode 100644 index 0000000000000000000000000000000000000000..426b5b7e7ad81995ba4c2d6de0f8929047cafc38 Binary files /dev/null and 
b/assets/example/davis-dog/00008.jpg differ diff --git a/assets/example/davis-dog/00009.jpg b/assets/example/davis-dog/00009.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8d104333700949e4158ca6214124b66a5294a3e3 Binary files /dev/null and b/assets/example/davis-dog/00009.jpg differ diff --git a/assets/example/davis-dog/00010.jpg b/assets/example/davis-dog/00010.jpg new file mode 100644 index 0000000000000000000000000000000000000000..28c61aeb441a87a6108037645f810aed1a8c3b7f Binary files /dev/null and b/assets/example/davis-dog/00010.jpg differ diff --git a/assets/example/davis-dog/00011.jpg b/assets/example/davis-dog/00011.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c1d9c6d9b4219c86576f685534108943b85e6c72 Binary files /dev/null and b/assets/example/davis-dog/00011.jpg differ diff --git a/assets/example/davis-dog/00012.jpg b/assets/example/davis-dog/00012.jpg new file mode 100644 index 0000000000000000000000000000000000000000..53a8905ae8238221eee9de488da84f85ae5e1149 Binary files /dev/null and b/assets/example/davis-dog/00012.jpg differ diff --git a/assets/example/davis-dog/00013.jpg b/assets/example/davis-dog/00013.jpg new file mode 100644 index 0000000000000000000000000000000000000000..69a7808771ad9852a4fd7c917d8ead20252e6aa7 Binary files /dev/null and b/assets/example/davis-dog/00013.jpg differ diff --git a/assets/example/davis-dog/00014.jpg b/assets/example/davis-dog/00014.jpg new file mode 100644 index 0000000000000000000000000000000000000000..faa0edeb2c7d8a48875679d94d8be54ff7cdf600 Binary files /dev/null and b/assets/example/davis-dog/00014.jpg differ diff --git a/assets/example/davis-dog/00015.jpg b/assets/example/davis-dog/00015.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a01fcfed9eaa6896fa256cbd8e0f9b7a130a3ab Binary files /dev/null and b/assets/example/davis-dog/00015.jpg differ diff --git a/assets/example/davis-dog/00016.jpg b/assets/example/davis-dog/00016.jpg new file mode 100644 index 0000000000000000000000000000000000000000..95286db9843bd396b624fe03b5001d6a081fd4f4 Binary files /dev/null and b/assets/example/davis-dog/00016.jpg differ diff --git a/assets/example/davis-dog/00017.jpg b/assets/example/davis-dog/00017.jpg new file mode 100644 index 0000000000000000000000000000000000000000..370e3c646003af019aecbd9c0fdb1409e573f6cf Binary files /dev/null and b/assets/example/davis-dog/00017.jpg differ diff --git a/assets/example/davis-dog/00018.jpg b/assets/example/davis-dog/00018.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dc590cbba10f2351aad3ca13b749e632e258197e Binary files /dev/null and b/assets/example/davis-dog/00018.jpg differ diff --git a/assets/example/davis-dog/00019.jpg b/assets/example/davis-dog/00019.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e8eddf5cbfda3c67793d700255200a7bcc1f06df Binary files /dev/null and b/assets/example/davis-dog/00019.jpg differ diff --git a/assets/example/davis-dog/00020.jpg b/assets/example/davis-dog/00020.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fbc9a27dca64d96309a17b035e5ab1a9c02547b4 Binary files /dev/null and b/assets/example/davis-dog/00020.jpg differ diff --git a/assets/example/davis-dog/00021.jpg b/assets/example/davis-dog/00021.jpg new file mode 100644 index 0000000000000000000000000000000000000000..016945e123b82ae86187e1494dfa31e420ba90ec Binary files /dev/null and b/assets/example/davis-dog/00021.jpg differ diff --git a/assets/example/davis-dog/00022.jpg 
b/assets/example/davis-dog/00022.jpg new file mode 100644 index 0000000000000000000000000000000000000000..def7fe8991fbcad609ba6b2efdd2aff5c40faa5c Binary files /dev/null and b/assets/example/davis-dog/00022.jpg differ diff --git a/assets/example/davis-dog/00023.jpg b/assets/example/davis-dog/00023.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8d2b892b347978cd66e231d0563a963858254b4b Binary files /dev/null and b/assets/example/davis-dog/00023.jpg differ diff --git a/assets/example/davis-dog/00024.jpg b/assets/example/davis-dog/00024.jpg new file mode 100644 index 0000000000000000000000000000000000000000..890d85970686785162e4d34080ef1c9bb2fb2a9b Binary files /dev/null and b/assets/example/davis-dog/00024.jpg differ diff --git a/assets/example/davis-dog/00025.jpg b/assets/example/davis-dog/00025.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5f584e194a871a655b1c4f186cce3c9167d30caa Binary files /dev/null and b/assets/example/davis-dog/00025.jpg differ diff --git a/assets/example/davis-dog/00026.jpg b/assets/example/davis-dog/00026.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8d7767c35ed165fab73a6a0d3a94651f3135ef52 Binary files /dev/null and b/assets/example/davis-dog/00026.jpg differ diff --git a/assets/example/davis-dog/00027.jpg b/assets/example/davis-dog/00027.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f379d96ad082123aa91298e352488419d7d733b2 Binary files /dev/null and b/assets/example/davis-dog/00027.jpg differ diff --git a/assets/example/davis-dog/00028.jpg b/assets/example/davis-dog/00028.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4cbab43498888a3e7ec94dd47834ce667a0cf97b Binary files /dev/null and b/assets/example/davis-dog/00028.jpg differ diff --git a/assets/example/davis-dog/00029.jpg b/assets/example/davis-dog/00029.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b76893a6827834da544fd0e828c8a4dc7231e723 Binary files /dev/null and b/assets/example/davis-dog/00029.jpg differ diff --git a/assets/example/davis-dog/00030.jpg b/assets/example/davis-dog/00030.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e15962042fc33ac86dc98876990ef9e0f9d7d969 Binary files /dev/null and b/assets/example/davis-dog/00030.jpg differ diff --git a/assets/example/davis-dog/00031.jpg b/assets/example/davis-dog/00031.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e193d3944dd1450bee86661db921d1442b853ca9 Binary files /dev/null and b/assets/example/davis-dog/00031.jpg differ diff --git a/assets/example/davis-dog/00032.jpg b/assets/example/davis-dog/00032.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9f796529e61eb479da9d93e0402394a890024556 Binary files /dev/null and b/assets/example/davis-dog/00032.jpg differ diff --git a/assets/example/davis-dog/00033.jpg b/assets/example/davis-dog/00033.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9cab6d2f908ef604db3196fd4448857eae8e9688 Binary files /dev/null and b/assets/example/davis-dog/00033.jpg differ diff --git a/assets/example/davis-dog/00034.jpg b/assets/example/davis-dog/00034.jpg new file mode 100644 index 0000000000000000000000000000000000000000..61400eed0aea2429a7e4c60776d650836973c7a6 Binary files /dev/null and b/assets/example/davis-dog/00034.jpg differ diff --git a/assets/example/davis-dog/00035.jpg b/assets/example/davis-dog/00035.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..d41e97096523f8cc7602cf2386a162773082b32e Binary files /dev/null and b/assets/example/davis-dog/00035.jpg differ diff --git a/assets/example/davis-dog/00036.jpg b/assets/example/davis-dog/00036.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b9345f5dda0debc8405e30326c987451964fab3f Binary files /dev/null and b/assets/example/davis-dog/00036.jpg differ diff --git a/assets/example/davis-dog/00037.jpg b/assets/example/davis-dog/00037.jpg new file mode 100644 index 0000000000000000000000000000000000000000..995c87a63915a1ca50143e07460faa29afe6f5ff Binary files /dev/null and b/assets/example/davis-dog/00037.jpg differ diff --git a/assets/example/davis-dog/00038.jpg b/assets/example/davis-dog/00038.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7701e865db962a9ec3ac4c2d60f536d88f15eea2 Binary files /dev/null and b/assets/example/davis-dog/00038.jpg differ diff --git a/assets/example/davis-dog/00039.jpg b/assets/example/davis-dog/00039.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fb2e8d88fefa5a844b69e0591c8f6f9d61967eaf Binary files /dev/null and b/assets/example/davis-dog/00039.jpg differ diff --git a/assets/example/davis-dog/00040.jpg b/assets/example/davis-dog/00040.jpg new file mode 100644 index 0000000000000000000000000000000000000000..887dda87b9613c14a1ccd47cfe7272baa44452b7 Binary files /dev/null and b/assets/example/davis-dog/00040.jpg differ diff --git a/assets/example/davis-dog/00041.jpg b/assets/example/davis-dog/00041.jpg new file mode 100644 index 0000000000000000000000000000000000000000..977cfe7ac19254429dac1748f6441eca5eb6266d Binary files /dev/null and b/assets/example/davis-dog/00041.jpg differ diff --git a/assets/example/davis-dog/00042.jpg b/assets/example/davis-dog/00042.jpg new file mode 100644 index 0000000000000000000000000000000000000000..caff6b3feb8cba9a00e036a93b660bc6d28324c6 Binary files /dev/null and b/assets/example/davis-dog/00042.jpg differ diff --git a/assets/example/davis-dog/00043.jpg b/assets/example/davis-dog/00043.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0fc4f42cd2f4b68cc3e8cf75976ebac28b5727ef Binary files /dev/null and b/assets/example/davis-dog/00043.jpg differ diff --git a/assets/example/davis-dog/00044.jpg b/assets/example/davis-dog/00044.jpg new file mode 100644 index 0000000000000000000000000000000000000000..263d0053571571b501962f5cf62e61dbf9178a02 Binary files /dev/null and b/assets/example/davis-dog/00044.jpg differ diff --git a/assets/example/davis-dog/00045.jpg b/assets/example/davis-dog/00045.jpg new file mode 100644 index 0000000000000000000000000000000000000000..85ca5a410828b249a508417e3f8a0c1d59904078 Binary files /dev/null and b/assets/example/davis-dog/00045.jpg differ diff --git a/assets/example/davis-dog/00046.jpg b/assets/example/davis-dog/00046.jpg new file mode 100644 index 0000000000000000000000000000000000000000..40763b0aab3c771e858ec789d6e75057aebd4265 Binary files /dev/null and b/assets/example/davis-dog/00046.jpg differ diff --git a/assets/example/davis-dog/00047.jpg b/assets/example/davis-dog/00047.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3a89b557776fa616c3cae224f21a518759bd9338 Binary files /dev/null and b/assets/example/davis-dog/00047.jpg differ diff --git a/assets/example/davis-dog/00048.jpg b/assets/example/davis-dog/00048.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8dd6700e13d3cdbdd7947756dc43595ce1e99ac5 Binary files 
/dev/null and b/assets/example/davis-dog/00048.jpg differ diff --git a/assets/example/davis-dog/00049.jpg b/assets/example/davis-dog/00049.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ed36dec3a55e73a5f1b5ccb2aa78ec690939c2a7 Binary files /dev/null and b/assets/example/davis-dog/00049.jpg differ diff --git a/assets/example/davis-dog/00050.jpg b/assets/example/davis-dog/00050.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a14bec357e4aeccc245abed522320efa3e33bc1f Binary files /dev/null and b/assets/example/davis-dog/00050.jpg differ diff --git a/assets/example/davis-dog/00051.jpg b/assets/example/davis-dog/00051.jpg new file mode 100644 index 0000000000000000000000000000000000000000..974ed8d6bad6a788578f370a1943a491c89c4f4b Binary files /dev/null and b/assets/example/davis-dog/00051.jpg differ diff --git a/assets/example/davis-dog/00052.jpg b/assets/example/davis-dog/00052.jpg new file mode 100644 index 0000000000000000000000000000000000000000..394e7b0416a74a1ee004e58193043c4b71447415 Binary files /dev/null and b/assets/example/davis-dog/00052.jpg differ diff --git a/assets/example/davis-dog/00053.jpg b/assets/example/davis-dog/00053.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7e2f0adc92b587a82cacd9d825ad992e204e9f95 Binary files /dev/null and b/assets/example/davis-dog/00053.jpg differ diff --git a/assets/example/davis-dog/00054.jpg b/assets/example/davis-dog/00054.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e3d517eb86ca001ceee5e40a63c6ea242942800c Binary files /dev/null and b/assets/example/davis-dog/00054.jpg differ diff --git a/assets/example/davis-dog/00055.jpg b/assets/example/davis-dog/00055.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8c7ead10f769c6cc83cfc5ccc86dd1b11eac8e06 Binary files /dev/null and b/assets/example/davis-dog/00055.jpg differ diff --git a/assets/example/davis-dog/00056.jpg b/assets/example/davis-dog/00056.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9fb21a9902b73e755e8f8f3efbc8dc8e4d4bb85c Binary files /dev/null and b/assets/example/davis-dog/00056.jpg differ diff --git a/assets/example/davis-dog/00057.jpg b/assets/example/davis-dog/00057.jpg new file mode 100644 index 0000000000000000000000000000000000000000..89deb5393c73c54f05d5b5ae6a625f1cd4ad48d7 Binary files /dev/null and b/assets/example/davis-dog/00057.jpg differ diff --git a/assets/example/davis-dog/00058.jpg b/assets/example/davis-dog/00058.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f982159c7e295b3e2b4d1227036816efd53a125 Binary files /dev/null and b/assets/example/davis-dog/00058.jpg differ diff --git a/assets/example/davis-dog/00059.jpg b/assets/example/davis-dog/00059.jpg new file mode 100644 index 0000000000000000000000000000000000000000..59b038929f2dfe78b35cf9de4acc4695d076e41c Binary files /dev/null and b/assets/example/davis-dog/00059.jpg differ diff --git a/assets/example/sintel-market_2/frame_0001.png b/assets/example/sintel-market_2/frame_0001.png new file mode 100644 index 0000000000000000000000000000000000000000..cc417d1e038ee6ede1f4149297eb9aab7dd75a7c Binary files /dev/null and b/assets/example/sintel-market_2/frame_0001.png differ diff --git a/assets/example/sintel-market_2/frame_0002.png b/assets/example/sintel-market_2/frame_0002.png new file mode 100644 index 0000000000000000000000000000000000000000..9162dcb0d961b2d7e132f7389087bb6a82e4c83c Binary files /dev/null and 
b/assets/example/sintel-market_2/frame_0002.png differ diff --git a/assets/example/sintel-market_2/frame_0003.png b/assets/example/sintel-market_2/frame_0003.png new file mode 100644 index 0000000000000000000000000000000000000000..0aad46e8214aeea908ac99bacee28769d3260b98 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0003.png differ diff --git a/assets/example/sintel-market_2/frame_0004.png b/assets/example/sintel-market_2/frame_0004.png new file mode 100644 index 0000000000000000000000000000000000000000..dabef3ab83c3acc5304e0edcbbf5236d31cf1e44 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0004.png differ diff --git a/assets/example/sintel-market_2/frame_0005.png b/assets/example/sintel-market_2/frame_0005.png new file mode 100644 index 0000000000000000000000000000000000000000..3158ce5d72f2446c0e55543d08b00f5ad91ad81f Binary files /dev/null and b/assets/example/sintel-market_2/frame_0005.png differ diff --git a/assets/example/sintel-market_2/frame_0006.png b/assets/example/sintel-market_2/frame_0006.png new file mode 100644 index 0000000000000000000000000000000000000000..579d47d726bc6f7aad9954b852e725dab8ebd10a Binary files /dev/null and b/assets/example/sintel-market_2/frame_0006.png differ diff --git a/assets/example/sintel-market_2/frame_0007.png b/assets/example/sintel-market_2/frame_0007.png new file mode 100644 index 0000000000000000000000000000000000000000..ff28354433478595e7cdae0c417a5be5ba5a35b8 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0007.png differ diff --git a/assets/example/sintel-market_2/frame_0008.png b/assets/example/sintel-market_2/frame_0008.png new file mode 100644 index 0000000000000000000000000000000000000000..d4f45aac79f20b602219cf3cfb64e897cb6195c6 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0008.png differ diff --git a/assets/example/sintel-market_2/frame_0009.png b/assets/example/sintel-market_2/frame_0009.png new file mode 100644 index 0000000000000000000000000000000000000000..1e0e2d5667e3a60ffa22fe585bea1b56b8375088 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0009.png differ diff --git a/assets/example/sintel-market_2/frame_0010.png b/assets/example/sintel-market_2/frame_0010.png new file mode 100644 index 0000000000000000000000000000000000000000..bbdfb686ca6b19f4b1a0000b243948b78723c5d1 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0010.png differ diff --git a/assets/example/sintel-market_2/frame_0011.png b/assets/example/sintel-market_2/frame_0011.png new file mode 100644 index 0000000000000000000000000000000000000000..b4765fcce889a97352ca0f636c07deb24fd90c26 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0011.png differ diff --git a/assets/example/sintel-market_2/frame_0012.png b/assets/example/sintel-market_2/frame_0012.png new file mode 100644 index 0000000000000000000000000000000000000000..200c174326503bf86ccb1f5f2b8caa3b26917a71 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0012.png differ diff --git a/assets/example/sintel-market_2/frame_0013.png b/assets/example/sintel-market_2/frame_0013.png new file mode 100644 index 0000000000000000000000000000000000000000..9b395e2fa3b0b0dd6a1c29c67167830d1998c397 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0013.png differ diff --git a/assets/example/sintel-market_2/frame_0014.png b/assets/example/sintel-market_2/frame_0014.png new file mode 100644 index 
0000000000000000000000000000000000000000..f3600170b066fce09ca497b5b962898255963c64 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0014.png differ diff --git a/assets/example/sintel-market_2/frame_0015.png b/assets/example/sintel-market_2/frame_0015.png new file mode 100644 index 0000000000000000000000000000000000000000..3bd5ab51cecae5ddd96995f8d7b5a3e4ada98531 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0015.png differ diff --git a/assets/example/sintel-market_2/frame_0016.png b/assets/example/sintel-market_2/frame_0016.png new file mode 100644 index 0000000000000000000000000000000000000000..99e56e262148a6c3c6f6808111d591a774171a82 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0016.png differ diff --git a/assets/example/sintel-market_2/frame_0017.png b/assets/example/sintel-market_2/frame_0017.png new file mode 100644 index 0000000000000000000000000000000000000000..66e6b756183fbd05e1b9e0569f1e8a9eaf8a8b63 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0017.png differ diff --git a/assets/example/sintel-market_2/frame_0018.png b/assets/example/sintel-market_2/frame_0018.png new file mode 100644 index 0000000000000000000000000000000000000000..bbb9598c578a34c51e8f53592769d343961e7592 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0018.png differ diff --git a/assets/example/sintel-market_2/frame_0019.png b/assets/example/sintel-market_2/frame_0019.png new file mode 100644 index 0000000000000000000000000000000000000000..367c6ec52d040aefdd2a56b9a9cb1a038b581f02 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0019.png differ diff --git a/assets/example/sintel-market_2/frame_0020.png b/assets/example/sintel-market_2/frame_0020.png new file mode 100644 index 0000000000000000000000000000000000000000..f9293f00a1f52bdc2eb5bd9c478137d1777bb00d Binary files /dev/null and b/assets/example/sintel-market_2/frame_0020.png differ diff --git a/assets/example/sintel-market_2/frame_0021.png b/assets/example/sintel-market_2/frame_0021.png new file mode 100644 index 0000000000000000000000000000000000000000..8a470396bd7128b0962db1fe32ddf02ead3e3f9b Binary files /dev/null and b/assets/example/sintel-market_2/frame_0021.png differ diff --git a/assets/example/sintel-market_2/frame_0022.png b/assets/example/sintel-market_2/frame_0022.png new file mode 100644 index 0000000000000000000000000000000000000000..99d7094b53c1189409ee0f0a0fb20d7c1ca31603 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0022.png differ diff --git a/assets/example/sintel-market_2/frame_0023.png b/assets/example/sintel-market_2/frame_0023.png new file mode 100644 index 0000000000000000000000000000000000000000..a9d26b3c094a6d9176318d486e3e890449b48559 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0023.png differ diff --git a/assets/example/sintel-market_2/frame_0024.png b/assets/example/sintel-market_2/frame_0024.png new file mode 100644 index 0000000000000000000000000000000000000000..6efb0879ac57cd2ef60d938928fd9a8db2ca884b Binary files /dev/null and b/assets/example/sintel-market_2/frame_0024.png differ diff --git a/assets/example/sintel-market_2/frame_0025.png b/assets/example/sintel-market_2/frame_0025.png new file mode 100644 index 0000000000000000000000000000000000000000..29d00e8857e2367d971073c7a715ef71ac0c1057 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0025.png differ diff --git a/assets/example/sintel-market_2/frame_0026.png 
b/assets/example/sintel-market_2/frame_0026.png new file mode 100644 index 0000000000000000000000000000000000000000..78715311e0cc632c609c86c39828dd56c78a79ba Binary files /dev/null and b/assets/example/sintel-market_2/frame_0026.png differ diff --git a/assets/example/sintel-market_2/frame_0027.png b/assets/example/sintel-market_2/frame_0027.png new file mode 100644 index 0000000000000000000000000000000000000000..949c875a5135fb8296d115815cfbe74848b66b71 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0027.png differ diff --git a/assets/example/sintel-market_2/frame_0028.png b/assets/example/sintel-market_2/frame_0028.png new file mode 100644 index 0000000000000000000000000000000000000000..f75fa75e984a56c2dfa432b612d973a51e0b1586 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0028.png differ diff --git a/assets/example/sintel-market_2/frame_0029.png b/assets/example/sintel-market_2/frame_0029.png new file mode 100644 index 0000000000000000000000000000000000000000..0c4993499a2320336ff4ad7b2154043cc2d71b0f Binary files /dev/null and b/assets/example/sintel-market_2/frame_0029.png differ diff --git a/assets/example/sintel-market_2/frame_0030.png b/assets/example/sintel-market_2/frame_0030.png new file mode 100644 index 0000000000000000000000000000000000000000..e859be76f5110adacba3c33b4f4b9e9f2cbb1072 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0030.png differ diff --git a/assets/example/sintel-market_2/frame_0031.png b/assets/example/sintel-market_2/frame_0031.png new file mode 100644 index 0000000000000000000000000000000000000000..ce64437a76e4cb2e24c3b4c0a950a0034da483c0 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0031.png differ diff --git a/assets/example/sintel-market_2/frame_0032.png b/assets/example/sintel-market_2/frame_0032.png new file mode 100644 index 0000000000000000000000000000000000000000..f3c7edcbadd550642a8280877354371e8e0da972 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0032.png differ diff --git a/assets/example/sintel-market_2/frame_0033.png b/assets/example/sintel-market_2/frame_0033.png new file mode 100644 index 0000000000000000000000000000000000000000..76669846fbaf400e301550b5e679a3c656a52e14 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0033.png differ diff --git a/assets/example/sintel-market_2/frame_0034.png b/assets/example/sintel-market_2/frame_0034.png new file mode 100644 index 0000000000000000000000000000000000000000..7a2de6b882e6885a2e5fb3e1f22a2e331939daf5 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0034.png differ diff --git a/assets/example/sintel-market_2/frame_0035.png b/assets/example/sintel-market_2/frame_0035.png new file mode 100644 index 0000000000000000000000000000000000000000..247be1ae88d60ae251a32c914495e57bdb29040f Binary files /dev/null and b/assets/example/sintel-market_2/frame_0035.png differ diff --git a/assets/example/sintel-market_2/frame_0036.png b/assets/example/sintel-market_2/frame_0036.png new file mode 100644 index 0000000000000000000000000000000000000000..357ea68119652e216ed8575e0b8c63a4d82ad9bc Binary files /dev/null and b/assets/example/sintel-market_2/frame_0036.png differ diff --git a/assets/example/sintel-market_2/frame_0037.png b/assets/example/sintel-market_2/frame_0037.png new file mode 100644 index 0000000000000000000000000000000000000000..f6ce642a1fc5df57080a4ebd591cd2097694b74e Binary files /dev/null and b/assets/example/sintel-market_2/frame_0037.png differ diff --git 
a/assets/example/sintel-market_2/frame_0038.png b/assets/example/sintel-market_2/frame_0038.png new file mode 100644 index 0000000000000000000000000000000000000000..0e3aca24bcc6b512aa8740e80e7f8a22ed3b16c0 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0038.png differ diff --git a/assets/example/sintel-market_2/frame_0039.png b/assets/example/sintel-market_2/frame_0039.png new file mode 100644 index 0000000000000000000000000000000000000000..727e8202b105aec0c540f9b74d14394efc4b8ec0 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0039.png differ diff --git a/assets/example/sintel-market_2/frame_0040.png b/assets/example/sintel-market_2/frame_0040.png new file mode 100644 index 0000000000000000000000000000000000000000..c221145d9a4859441872d6cd23eb0cc844178456 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0040.png differ diff --git a/assets/example/sintel-market_2/frame_0041.png b/assets/example/sintel-market_2/frame_0041.png new file mode 100644 index 0000000000000000000000000000000000000000..3554e0fae2b5c41dd1455e2fe50565725519bcb8 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0041.png differ diff --git a/assets/example/sintel-market_2/frame_0042.png b/assets/example/sintel-market_2/frame_0042.png new file mode 100644 index 0000000000000000000000000000000000000000..6f5e297ec9c84247818b0e1d94c075e2f5b0da76 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0042.png differ diff --git a/assets/example/sintel-market_2/frame_0043.png b/assets/example/sintel-market_2/frame_0043.png new file mode 100644 index 0000000000000000000000000000000000000000..b69dee44da56b869ebc3465cf0f8a75b599475ac Binary files /dev/null and b/assets/example/sintel-market_2/frame_0043.png differ diff --git a/assets/example/sintel-market_2/frame_0044.png b/assets/example/sintel-market_2/frame_0044.png new file mode 100644 index 0000000000000000000000000000000000000000..3721b598d49c8c7ed7b424ea94e3623bcbe04949 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0044.png differ diff --git a/assets/example/sintel-market_2/frame_0045.png b/assets/example/sintel-market_2/frame_0045.png new file mode 100644 index 0000000000000000000000000000000000000000..344f23104fcdf9318d380f8d9e23c0d83108c3e9 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0045.png differ diff --git a/assets/example/sintel-market_2/frame_0046.png b/assets/example/sintel-market_2/frame_0046.png new file mode 100644 index 0000000000000000000000000000000000000000..7b4265e2024d026d0f4ad1b006779d20b7759771 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0046.png differ diff --git a/assets/example/sintel-market_2/frame_0047.png b/assets/example/sintel-market_2/frame_0047.png new file mode 100644 index 0000000000000000000000000000000000000000..f1562294a9ca4c1ce2c04121422e0e1649b7b9ff Binary files /dev/null and b/assets/example/sintel-market_2/frame_0047.png differ diff --git a/assets/example/sintel-market_2/frame_0048.png b/assets/example/sintel-market_2/frame_0048.png new file mode 100644 index 0000000000000000000000000000000000000000..3d68f4f1076fb193ba6d7b1a5b7c5c4dbdb2a420 Binary files /dev/null and b/assets/example/sintel-market_2/frame_0048.png differ diff --git a/assets/example/sintel-market_2/frame_0049.png b/assets/example/sintel-market_2/frame_0049.png new file mode 100644 index 0000000000000000000000000000000000000000..07bb3a2ce25e25e7644f5425ad6ec852b6f18dfa Binary files /dev/null and 
b/assets/example/sintel-market_2/frame_0049.png differ diff --git a/assets/example/sintel-market_2/frame_0050.png b/assets/example/sintel-market_2/frame_0050.png new file mode 100644 index 0000000000000000000000000000000000000000..a01c6a6f161f0eaf14a758c93ff2ede355a602ef Binary files /dev/null and b/assets/example/sintel-market_2/frame_0050.png differ diff --git a/datasets_preprocess/pointodyssey_rearrange.py b/datasets_preprocess/pointodyssey_rearrange.py new file mode 100644 index 0000000000000000000000000000000000000000..5c309db58122c3554a3a210800bfe8a48bf3ba80 --- /dev/null +++ b/datasets_preprocess/pointodyssey_rearrange.py @@ -0,0 +1,64 @@ +import sys +import torch +sys.path.append('.') +import os +import numpy as np +import glob +from tqdm import tqdm + +dataset_location = '../data/point_odyssey' +# print(dataset_location) +for dset in ["train", "test", "sample"]: + sequences = [] + subdir = os.path.join(dataset_location, dset) + for seq in glob.glob(os.path.join(subdir, "*/")): + sequences.append(seq) + sequences = sorted(sequences) + + print('found %d unique videos in %s (dset=%s)' % (len(sequences), dataset_location, dset)) + + ## load trajectories + print('loading trajectories...') + + for seq in sequences: + # print('seq', seq) + # if os.path.exists(os.path.join(seq, 'trajs_2d')): + # print('skipping', seq) + # continue + info_path = os.path.join(seq, 'info.npz') + info = np.load(info_path, allow_pickle=True) + trajs_3d_shape = info['trajs_3d'].astype(np.float32) + + if len(trajs_3d_shape): + print('processing', seq) + rgb_path = os.path.join(seq, 'rgbs') + info_path = os.path.join(seq, 'info.npz') + annotations_path = os.path.join(seq, 'anno.npz') + + trajs_3d_path = os.path.join(seq, 'trajs_3d') + trajs_2d_path = os.path.join(seq, 'trajs_2d') + os.makedirs(trajs_3d_path, exist_ok=True) + os.makedirs(trajs_2d_path, exist_ok=True) + + + info = np.load(info_path, allow_pickle=True) + trajs_3d_shape = info['trajs_3d'] + anno = np.load(annotations_path, allow_pickle=True) + keys = {'trajs_2d': 'traj_2d', 'trajs_3d': 'traj_3d', 'valids': 'valid', 'visibs': 'visib', 'intrinsics': 'intrinsic', 'extrinsics': 'extrinsic'} + if len(trajs_3d_shape) == 0: + print(anno['trajs_3d']) + print('skipping', seq) + continue + tensors = {key: torch.tensor(anno[key]).cuda() for key in keys} + + for t in tqdm(range(trajs_3d_shape[0])): + for key, item_key in keys.items(): + path = os.path.join(seq, key) + os.makedirs(path, exist_ok=True) + filename = os.path.join(path, f'{item_key}_{t:05d}.npy') + np.save(filename, tensors[key][t].cpu().numpy()) + + + + diff --git a/datasets_preprocess/sintel_get_dynamics.py b/datasets_preprocess/sintel_get_dynamics.py new file mode 100644 index 0000000000000000000000000000000000000000..e116380fafcbf1dbd9d633412045b7b84ca34b10 --- /dev/null +++ b/datasets_preprocess/sintel_get_dynamics.py @@ -0,0 +1,170 @@ +import numpy as np +import cv2 +import os +from tqdm import tqdm +import argparse +import torch + +TAG_FLOAT = 202021.25 +def flow_read(filename): + """ Read optical flow from file, return (U,V) tuple. + + Original code by Deqing Sun, adapted from Daniel Scharstein. + """ + f = open(filename,'rb') + check = np.fromfile(f,dtype=np.float32,count=1)[0] + assert check == TAG_FLOAT, 'flow_read:: Wrong tag in flow file (should be: {0}, is: {1}). 
Big-endian machine?'.format(TAG_FLOAT,check) + width = np.fromfile(f,dtype=np.int32,count=1)[0] + height = np.fromfile(f,dtype=np.int32,count=1)[0] + size = width*height + assert width > 0 and height > 0 and size > 1 and size < 100000000, 'flow_read:: Invalid input size (width = {0}, height = {1}).'.format(width,height) + tmp = np.fromfile(f,dtype=np.float32,count=-1).reshape((height,width*2)) + u = tmp[:,np.arange(width)*2] + v = tmp[:,np.arange(width)*2 + 1] + return u,v + +def cam_read(filename): + """ Read camera data, return (M,N) tuple. + + M is the intrinsic matrix, N is the extrinsic matrix, so that + + x = M*N*X, + where x is a point in homogeneous image pixel coordinates, and X is a + point in homogeneous world coordinates. + """ + f = open(filename,'rb') + check = np.fromfile(f,dtype=np.float32,count=1)[0] + assert check == TAG_FLOAT, 'cam_read:: Wrong tag in cam file (should be: {0}, is: {1}). Big-endian machine?'.format(TAG_FLOAT,check) + M = np.fromfile(f,dtype='float64',count=9).reshape((3,3)) + N = np.fromfile(f,dtype='float64',count=12).reshape((3,4)) + return M,N + +def depth_read(filename): + """ Read depth data from file, return as numpy array. """ + f = open(filename,'rb') + check = np.fromfile(f,dtype=np.float32,count=1)[0] + assert check == TAG_FLOAT, 'depth_read:: Wrong tag in depth file (should be: {0}, is: {1}). Big-endian machine?'.format(TAG_FLOAT,check) + width = np.fromfile(f,dtype=np.int32,count=1)[0] + height = np.fromfile(f,dtype=np.int32,count=1)[0] + size = width*height + assert width > 0 and height > 0 and size > 1 and size < 100000000, 'depth_read:: Invalid input size (width = {0}, height = {1}).'.format(width,height) + depth = np.fromfile(f,dtype=np.float32,count=-1).reshape((height,width)) + return depth + +def RT_to_extrinsic_matrix(R, T): + extrinsic_matrix = np.concatenate([R, T], axis=-1) + extrinsic_matrix = np.concatenate([extrinsic_matrix, np.array([[0, 0, 0, 1]])], axis=0) + return np.linalg.inv(extrinsic_matrix) + +def depth_to_3d(depth_map, intrinsic_matrix): + height, width = depth_map.shape + i, j = np.meshgrid(np.arange(width), np.arange(height)) + + # Convert pixel coordinates and depth values to 3D points + x = (i - intrinsic_matrix[0, 2]) * depth_map / intrinsic_matrix[0, 0] + y = (j - intrinsic_matrix[1, 2]) * depth_map / intrinsic_matrix[1, 1] + z = depth_map + + points_3d = np.stack([x, y, z], axis=-1) + return points_3d + +def project_3d_to_2d(points_3d, intrinsic_matrix): + # Convert 3D points to homogeneous coordinates + projected_2d_hom = intrinsic_matrix @ points_3d.T + # Convert from homogeneous coordinates to 2D image coordinates + projected_2d = projected_2d_hom[:2, :] / projected_2d_hom[2, :] + return projected_2d.T + +def compute_optical_flow(depth1, depth2, pose1, pose2, intrinsic_matrix1, intrinsic_matrix2): + # Input: All inputs as numpy arrays; convert torch tensors to numpy arrays if needed + if isinstance(depth1, torch.Tensor): + depth1 = depth1.cpu().numpy() + if isinstance(depth2, torch.Tensor): + depth2 = depth2.cpu().numpy() + if isinstance(pose1, torch.Tensor): + pose1 = pose1.cpu().numpy() + if isinstance(pose2, torch.Tensor): + pose2 = pose2.cpu().numpy() + if isinstance(intrinsic_matrix1, torch.Tensor): + intrinsic_matrix1 = intrinsic_matrix1.cpu().numpy() + if isinstance(intrinsic_matrix2, torch.Tensor): + intrinsic_matrix2 = intrinsic_matrix2.cpu().numpy() + + points_3d_frame1 = depth_to_3d(depth1, intrinsic_matrix1).reshape(-1, 3) + points_3d_frame1_hom = np.concatenate([points_3d_frame1, 
np.ones((points_3d_frame1.shape[0], 1))], axis=1).T + + # Calculate the transformation matrix from frame 1 to frame 2 + transformation_matrix = (pose2) @ np.linalg.inv(pose1) + points_3d_frame2_hom = transformation_matrix @ points_3d_frame1_hom + points_3d_frame2 = (points_3d_frame2_hom[:3, :]).T + + points_2d_frame1 = project_3d_to_2d(points_3d_frame1, intrinsic_matrix1) + points_2d_frame2 = project_3d_to_2d(points_3d_frame2, intrinsic_matrix2) + + # Compute optical flow vectors + optical_flow = points_2d_frame2 - points_2d_frame1 + return optical_flow + +def get_dynamic_label(base_dir, seq, continuous=False, threshold=13.75, save_dir='dynamic_label'): + depth_dir = os.path.join(base_dir, 'depth', seq) + cam_dir = os.path.join(base_dir, 'camdata_left', seq) + flow_dir = os.path.join(base_dir, 'flow', seq) + dynamic_label_dir = os.path.join(base_dir, save_dir, seq) + os.makedirs(dynamic_label_dir, exist_ok=True) + + frames = sorted([f for f in os.listdir(depth_dir) if f.endswith('.dpt')]) + for i, frame1 in enumerate(frames): + if i == len(frames) - 1: + continue + frame2 = frames[i + 1] + + frame1_id = frame1.split('.')[0] + frame2_id = frame2.split('.')[0] + + # Load depth maps + depth_map_frame1 = depth_read(os.path.join(depth_dir, frame1)) + depth_map_frame2 = depth_read(os.path.join(depth_dir, frame2)) + + # Load camera intrinsics and poses + intrinsic_matrix1, pose_frame1 = cam_read(os.path.join(cam_dir, f'{frame1_id}.cam')) + intrinsic_matrix2, pose_frame2 = cam_read(os.path.join(cam_dir, f'{frame2_id}.cam')) + + # Pad pose with [0,0,0,1] + pose_frame1 = np.concatenate([pose_frame1, np.array([[0, 0, 0, 1]])], axis=0) + pose_frame2 = np.concatenate([pose_frame2, np.array([[0, 0, 0, 1]])], axis=0) + + # Compute optical flow + optical_flow = compute_optical_flow(depth_map_frame1, depth_map_frame2, pose_frame1, pose_frame2, intrinsic_matrix1, intrinsic_matrix2) + + # Reshape the optical flow to the image dimensions + height, width = depth_map_frame1.shape + optical_flow_image = optical_flow.reshape(height, width, 2) + + # Load ground truth optical flow + u, v = flow_read(os.path.join(flow_dir, f'{frame1_id}.flo')) + gt_flow = np.stack([u, v], axis=-1) + + # Compute the error map + error_map = np.linalg.norm(gt_flow - optical_flow_image, axis=-1) + if not continuous: + binary_error_map = error_map > threshold + + # Save the binary error map + cv2.imwrite(os.path.join(dynamic_label_dir, f'{frame1_id}.png'), binary_error_map.astype(np.uint8) * 255) + else: + # Normalize the error map + error_map = error_map / error_map.max() + cv2.imwrite(os.path.join(dynamic_label_dir, f'{frame1_id}.png'), (error_map * 255).astype(np.uint8)) + +if __name__ == '__main__': + # Process all sequences + sequences = sorted(os.listdir('data/sintel/training/depth')) + base_dir = 'data/sintel/training' + parser = argparse.ArgumentParser() + parser.add_argument('--continuous', action='store_true') + parser.add_argument('--threshold', type=float, default=13.75) + parser.add_argument('--save_dir', type=str, default='dynamic_label') + args = parser.parse_args() + for seq in tqdm(sequences): + get_dynamic_label(base_dir, seq, continuous=args.continuous, threshold=args.threshold, save_dir=args.save_dir) + print(f'Finished processing {seq}') diff --git a/dynamic_predictor/DAS3R_b32_g4.sh b/dynamic_predictor/DAS3R_b32_g4.sh new file mode 100644 index 0000000000000000000000000000000000000000..58f65de331aea15f65d44e9de288d99cec3753eb --- /dev/null +++ b/dynamic_predictor/DAS3R_b32_g4.sh @@ -0,0 +1,19 @@ +# Set environment 
variables +export CUDA_VISIBLE_DEVICES="0,1,2,3" + +# Run the Python script using torchrun (adjust if using distributed training) +torchrun --nproc_per_node=1 --master_port=27777 launch.py --mode=train \ + --model="AsymmetricCroCo3DStereo(pos_embed='RoPE100', patch_embed_cls='ManyAR_PatchEmbed', \ + img_size=(512, 512), head_type='dpt', output_mode='pts3d', depth_mode=('exp', -inf, inf), conf_mode=('exp', 1, inf), \ + enc_embed_dim=1024, enc_depth=24, enc_num_heads=16, dec_embed_dim=768, dec_depth=12, dec_num_heads=12, freeze='encoder_and_3d_predictor')" \ + --train_dataset="10_000 @ PointOdysseyDUSt3R(dset='train', z_far=80, dataset_location='../data/point_odyssey', S=2, aug_crop=16, resolution=[(512, 288), (512, 384), (512, 336)], transform=ColorJitter, strides=[1,2,3,4,5,6,7,8,9], dist_type='linear_1_2', aug_focal=0.9)" \ + --test_dataset="1 * PointOdysseyDUSt3R(dset='test', z_far=80, dataset_location='../data/point_odyssey', S=2, strides=[1,2,3,4,5,6,7,8,9], resolution=[(512, 288)], seed=777) + 1 * SintelDUSt3R(dset='final', z_far=80, S=2, strides=[1,2,3,4,5,6,7,8,9], resolution=[(512, 224)], seed=777)" \ + --train_criterion="ConfLoss(Regr3D_MMask(L21, norm_mode='avg_dis'), alpha=0.2)" \ + --test_criterion="Regr3D_ScaleShiftInv_MMask(L21, gt_scale=True)" \ + --pretrained="Junyi42/MonST3R_PO-TA-S-W_ViTLarge_BaseDecoder_512_dpt" \ + --lr=0.00005 --min_lr=1e-06 --warmup_epochs=3 --epochs=50 --batch_size=8 --accum_iter=1 \ + --test_batch_size=8 \ + --save_freq=3 --keep_freq=5 --eval_freq=50 \ + --output_dir=results/MSeg_from_monst3r_b32_g4 \ + --num_workers=16 --wandb \ + diff --git a/dynamic_predictor/croco/.gitignore b/dynamic_predictor/croco/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..b6e47617de110dea7ca47e087ff1347cc2646eda --- /dev/null +++ b/dynamic_predictor/croco/.gitignore @@ -0,0 +1,129 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ diff --git a/dynamic_predictor/croco/LICENSE b/dynamic_predictor/croco/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..d9b84b1a65f9db6d8920a9048d162f52ba3ea56d --- /dev/null +++ b/dynamic_predictor/croco/LICENSE @@ -0,0 +1,52 @@ +CroCo, Copyright (c) 2022-present Naver Corporation, is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 license. + +A summary of the CC BY-NC-SA 4.0 license is located here: + https://creativecommons.org/licenses/by-nc-sa/4.0/ + +The CC BY-NC-SA 4.0 license is located here: + https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode + + +SEE NOTICE BELOW WITH RESPECT TO THE FILE: models/pos_embed.py, models/blocks.py + +*************************** + +NOTICE WITH RESPECT TO THE FILE: models/pos_embed.py + +This software is being redistributed in a modified form. The original form is available here: + +https://github.com/facebookresearch/mae/blob/main/util/pos_embed.py + +This software in this file incorporates parts of the following software available here: + +Transformer: https://github.com/tensorflow/models/blob/master/official/legacy/transformer/model_utils.py +available under the following license: https://github.com/tensorflow/models/blob/master/LICENSE + +MoCo v3: https://github.com/facebookresearch/moco-v3 +available under the following license: https://github.com/facebookresearch/moco-v3/blob/main/LICENSE + +DeiT: https://github.com/facebookresearch/deit +available under the following license: https://github.com/facebookresearch/deit/blob/main/LICENSE + + +ORIGINAL COPYRIGHT NOTICE AND PERMISSION NOTICE AVAILABLE HERE IS REPRODUCED BELOW: + +https://github.com/facebookresearch/mae/blob/main/LICENSE + +Attribution-NonCommercial 4.0 International + +*************************** + +NOTICE WITH RESPECT TO THE FILE: models/blocks.py + +This software is being redistributed in a modified form. The original form is available here: + +https://github.com/rwightman/pytorch-image-models + +ORIGINAL COPYRIGHT NOTICE AND PERMISSION NOTICE AVAILABLE HERE IS REPRODUCED BELOW: + +https://github.com/rwightman/pytorch-image-models/blob/master/LICENSE + +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ \ No newline at end of file diff --git a/dynamic_predictor/croco/NOTICE b/dynamic_predictor/croco/NOTICE new file mode 100644 index 0000000000000000000000000000000000000000..d51bb365036c12d428d6e3a4fd00885756d5261c --- /dev/null +++ b/dynamic_predictor/croco/NOTICE @@ -0,0 +1,21 @@ +CroCo +Copyright 2022-present NAVER Corp. + +This project contains subcomponents with separate copyright notices and license terms. +Your use of the source code for these subcomponents is subject to the terms and conditions of the following licenses. 
+
+====
+
+facebookresearch/mae
+https://github.com/facebookresearch/mae
+
+Attribution-NonCommercial 4.0 International
+
+====
+
+rwightman/pytorch-image-models
+https://github.com/rwightman/pytorch-image-models
+
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
\ No newline at end of file
diff --git a/dynamic_predictor/croco/README.MD b/dynamic_predictor/croco/README.MD
new file mode 100644
index 0000000000000000000000000000000000000000..38e33b001a60bd16749317fb297acd60f28a6f1b
--- /dev/null
+++ b/dynamic_predictor/croco/README.MD
@@ -0,0 +1,124 @@
+# CroCo + CroCo v2 / CroCo-Stereo / CroCo-Flow
+
+[[`CroCo arXiv`](https://arxiv.org/abs/2210.10716)] [[`CroCo v2 arXiv`](https://arxiv.org/abs/2211.10408)] [[`project page and demo`](https://croco.europe.naverlabs.com/)]
+
+This repository contains the code for our CroCo model presented in our NeurIPS'22 paper [CroCo: Self-Supervised Pre-training for 3D Vision Tasks by Cross-View Completion](https://openreview.net/pdf?id=wZEfHUM5ri) and its follow-up extension published at ICCV'23 [Improved Cross-view Completion Pre-training for Stereo Matching and Optical Flow](https://openaccess.thecvf.com/content/ICCV2023/html/Weinzaepfel_CroCo_v2_Improved_Cross-view_Completion_Pre-training_for_Stereo_Matching_and_ICCV_2023_paper.html), referred to as CroCo v2:
+
+![image](assets/arch.jpg)
+
+```bibtex
+@inproceedings{croco,
+  title={{CroCo: Self-Supervised Pre-training for 3D Vision Tasks by Cross-View Completion}},
+  author={{Weinzaepfel, Philippe and Leroy, Vincent and Lucas, Thomas and Br\'egier, Romain and Cabon, Yohann and Arora, Vaibhav and Antsfeld, Leonid and Chidlovskii, Boris and Csurka, Gabriela and Revaud J\'er\^ome}},
+  booktitle={{NeurIPS}},
+  year={2022}
+}
+
+@inproceedings{croco_v2,
+  title={{CroCo v2: Improved Cross-view Completion Pre-training for Stereo Matching and Optical Flow}},
+  author={Weinzaepfel, Philippe and Lucas, Thomas and Leroy, Vincent and Cabon, Yohann and Arora, Vaibhav and Br{\'e}gier, Romain and Csurka, Gabriela and Antsfeld, Leonid and Chidlovskii, Boris and Revaud, J{\'e}r{\^o}me},
+  booktitle={ICCV},
+  year={2023}
+}
+```
+
+## License
+
+The code is distributed under the CC BY-NC-SA 4.0 License. See [LICENSE](LICENSE) for more information.
+Some components are based on code from [MAE](https://github.com/facebookresearch/mae) released under the CC BY-NC-SA 4.0 License and [timm](https://github.com/rwightman/pytorch-image-models) released under the Apache 2.0 License.
+Some components for stereo matching and optical flow are based on code from [unimatch](https://github.com/autonomousvision/unimatch) released under the MIT license.
+
+## Preparation
+
+1. Install dependencies on a machine with an NVIDIA GPU, using e.g. conda. Note that `habitat-sim` is required only for the interactive demo and the synthetic pre-training data generation. If you don't plan to use it, you can skip the line installing it and use a more recent Python version.
+
+```bash
+conda create -n croco python=3.7 cmake=3.14.0
+conda activate croco
+conda install habitat-sim headless -c conda-forge -c aihabitat
+conda install pytorch torchvision -c pytorch
+conda install notebook ipykernel matplotlib
+conda install ipywidgets widgetsnbextension
+conda install scikit-learn tqdm quaternion opencv # only for pretraining / habitat data generation
+```
+
+2. Compile CUDA kernels for RoPE
+
+CroCo v2 relies on RoPE positional embeddings, for which you need to compile some CUDA kernels.
+```bash
+cd models/curope/
+python setup.py build_ext --inplace
+cd ../../
+```
+
+This can take a while, as the kernels are compiled for all CUDA architectures; feel free to update L9 of `models/curope/setup.py` to compile for specific architectures only.
+You might also need to set the environment variable `CUDA_HOME` if you use a custom CUDA installation.
+
+In case you cannot compile the kernels, we also provide a slower PyTorch version, which is loaded automatically as a fallback.
+
+3. Download a pre-trained model
+
+We provide several pre-trained models:
+
+| modelname | pre-training data | pos. embed. | Encoder | Decoder |
+|-----------|-------------------|-------------|---------|---------|
+| [`CroCo.pth`](https://download.europe.naverlabs.com/ComputerVision/CroCo/CroCo.pth) | Habitat | cosine | ViT-B | Small |
+| [`CroCo_V2_ViTBase_SmallDecoder.pth`](https://download.europe.naverlabs.com/ComputerVision/CroCo/CroCo_V2_ViTBase_SmallDecoder.pth) | Habitat + real | RoPE | ViT-B | Small |
+| [`CroCo_V2_ViTBase_BaseDecoder.pth`](https://download.europe.naverlabs.com/ComputerVision/CroCo/CroCo_V2_ViTBase_BaseDecoder.pth) | Habitat + real | RoPE | ViT-B | Base |
+| [`CroCo_V2_ViTLarge_BaseDecoder.pth`](https://download.europe.naverlabs.com/ComputerVision/CroCo/CroCo_V2_ViTLarge_BaseDecoder.pth) | Habitat + real | RoPE | ViT-L | Base |
+
+To download a specific model, e.g. the first one (`CroCo.pth`):
+```bash
+mkdir -p pretrained_models/
+wget https://download.europe.naverlabs.com/ComputerVision/CroCo/CroCo.pth -P pretrained_models/
+```
+
+## Reconstruction example
+
+After downloading the `CroCo_V2_ViTLarge_BaseDecoder` pretrained model (or updating the corresponding line in `demo.py`), simply run:
+```bash
+python demo.py
+```
+
+## Interactive demonstration of cross-view completion reconstruction on the Habitat simulator
+
+First download the test scene from Habitat:
+```bash
+python -m habitat_sim.utils.datasets_download --uids habitat_test_scenes --data-path habitat-sim-data/
+```
+
+Then, run the notebook demo `interactive_demo.ipynb`.
+
+In this demo, you should be able to sample a random reference viewpoint from a [Habitat](https://github.com/facebookresearch/habitat-sim) test scene. Use the sliders to change the viewpoint and select a masked target view to reconstruct using CroCo.
+![croco_interactive_demo](https://user-images.githubusercontent.com/1822210/200516576-7937bc6a-55f8-49ed-8618-3ddf89433ea4.jpg)
+
+## Pre-training
+
+### CroCo
+
+To pre-train CroCo, first generate the pre-training data from the Habitat simulator following the instructions in [datasets/habitat_sim/README.MD](datasets/habitat_sim/README.MD), and then run the following command:
+```
+torchrun --nproc_per_node=4 pretrain.py --output_dir ./output/pretraining/
+```
+
+Our CroCo pre-training was launched on a single server with 4 GPUs.
+It should take around 10 days on A100s, or 15 days on V100s, to complete the 400 pre-training epochs, but decent performance is obtained earlier in training.
+Note that, while the code contains the same learning-rate scaling rule as MAE for changes of the effective batch size, we did not experiment to check whether it remains valid in our case.
+The first run can take a few minutes to start, as it parses all available pre-training pairs.
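+For reference, the MAE-style scaling rule mentioned above amounts to multiplying a base learning rate by `effective_batch_size / 256`. The sketch below is purely illustrative; `scaled_lr` and its parameter names are not part of `pretrain.py`:
+
+```python
+def scaled_lr(base_lr: float, batch_per_gpu: int, num_gpus: int, accum_iter: int = 1) -> float:
+    """Linear scaling rule: the learning rate grows with the effective batch size."""
+    effective_batch_size = batch_per_gpu * num_gpus * accum_iter
+    return base_lr * effective_batch_size / 256
+
+# 4 GPUs x batch 64 x no gradient accumulation -> effective batch 256 -> lr == base_lr
+print(scaled_lr(1.5e-4, batch_per_gpu=64, num_gpus=4))
+```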
+ +### CroCo v2 + +For CroCo v2 pre-training, in addition to the generation of the pre-training data from the Habitat simulator above, please pre-extract the crops from the real datasets following the instructions in [datasets/crops/README.MD](datasets/crops/README.MD). +Then, run the following command for the largest model (ViT-L encoder, Base decoder): +``` +torchrun --nproc_per_node=8 pretrain.py --model "CroCoNet(enc_embed_dim=1024, enc_depth=24, enc_num_heads=16, dec_embed_dim=768, dec_num_heads=12, dec_depth=12, pos_embed='RoPE100')" --dataset "habitat_release+ARKitScenes+MegaDepth+3DStreetView+IndoorVL" --warmup_epochs 12 --max_epoch 125 --epochs 250 --amp 0 --keep_freq 5 --output_dir ./output/pretraining_crocov2/ +``` + +Our CroCo v2 pre-training was launched on a single server with 8 GPUs for the largest model, and on a single server with 4 GPUs for the smaller ones, keeping a batch size of 64 per gpu in all cases. +The largest model should take around 12 days on A100. +Note that, while the code contains the same scaling rule of the learning rate as MAE when changing the effective batch size, we did not experimented if it is valid in our case. + +## Stereo matching and Optical flow downstream tasks + +For CroCo-Stereo and CroCo-Flow, please refer to [stereoflow/README.MD](stereoflow/README.MD). diff --git a/dynamic_predictor/croco/assets/Chateau1.png b/dynamic_predictor/croco/assets/Chateau1.png new file mode 100644 index 0000000000000000000000000000000000000000..d282fc6a51c00b8dd8267d5d507220ae253c2d65 Binary files /dev/null and b/dynamic_predictor/croco/assets/Chateau1.png differ diff --git a/dynamic_predictor/croco/assets/Chateau2.png b/dynamic_predictor/croco/assets/Chateau2.png new file mode 100644 index 0000000000000000000000000000000000000000..722b2fc553ec089346722efb9445526ddfa8e7bd Binary files /dev/null and b/dynamic_predictor/croco/assets/Chateau2.png differ diff --git a/dynamic_predictor/croco/assets/arch.jpg b/dynamic_predictor/croco/assets/arch.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3f5b032729ddc58c06d890a0ebda1749276070c4 Binary files /dev/null and b/dynamic_predictor/croco/assets/arch.jpg differ diff --git a/dynamic_predictor/croco/croco-stereo-flow-demo.ipynb b/dynamic_predictor/croco/croco-stereo-flow-demo.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..2b00a7607ab5f82d1857041969bfec977e56b3e0 --- /dev/null +++ b/dynamic_predictor/croco/croco-stereo-flow-demo.ipynb @@ -0,0 +1,191 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "9bca0f41", + "metadata": {}, + "source": [ + "# Simple inference example with CroCo-Stereo or CroCo-Flow" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "80653ef7", + "metadata": {}, + "outputs": [], + "source": [ + "# Copyright (C) 2022-present Naver Corporation. All rights reserved.\n", + "# Licensed under CC BY-NC-SA 4.0 (non-commercial use only)." 
+ ] + }, + { + "cell_type": "markdown", + "id": "4f033862", + "metadata": {}, + "source": [ + "First download the model(s) of your choice by running\n", + "```\n", + "bash stereoflow/download_model.sh crocostereo.pth\n", + "bash stereoflow/download_model.sh crocoflow.pth\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1fb2e392", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "use_gpu = torch.cuda.is_available() and torch.cuda.device_count()>0\n", + "device = torch.device('cuda:0' if use_gpu else 'cpu')\n", + "import matplotlib.pylab as plt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e0e25d77", + "metadata": {}, + "outputs": [], + "source": [ + "from stereoflow.test import _load_model_and_criterion\n", + "from stereoflow.engine import tiled_pred\n", + "from stereoflow.datasets_stereo import img_to_tensor, vis_disparity\n", + "from stereoflow.datasets_flow import flowToColor\n", + "tile_overlap=0.7 # recommended value, higher value can be slightly better but slower" + ] + }, + { + "cell_type": "markdown", + "id": "86a921f5", + "metadata": {}, + "source": [ + "### CroCo-Stereo example" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "64e483cb", + "metadata": {}, + "outputs": [], + "source": [ + "image1 = np.asarray(Image.open(''))\n", + "image2 = np.asarray(Image.open(''))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f0d04303", + "metadata": {}, + "outputs": [], + "source": [ + "model, _, cropsize, with_conf, task, tile_conf_mode = _load_model_and_criterion('stereoflow_models/crocostereo.pth', None, device)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "47dc14b5", + "metadata": {}, + "outputs": [], + "source": [ + "im1 = img_to_tensor(image1).to(device).unsqueeze(0)\n", + "im2 = img_to_tensor(image2).to(device).unsqueeze(0)\n", + "with torch.inference_mode():\n", + " pred, _, _ = tiled_pred(model, None, im1, im2, None, conf_mode=tile_conf_mode, overlap=tile_overlap, crop=cropsize, with_conf=with_conf, return_time=False)\n", + "pred = pred.squeeze(0).squeeze(0).cpu().numpy()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "583b9f16", + "metadata": {}, + "outputs": [], + "source": [ + "plt.imshow(vis_disparity(pred))\n", + "plt.axis('off')" + ] + }, + { + "cell_type": "markdown", + "id": "d2df5d70", + "metadata": {}, + "source": [ + "### CroCo-Flow example" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9ee257a7", + "metadata": {}, + "outputs": [], + "source": [ + "image1 = np.asarray(Image.open(''))\n", + "image2 = np.asarray(Image.open(''))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d5edccf0", + "metadata": {}, + "outputs": [], + "source": [ + "model, _, cropsize, with_conf, task, tile_conf_mode = _load_model_and_criterion('stereoflow_models/crocoflow.pth', None, device)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b19692c3", + "metadata": {}, + "outputs": [], + "source": [ + "im1 = img_to_tensor(image1).to(device).unsqueeze(0)\n", + "im2 = img_to_tensor(image2).to(device).unsqueeze(0)\n", + "with torch.inference_mode():\n", + " pred, _, _ = tiled_pred(model, None, im1, im2, None, conf_mode=tile_conf_mode, overlap=tile_overlap, crop=cropsize, with_conf=with_conf, return_time=False)\n", + "pred = pred.squeeze(0).permute(1,2,0).cpu().numpy()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": 
"26f79db3", + "metadata": {}, + "outputs": [], + "source": [ + "plt.imshow(flowToColor(pred))\n", + "plt.axis('off')" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.7" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/dynamic_predictor/croco/datasets/__init__.py b/dynamic_predictor/croco/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/dynamic_predictor/croco/datasets/crops/README.MD b/dynamic_predictor/croco/datasets/crops/README.MD new file mode 100644 index 0000000000000000000000000000000000000000..47ddabebb177644694ee247ae878173a3a16644f --- /dev/null +++ b/dynamic_predictor/croco/datasets/crops/README.MD @@ -0,0 +1,104 @@ +## Generation of crops from the real datasets + +The instructions below allow to generate the crops used for pre-training CroCo v2 from the following real-world datasets: ARKitScenes, MegaDepth, 3DStreetView and IndoorVL. + +### Download the metadata of the crops to generate + +First, download the metadata and put them in `./data/`: +``` +mkdir -p data +cd data/ +wget https://download.europe.naverlabs.com/ComputerVision/CroCo/data/crop_metadata.zip +unzip crop_metadata.zip +rm crop_metadata.zip +cd .. +``` + +### Prepare the original datasets + +Second, download the original datasets in `./data/original_datasets/`. +``` +mkdir -p data/original_datasets +``` + +##### ARKitScenes + +Download the `raw` dataset from https://github.com/apple/ARKitScenes/blob/main/DATA.md and put it in `./data/original_datasets/ARKitScenes/`. +The resulting file structure should be like: +``` +./data/original_datasets/ARKitScenes/ +└───Training + └───40753679 + │ │ ultrawide + │ │ ... + └───40753686 + │ + ... +``` + +##### MegaDepth + +Download `MegaDepth v1 Dataset` from https://www.cs.cornell.edu/projects/megadepth/ and put it in `./data/original_datasets/MegaDepth/`. +The resulting file structure should be like: + +``` +./data/original_datasets/MegaDepth/ +└───0000 +│ └───images +│ │ │ 1000557903_87fa96b8a4_o.jpg +│ │ └ ... +│ └─── ... +└───0001 +│ │ +│ └ ... +└─── ... +``` + +##### 3DStreetView + +Download `3D_Street_View` dataset from https://github.com/amir32002/3D_Street_View and put it in `./data/original_datasets/3DStreetView/`. +The resulting file structure should be like: + +``` +./data/original_datasets/3DStreetView/ +└───dataset_aligned +│ └───0002 +│ │ │ 0000002_0000001_0000002_0000001.jpg +│ │ └ ... +│ └─── ... +└───dataset_unaligned +│ └───0003 +│ │ │ 0000003_0000001_0000002_0000001.jpg +│ │ └ ... +│ └─── ... +``` + +##### IndoorVL + +Download the `IndoorVL` datasets using [Kapture](https://github.com/naver/kapture). 
+ +``` +pip install kapture +mkdir -p ./data/original_datasets/IndoorVL +cd ./data/original_datasets/IndoorVL +kapture_download_dataset.py update +kapture_download_dataset.py install "HyundaiDepartmentStore_*" +kapture_download_dataset.py install "GangnamStation_*" +cd - +``` + +### Extract the crops + +Now, extract the crops for each of the dataset: +``` +for dataset in ARKitScenes MegaDepth 3DStreetView IndoorVL; +do + python3 datasets/crops/extract_crops_from_images.py --crops ./data/crop_metadata/${dataset}/crops_release.txt --root-dir ./data/original_datasets/${dataset}/ --output-dir ./data/${dataset}_crops/ --imsize 256 --nthread 8 --max-subdir-levels 5 --ideal-number-pairs-in-dir 500; +done +``` + +##### Note for IndoorVL + +Due to some legal issues, we can only release 144,228 pairs out of the 1,593,689 pairs used in the paper. +To account for it in terms of number of pre-training iterations, the pre-training command in this repository uses 125 training epochs including 12 warm-up epochs and learning rate cosine schedule of 250, instead of 100, 10 and 200 respectively. +The impact on the performance is negligible. diff --git a/dynamic_predictor/croco/datasets/crops/extract_crops_from_images.py b/dynamic_predictor/croco/datasets/crops/extract_crops_from_images.py new file mode 100644 index 0000000000000000000000000000000000000000..eb66a0474ce44b54c44c08887cbafdb045b11ff3 --- /dev/null +++ b/dynamic_predictor/croco/datasets/crops/extract_crops_from_images.py @@ -0,0 +1,159 @@ +# Copyright (C) 2022-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). +# +# -------------------------------------------------------- +# Extracting crops for pre-training +# -------------------------------------------------------- + +import os +import argparse +from tqdm import tqdm +from PIL import Image +import functools +from multiprocessing import Pool +import math + + +def arg_parser(): + parser = argparse.ArgumentParser('Generate cropped image pairs from image crop list') + + parser.add_argument('--crops', type=str, required=True, help='crop file') + parser.add_argument('--root-dir', type=str, required=True, help='root directory') + parser.add_argument('--output-dir', type=str, required=True, help='output directory') + parser.add_argument('--imsize', type=int, default=256, help='size of the crops') + parser.add_argument('--nthread', type=int, required=True, help='number of simultaneous threads') + parser.add_argument('--max-subdir-levels', type=int, default=5, help='maximum number of subdirectories') + parser.add_argument('--ideal-number-pairs-in-dir', type=int, default=500, help='number of pairs stored in a dir') + return parser + + +def main(args): + listing_path = os.path.join(args.output_dir, 'listing.txt') + + print(f'Loading list of crops ... 
({args.nthread} threads)') + crops, num_crops_to_generate = load_crop_file(args.crops) + + print(f'Preparing jobs ({len(crops)} candidate image pairs)...') + num_levels = min(math.ceil(math.log(num_crops_to_generate, args.ideal_number_pairs_in_dir)), args.max_subdir_levels) + num_pairs_in_dir = math.ceil(num_crops_to_generate ** (1/num_levels)) + + jobs = prepare_jobs(crops, num_levels, num_pairs_in_dir) + del crops + + os.makedirs(args.output_dir, exist_ok=True) + mmap = Pool(args.nthread).imap_unordered if args.nthread > 1 else map + call = functools.partial(save_image_crops, args) + + print(f"Generating cropped images to {args.output_dir} ...") + with open(listing_path, 'w') as listing: + listing.write('# pair_path\n') + for results in tqdm(mmap(call, jobs), total=len(jobs)): + for path in results: + listing.write(f'{path}\n') + print('Finished writing listing to', listing_path) + + +def load_crop_file(path): + data = open(path).read().splitlines() + pairs = [] + num_crops_to_generate = 0 + for line in tqdm(data): + if line.startswith('#'): + continue + line = line.split(', ') + if len(line) < 8: + img1, img2, rotation = line + pairs.append((img1, img2, int(rotation), [])) + else: + l1, r1, t1, b1, l2, r2, t2, b2 = map(int, line) + rect1, rect2 = (l1, t1, r1, b1), (l2, t2, r2, b2) + pairs[-1][-1].append((rect1, rect2)) + num_crops_to_generate += 1 + return pairs, num_crops_to_generate + + +def prepare_jobs(pairs, num_levels, num_pairs_in_dir): + jobs = [] + powers = [num_pairs_in_dir**level for level in reversed(range(num_levels))] + + def get_path(idx): + idx_array = [] + d = idx + for level in range(num_levels - 1): + idx_array.append(idx // powers[level]) + idx = idx % powers[level] + idx_array.append(d) + return '/'.join(map(lambda x: hex(x)[2:], idx_array)) + + idx = 0 + for pair_data in tqdm(pairs): + img1, img2, rotation, crops = pair_data + if -60 <= rotation and rotation <= 60: + rotation = 0 # most likely not a true rotation + paths = [get_path(idx + k) for k in range(len(crops))] + idx += len(crops) + jobs.append(((img1, img2), rotation, crops, paths)) + return jobs + + +def load_image(path): + try: + return Image.open(path).convert('RGB') + except Exception as e: + print('skipping', path, e) + raise OSError() + + +def save_image_crops(args, data): + # load images + img_pair, rot, crops, paths = data + try: + img1, img2 = [load_image(os.path.join(args.root_dir, impath)) for impath in img_pair] + except OSError as e: + return [] + + def area(sz): + return sz[0] * sz[1] + + tgt_size = (args.imsize, args.imsize) + + def prepare_crop(img, rect, rot=0): + # actual crop + img = img.crop(rect) + + # resize to desired size + interp = Image.Resampling.LANCZOS if area(img.size) > 4*area(tgt_size) else Image.Resampling.BICUBIC + img = img.resize(tgt_size, resample=interp) + + # rotate the image + rot90 = (round(rot/90) % 4) * 90 + if rot90 == 90: + img = img.transpose(Image.Transpose.ROTATE_90) + elif rot90 == 180: + img = img.transpose(Image.Transpose.ROTATE_180) + elif rot90 == 270: + img = img.transpose(Image.Transpose.ROTATE_270) + return img + + results = [] + for (rect1, rect2), path in zip(crops, paths): + crop1 = prepare_crop(img1, rect1) + crop2 = prepare_crop(img2, rect2, rot) + + fullpath1 = os.path.join(args.output_dir, path+'_1.jpg') + fullpath2 = os.path.join(args.output_dir, path+'_2.jpg') + os.makedirs(os.path.dirname(fullpath1), exist_ok=True) + + assert not os.path.isfile(fullpath1), fullpath1 + assert not os.path.isfile(fullpath2), fullpath2 + crop1.save(fullpath1) + 
        crop2.save(fullpath2)
+        results.append(path)
+
+    return results
+
+
+if __name__ == '__main__':
+    args = arg_parser().parse_args()
+    main(args)
+
diff --git a/dynamic_predictor/croco/datasets/habitat_sim/README.MD b/dynamic_predictor/croco/datasets/habitat_sim/README.MD
new file mode 100644
index 0000000000000000000000000000000000000000..a505781ff9eb91bce7f1d189e848f8ba1c560940
--- /dev/null
+++ b/dynamic_predictor/croco/datasets/habitat_sim/README.MD
@@ -0,0 +1,76 @@
+## Generation of synthetic image pairs using Habitat-Sim
+
+These instructions allow you to generate pre-training pairs from the Habitat simulator.
+Since we did not save the metadata of the pairs used in the original paper, the generated pairs are not strictly the same, but they use the same settings and are equivalent.
+
+### Download Habitat-Sim scenes
+Download Habitat-Sim scenes:
+- Download links can be found here: https://github.com/facebookresearch/habitat-sim/blob/main/DATASETS.md
+- We used scenes from the HM3D, habitat-test-scenes, Replica, ReplicaCad and ScanNet datasets.
+- Please put the scenes under `./data/habitat-sim-data/scene_datasets/` following the structure below, or manually update the paths in `paths.py`.
+```
+./data/
+└──habitat-sim-data/
+   └──scene_datasets/
+      ├──hm3d/
+      ├──gibson/
+      ├──habitat-test-scenes/
+      ├──replica_cad_baked_lighting/
+      ├──replica_cad/
+      ├──ReplicaDataset/
+      └──scannet/
+```
+
+### Image pairs generation
+We provide metadata to generate reproducible image pairs for pretraining and validation.
+Experiments described in the paper used similar data, but their generation was not reproducible at the time.
+
+Specifications:
+- 256x256 resolution images, with a 60-degree field of view.
+- Up to 1000 image pairs per scene.
+- Number of scenes / number of image pairs per dataset:
+  - ScanNet: 1097 scenes / 985,209 pairs
+  - HM3D:
+    - hm3d/train: 800 scenes / 800k pairs
+    - hm3d/val: 100 scenes / 100k pairs
+    - hm3d/minival: 10 scenes / 10k pairs
+  - habitat-test-scenes: 3 scenes / 3k pairs
+  - replica_cad_baked_lighting: 13 scenes / 13k pairs
+- Pairs from the hm3d/val and hm3d/minival scenes were not used for pre-training but kept for validation purposes.
+
+Download the metadata and extract it:
+```bash
+mkdir -p data/habitat_release_metadata/
+cd data/habitat_release_metadata/
+wget https://download.europe.naverlabs.com/ComputerVision/CroCo/data/habitat_release_metadata/multiview_habitat_metadata.tar.gz
+tar -xvf multiview_habitat_metadata.tar.gz
+cd ../..
+# Location of the metadata
+METADATA_DIR="./data/habitat_release_metadata/multiview_habitat_metadata"
+```
+
+Generate image pairs from the metadata:
+- The following command will print a list of commandlines to generate image pairs for each scene:
+```bash
+# Target output directory
+PAIRS_DATASET_DIR="./data/habitat_release/"
+python datasets/habitat_sim/generate_from_metadata_files.py --input_dir=$METADATA_DIR --output_dir=$PAIRS_DATASET_DIR
+```
+- Multiple such commands can be launched in parallel, e.g. using GNU Parallel:
+```bash
+python datasets/habitat_sim/generate_from_metadata_files.py --input_dir=$METADATA_DIR --output_dir=$PAIRS_DATASET_DIR | parallel -j 16
+```
+
+## Metadata generation
+
+Image pairs were randomly sampled using the following commands, whose outputs contain randomness and are thus not exactly reproducible:
+```bash
+# Print commandlines to generate image pairs from the different scenes available.
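+# (Each printed line is an independent command; as with the metadata-based
+# generation above, the list can be piped to e.g. GNU Parallel to process
+# several scenes at once.)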
+PAIRS_DATASET_DIR=MY_CUSTOM_PATH +python datasets/habitat_sim/generate_multiview_images.py --list_commands --output_dir=$PAIRS_DATASET_DIR + +# Once a dataset is generated, pack metadata files for reproducibility. +METADATA_DIR=MY_CUSTON_PATH +python datasets/habitat_sim/pack_metadata_files.py $PAIRS_DATASET_DIR $METADATA_DIR +``` diff --git a/dynamic_predictor/croco/datasets/habitat_sim/__init__.py b/dynamic_predictor/croco/datasets/habitat_sim/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/dynamic_predictor/croco/datasets/habitat_sim/generate_from_metadata.py b/dynamic_predictor/croco/datasets/habitat_sim/generate_from_metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..fbe0d399084359495250dc8184671ff498adfbf2 --- /dev/null +++ b/dynamic_predictor/croco/datasets/habitat_sim/generate_from_metadata.py @@ -0,0 +1,92 @@ +# Copyright (C) 2022-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). + +""" +Script to generate image pairs for a given scene reproducing poses provided in a metadata file. +""" +import os +from datasets.habitat_sim.multiview_habitat_sim_generator import MultiviewHabitatSimGenerator +from datasets.habitat_sim.paths import SCENES_DATASET +import argparse +import quaternion +import PIL.Image +import cv2 +import json +from tqdm import tqdm + +def generate_multiview_images_from_metadata(metadata_filename, + output_dir, + overload_params = dict(), + scene_datasets_paths=None, + exist_ok=False): + """ + Generate images from a metadata file for reproducibility purposes. + """ + # Reorder paths by decreasing label length, to avoid collisions when testing if a string by such label + if scene_datasets_paths is not None: + scene_datasets_paths = dict(sorted(scene_datasets_paths.items(), key= lambda x: len(x[0]), reverse=True)) + + with open(metadata_filename, 'r') as f: + input_metadata = json.load(f) + metadata = dict() + for key, value in input_metadata.items(): + # Optionally replace some paths + if key in ("scene_dataset_config_file", "scene", "navmesh") and value != "": + if scene_datasets_paths is not None: + for dataset_label, dataset_path in scene_datasets_paths.items(): + if value.startswith(dataset_label): + value = os.path.normpath(os.path.join(dataset_path, os.path.relpath(value, dataset_label))) + break + metadata[key] = value + + # Overload some parameters + for key, value in overload_params.items(): + metadata[key] = value + + generation_entries = dict([(key, value) for key, value in metadata.items() if not (key in ('multiviews', 'output_dir', 'generate_depth'))]) + generate_depth = metadata["generate_depth"] + + os.makedirs(output_dir, exist_ok=exist_ok) + + generator = MultiviewHabitatSimGenerator(**generation_entries) + + # Generate views + for idx_label, data in tqdm(metadata['multiviews'].items()): + positions = data["positions"] + orientations = data["orientations"] + n = len(positions) + for oidx in range(n): + observation = generator.render_viewpoint(positions[oidx], quaternion.from_float_array(orientations[oidx])) + observation_label = f"{oidx + 1}" # Leonid is indexing starting from 1 + # Color image saved using PIL + img = PIL.Image.fromarray(observation['color'][:,:,:3]) + filename = os.path.join(output_dir, f"{idx_label}_{observation_label}.jpeg") + img.save(filename) + if generate_depth: + # Depth image as EXR file + filename = os.path.join(output_dir, 
f"{idx_label}_{observation_label}_depth.exr") + cv2.imwrite(filename, observation['depth'], [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF]) + # Camera parameters + camera_params = dict([(key, observation[key].tolist()) for key in ("camera_intrinsics", "R_cam2world", "t_cam2world")]) + filename = os.path.join(output_dir, f"{idx_label}_{observation_label}_camera_params.json") + with open(filename, "w") as f: + json.dump(camera_params, f) + # Save metadata + with open(os.path.join(output_dir, "metadata.json"), "w") as f: + json.dump(metadata, f) + + generator.close() + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--metadata_filename", required=True) + parser.add_argument("--output_dir", required=True) + args = parser.parse_args() + + generate_multiview_images_from_metadata(metadata_filename=args.metadata_filename, + output_dir=args.output_dir, + scene_datasets_paths=SCENES_DATASET, + overload_params=dict(), + exist_ok=True) + + \ No newline at end of file diff --git a/dynamic_predictor/croco/datasets/habitat_sim/generate_from_metadata_files.py b/dynamic_predictor/croco/datasets/habitat_sim/generate_from_metadata_files.py new file mode 100644 index 0000000000000000000000000000000000000000..962ef849d8c31397b8622df4f2d9140175d78873 --- /dev/null +++ b/dynamic_predictor/croco/datasets/habitat_sim/generate_from_metadata_files.py @@ -0,0 +1,27 @@ +# Copyright (C) 2022-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). + +""" +Script generating commandlines to generate image pairs from metadata files. +""" +import os +import glob +from tqdm import tqdm +import argparse + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--input_dir", required=True) + parser.add_argument("--output_dir", required=True) + parser.add_argument("--prefix", default="", help="Commanline prefix, useful e.g. to setup environment.") + args = parser.parse_args() + + input_metadata_filenames = glob.iglob(f"{args.input_dir}/**/metadata.json", recursive=True) + + for metadata_filename in tqdm(input_metadata_filenames): + output_dir = os.path.join(args.output_dir, os.path.relpath(os.path.dirname(metadata_filename), args.input_dir)) + # Do not process the scene if the metadata file already exists + if os.path.exists(os.path.join(output_dir, "metadata.json")): + continue + commandline = f"{args.prefix}python datasets/habitat_sim/generate_from_metadata.py --metadata_filename={metadata_filename} --output_dir={output_dir}" + print(commandline) diff --git a/dynamic_predictor/croco/datasets/habitat_sim/generate_multiview_images.py b/dynamic_predictor/croco/datasets/habitat_sim/generate_multiview_images.py new file mode 100644 index 0000000000000000000000000000000000000000..421d49a1696474415940493296b3f2d982398850 --- /dev/null +++ b/dynamic_predictor/croco/datasets/habitat_sim/generate_multiview_images.py @@ -0,0 +1,177 @@ +# Copyright (C) 2022-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). 
+ +import os +from tqdm import tqdm +import argparse +import PIL.Image +import numpy as np +import json +from datasets.habitat_sim.multiview_habitat_sim_generator import MultiviewHabitatSimGenerator, NoNaviguableSpaceError +from datasets.habitat_sim.paths import list_scenes_available +import cv2 +import quaternion +import shutil + +def generate_multiview_images_for_scene(scene_dataset_config_file, + scene, + navmesh, + output_dir, + views_count, + size, + exist_ok=False, + generate_depth=False, + **kwargs): + """ + Generate tuples of overlapping views for a given scene. + generate_depth: generate depth images and camera parameters. + """ + if os.path.exists(output_dir) and not exist_ok: + print(f"Scene {scene}: data already generated. Ignoring generation.") + return + try: + print(f"Scene {scene}: {size} multiview acquisitions to generate...") + os.makedirs(output_dir, exist_ok=exist_ok) + + metadata_filename = os.path.join(output_dir, "metadata.json") + + metadata_template = dict(scene_dataset_config_file=scene_dataset_config_file, + scene=scene, + navmesh=navmesh, + views_count=views_count, + size=size, + generate_depth=generate_depth, + **kwargs) + metadata_template["multiviews"] = dict() + + if os.path.exists(metadata_filename): + print("Metadata file already exists:", metadata_filename) + print("Loading already generated metadata file...") + with open(metadata_filename, "r") as f: + metadata = json.load(f) + + for key in metadata_template.keys(): + if key != "multiviews": + assert metadata_template[key] == metadata[key], f"existing file is inconsistent with the input parameters:\nKey: {key}\nmetadata: {metadata[key]}\ntemplate: {metadata_template[key]}." + else: + print("No temporary file found. Starting generation from scratch...") + metadata = metadata_template + + starting_id = len(metadata["multiviews"]) + print(f"Starting generation from index {starting_id}/{size}...") + if starting_id >= size: + print("Generation already done.") + return + + generator = MultiviewHabitatSimGenerator(scene_dataset_config_file=scene_dataset_config_file, + scene=scene, + navmesh=navmesh, + views_count = views_count, + size = size, + **kwargs) + + for idx in tqdm(range(starting_id, size)): + # Generate / re-generate the observations + try: + data = generator[idx] + observations = data["observations"] + positions = data["positions"] + orientations = data["orientations"] + + idx_label = f"{idx:08}" + for oidx, observation in enumerate(observations): + observation_label = f"{oidx + 1}" # Leonid is indexing starting from 1 + # Color image saved using PIL + img = PIL.Image.fromarray(observation['color'][:,:,:3]) + filename = os.path.join(output_dir, f"{idx_label}_{observation_label}.jpeg") + img.save(filename) + if generate_depth: + # Depth image as EXR file + filename = os.path.join(output_dir, f"{idx_label}_{observation_label}_depth.exr") + cv2.imwrite(filename, observation['depth'], [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF]) + # Camera parameters + camera_params = dict([(key, observation[key].tolist()) for key in ("camera_intrinsics", "R_cam2world", "t_cam2world")]) + filename = os.path.join(output_dir, f"{idx_label}_{observation_label}_camera_params.json") + with open(filename, "w") as f: + json.dump(camera_params, f) + metadata["multiviews"][idx_label] = {"positions": positions.tolist(), + "orientations": orientations.tolist(), + "covisibility_ratios": data["covisibility_ratios"].tolist(), + "valid_fractions": data["valid_fractions"].tolist(), + "pairwise_visibility_ratios": 
data["pairwise_visibility_ratios"].tolist()} + except RecursionError: + print("Recursion error: unable to sample observations for this scene. We will stop there.") + break + + # Regularly save a temporary metadata file, in case we need to restart the generation + if idx % 10 == 0: + with open(metadata_filename, "w") as f: + json.dump(metadata, f) + + # Save metadata + with open(metadata_filename, "w") as f: + json.dump(metadata, f) + + generator.close() + except NoNaviguableSpaceError: + pass + +def create_commandline(scene_data, generate_depth, exist_ok=False): + """ + Create a commandline string to generate a scene. + """ + def my_formatting(val): + if val is None or val == "": + return '""' + else: + return val + commandline = f"""python {__file__} --scene {my_formatting(scene_data.scene)} + --scene_dataset_config_file {my_formatting(scene_data.scene_dataset_config_file)} + --navmesh {my_formatting(scene_data.navmesh)} + --output_dir {my_formatting(scene_data.output_dir)} + --generate_depth {int(generate_depth)} + --exist_ok {int(exist_ok)} + """ + commandline = " ".join(commandline.split()) + return commandline + +if __name__ == "__main__": + os.umask(2) + + parser = argparse.ArgumentParser(description="""Example of use -- listing commands to generate data for scenes available: + > python datasets/habitat_sim/generate_multiview_habitat_images.py --list_commands + """) + + parser.add_argument("--output_dir", type=str, required=True) + parser.add_argument("--list_commands", action='store_true', help="list commandlines to run if true") + parser.add_argument("--scene", type=str, default="") + parser.add_argument("--scene_dataset_config_file", type=str, default="") + parser.add_argument("--navmesh", type=str, default="") + + parser.add_argument("--generate_depth", type=int, default=1) + parser.add_argument("--exist_ok", type=int, default=0) + + kwargs = dict(resolution=(256,256), hfov=60, views_count = 2, size=1000) + + args = parser.parse_args() + generate_depth=bool(args.generate_depth) + exist_ok = bool(args.exist_ok) + + if args.list_commands: + # Listing scenes available... + scenes_data = list_scenes_available(base_output_dir=args.output_dir) + + for scene_data in scenes_data: + print(create_commandline(scene_data, generate_depth=generate_depth, exist_ok=exist_ok)) + else: + if args.scene == "" or args.output_dir == "": + print("Missing scene or output dir argument!") + print(parser.format_help()) + else: + generate_multiview_images_for_scene(scene=args.scene, + scene_dataset_config_file = args.scene_dataset_config_file, + navmesh = args.navmesh, + output_dir = args.output_dir, + exist_ok=exist_ok, + generate_depth=generate_depth, + **kwargs) \ No newline at end of file diff --git a/dynamic_predictor/croco/datasets/habitat_sim/multiview_habitat_sim_generator.py b/dynamic_predictor/croco/datasets/habitat_sim/multiview_habitat_sim_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..91e5f923b836a645caf5d8e4aacc425047e3c144 --- /dev/null +++ b/dynamic_predictor/croco/datasets/habitat_sim/multiview_habitat_sim_generator.py @@ -0,0 +1,390 @@ +# Copyright (C) 2022-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). 
+ +import os +import numpy as np +import quaternion +import habitat_sim +import json +from sklearn.neighbors import NearestNeighbors +import cv2 + +# OpenCV to habitat camera convention transformation +R_OPENCV2HABITAT = np.stack((habitat_sim.geo.RIGHT, -habitat_sim.geo.UP, habitat_sim.geo.FRONT), axis=0) +R_HABITAT2OPENCV = R_OPENCV2HABITAT.T +DEG2RAD = np.pi / 180 + +def compute_camera_intrinsics(height, width, hfov): + f = width/2 / np.tan(hfov/2 * np.pi/180) + cu, cv = width/2, height/2 + return f, cu, cv + +def compute_camera_pose_opencv_convention(camera_position, camera_orientation): + R_cam2world = quaternion.as_rotation_matrix(camera_orientation) @ R_OPENCV2HABITAT + t_cam2world = np.asarray(camera_position) + return R_cam2world, t_cam2world + +def compute_pointmap(depthmap, hfov): + """ Compute a HxWx3 pointmap in camera frame from a HxW depth map.""" + height, width = depthmap.shape + f, cu, cv = compute_camera_intrinsics(height, width, hfov) + # Cast depth map to point + z_cam = depthmap + u, v = np.meshgrid(range(width), range(height)) + x_cam = (u - cu) / f * z_cam + y_cam = (v - cv) / f * z_cam + X_cam = np.stack((x_cam, y_cam, z_cam), axis=-1) + return X_cam + +def compute_pointcloud(depthmap, hfov, camera_position, camera_rotation): + """Return a 3D point cloud corresponding to valid pixels of the depth map""" + R_cam2world, t_cam2world = compute_camera_pose_opencv_convention(camera_position, camera_rotation) + + X_cam = compute_pointmap(depthmap=depthmap, hfov=hfov) + valid_mask = (X_cam[:,:,2] != 0.0) + + X_cam = X_cam.reshape(-1, 3)[valid_mask.flatten()] + X_world = X_cam @ R_cam2world.T + t_cam2world.reshape(1, 3) + return X_world + +def compute_pointcloud_overlaps_scikit(pointcloud1, pointcloud2, distance_threshold, compute_symmetric=False): + """ + Compute 'overlapping' metrics based on a distance threshold between two point clouds. + """ + nbrs = NearestNeighbors(n_neighbors=1, algorithm = 'kd_tree').fit(pointcloud2) + distances, indices = nbrs.kneighbors(pointcloud1) + intersection1 = np.count_nonzero(distances.flatten() < distance_threshold) + + data = {"intersection1": intersection1, + "size1": len(pointcloud1)} + if compute_symmetric: + nbrs = NearestNeighbors(n_neighbors=1, algorithm = 'kd_tree').fit(pointcloud1) + distances, indices = nbrs.kneighbors(pointcloud2) + intersection2 = np.count_nonzero(distances.flatten() < distance_threshold) + data["intersection2"] = intersection2 + data["size2"] = len(pointcloud2) + + return data + +def _append_camera_parameters(observation, hfov, camera_location, camera_rotation): + """ + Add camera parameters to the observation dictionnary produced by Habitat-Sim + In-place modifications. + """ + R_cam2world, t_cam2world = compute_camera_pose_opencv_convention(camera_location, camera_rotation) + height, width = observation['depth'].shape + f, cu, cv = compute_camera_intrinsics(height, width, hfov) + K = np.asarray([[f, 0, cu], + [0, f, cv], + [0, 0, 1.0]]) + observation["camera_intrinsics"] = K + observation["t_cam2world"] = t_cam2world + observation["R_cam2world"] = R_cam2world + +def look_at(eye, center, up, return_cam2world=True): + """ + Return camera pose looking at a given center point. + Analogous of gluLookAt function, using OpenCV camera convention. 
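+    The basis is built explicitly: z is the unit vector from eye towards center,
+    y starts from -up (the image y axis points downwards in the OpenCV convention)
+    and is re-orthogonalized against z, and x completes the frame via a cross product.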
+ """ + z = center - eye + z /= np.linalg.norm(z, axis=-1, keepdims=True) + y = -up + y = y - np.sum(y * z, axis=-1, keepdims=True) * z + y /= np.linalg.norm(y, axis=-1, keepdims=True) + x = np.cross(y, z, axis=-1) + + if return_cam2world: + R = np.stack((x, y, z), axis=-1) + t = eye + else: + # World to camera transformation + # Transposed matrix + R = np.stack((x, y, z), axis=-2) + t = - np.einsum('...ij, ...j', R, eye) + return R, t + +def look_at_for_habitat(eye, center, up, return_cam2world=True): + R, t = look_at(eye, center, up) + orientation = quaternion.from_rotation_matrix(R @ R_OPENCV2HABITAT.T) + return orientation, t + +def generate_orientation_noise(pan_range, tilt_range, roll_range): + return (quaternion.from_rotation_vector(np.random.uniform(*pan_range) * DEG2RAD * habitat_sim.geo.UP) + * quaternion.from_rotation_vector(np.random.uniform(*tilt_range) * DEG2RAD * habitat_sim.geo.RIGHT) + * quaternion.from_rotation_vector(np.random.uniform(*roll_range) * DEG2RAD * habitat_sim.geo.FRONT)) + + +class NoNaviguableSpaceError(RuntimeError): + def __init__(self, *args): + super().__init__(*args) + +class MultiviewHabitatSimGenerator: + def __init__(self, + scene, + navmesh, + scene_dataset_config_file, + resolution = (240, 320), + views_count=2, + hfov = 60, + gpu_id = 0, + size = 10000, + minimum_covisibility = 0.5, + transform = None): + self.scene = scene + self.navmesh = navmesh + self.scene_dataset_config_file = scene_dataset_config_file + self.resolution = resolution + self.views_count = views_count + assert(self.views_count >= 1) + self.hfov = hfov + self.gpu_id = gpu_id + self.size = size + self.transform = transform + + # Noise added to camera orientation + self.pan_range = (-3, 3) + self.tilt_range = (-10, 10) + self.roll_range = (-5, 5) + + # Height range to sample cameras + self.height_range = (1.2, 1.8) + + # Random steps between the camera views + self.random_steps_count = 5 + self.random_step_variance = 2.0 + + # Minimum fraction of the scene which should be valid (well defined depth) + self.minimum_valid_fraction = 0.7 + + # Distance threshold to see to select pairs + self.distance_threshold = 0.05 + # Minimum IoU of a view point cloud with respect to the reference view to be kept. + self.minimum_covisibility = minimum_covisibility + + # Maximum number of retries. 
+ self.max_attempts_count = 100 + + self.seed = None + self._lazy_initialization() + + def _lazy_initialization(self): + # Lazy random seeding and instantiation of the simulator to deal with multiprocessing properly + if self.seed == None: + # Re-seed numpy generator + np.random.seed() + self.seed = np.random.randint(2**32-1) + sim_cfg = habitat_sim.SimulatorConfiguration() + sim_cfg.scene_id = self.scene + if self.scene_dataset_config_file is not None and self.scene_dataset_config_file != "": + sim_cfg.scene_dataset_config_file = self.scene_dataset_config_file + sim_cfg.random_seed = self.seed + sim_cfg.load_semantic_mesh = False + sim_cfg.gpu_device_id = self.gpu_id + + depth_sensor_spec = habitat_sim.CameraSensorSpec() + depth_sensor_spec.uuid = "depth" + depth_sensor_spec.sensor_type = habitat_sim.SensorType.DEPTH + depth_sensor_spec.resolution = self.resolution + depth_sensor_spec.hfov = self.hfov + depth_sensor_spec.position = [0.0, 0.0, 0] + depth_sensor_spec.orientation + + rgb_sensor_spec = habitat_sim.CameraSensorSpec() + rgb_sensor_spec.uuid = "color" + rgb_sensor_spec.sensor_type = habitat_sim.SensorType.COLOR + rgb_sensor_spec.resolution = self.resolution + rgb_sensor_spec.hfov = self.hfov + rgb_sensor_spec.position = [0.0, 0.0, 0] + agent_cfg = habitat_sim.agent.AgentConfiguration(sensor_specifications=[rgb_sensor_spec, depth_sensor_spec]) + + cfg = habitat_sim.Configuration(sim_cfg, [agent_cfg]) + self.sim = habitat_sim.Simulator(cfg) + if self.navmesh is not None and self.navmesh != "": + # Use pre-computed navmesh when available (usually better than those generated automatically) + self.sim.pathfinder.load_nav_mesh(self.navmesh) + + if not self.sim.pathfinder.is_loaded: + # Try to compute a navmesh + navmesh_settings = habitat_sim.NavMeshSettings() + navmesh_settings.set_defaults() + self.sim.recompute_navmesh(self.sim.pathfinder, navmesh_settings, True) + + # Ensure that the navmesh is not empty + if not self.sim.pathfinder.is_loaded: + raise NoNaviguableSpaceError(f"No naviguable location (scene: {self.scene} -- navmesh: {self.navmesh})") + + self.agent = self.sim.initialize_agent(agent_id=0) + + def close(self): + self.sim.close() + + def __del__(self): + self.sim.close() + + def __len__(self): + return self.size + + def sample_random_viewpoint(self): + """ Sample a random viewpoint using the navmesh """ + nav_point = self.sim.pathfinder.get_random_navigable_point() + + # Sample a random viewpoint height + viewpoint_height = np.random.uniform(*self.height_range) + viewpoint_position = nav_point + viewpoint_height * habitat_sim.geo.UP + viewpoint_orientation = quaternion.from_rotation_vector(np.random.uniform(0, 2 * np.pi) * habitat_sim.geo.UP) * generate_orientation_noise(self.pan_range, self.tilt_range, self.roll_range) + return viewpoint_position, viewpoint_orientation, nav_point + + def sample_other_random_viewpoint(self, observed_point, nav_point): + """ Sample a random viewpoint close to an existing one, using the navmesh and a reference observed point.""" + other_nav_point = nav_point + + walk_directions = self.random_step_variance * np.asarray([1,0,1]) + for i in range(self.random_steps_count): + temp = self.sim.pathfinder.snap_point(other_nav_point + walk_directions * np.random.normal(size=3)) + # Snapping may return nan when it fails + if not np.isnan(temp[0]): + other_nav_point = temp + + other_viewpoint_height = np.random.uniform(*self.height_range) + other_viewpoint_position = other_nav_point + other_viewpoint_height * habitat_sim.geo.UP + + # Set viewing 
direction towards the central point + rotation, position = look_at_for_habitat(eye=other_viewpoint_position, center=observed_point, up=habitat_sim.geo.UP, return_cam2world=True) + rotation = rotation * generate_orientation_noise(self.pan_range, self.tilt_range, self.roll_range) + return position, rotation, other_nav_point + + def is_other_pointcloud_overlapping(self, ref_pointcloud, other_pointcloud): + """ Check if a viewpoint is valid and overlaps significantly with a reference one. """ + # Observation + pixels_count = self.resolution[0] * self.resolution[1] + valid_fraction = len(other_pointcloud) / pixels_count + assert valid_fraction <= 1.0 and valid_fraction >= 0.0 + overlap = compute_pointcloud_overlaps_scikit(ref_pointcloud, other_pointcloud, self.distance_threshold, compute_symmetric=True) + covisibility = min(overlap["intersection1"] / pixels_count, overlap["intersection2"] / pixels_count) + is_valid = (valid_fraction >= self.minimum_valid_fraction) and (covisibility >= self.minimum_covisibility) + return is_valid, valid_fraction, covisibility + + def is_other_viewpoint_overlapping(self, ref_pointcloud, observation, position, rotation): + """ Check if a viewpoint is valid and overlaps significantly with a reference one. """ + # Observation + other_pointcloud = compute_pointcloud(observation['depth'], self.hfov, position, rotation) + return self.is_other_pointcloud_overlapping(ref_pointcloud, other_pointcloud) + + def render_viewpoint(self, viewpoint_position, viewpoint_orientation): + agent_state = habitat_sim.AgentState() + agent_state.position = viewpoint_position + agent_state.rotation = viewpoint_orientation + self.agent.set_state(agent_state) + viewpoint_observations = self.sim.get_sensor_observations(agent_ids=0) + _append_camera_parameters(viewpoint_observations, self.hfov, viewpoint_position, viewpoint_orientation) + return viewpoint_observations + + def __getitem__(self, useless_idx): + ref_position, ref_orientation, nav_point = self.sample_random_viewpoint() + ref_observations = self.render_viewpoint(ref_position, ref_orientation) + # Extract point cloud + ref_pointcloud = compute_pointcloud(depthmap=ref_observations['depth'], hfov=self.hfov, + camera_position=ref_position, camera_rotation=ref_orientation) + + pixels_count = self.resolution[0] * self.resolution[1] + ref_valid_fraction = len(ref_pointcloud) / pixels_count + assert ref_valid_fraction <= 1.0 and ref_valid_fraction >= 0.0 + if ref_valid_fraction < self.minimum_valid_fraction: + # This should produce a recursion error at some point when something is very wrong. 
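+            # (The retry works because __getitem__ ignores its index argument
+            # and samples a fresh random viewpoint on every call.)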
+ return self[0] + # Pick an reference observed point in the point cloud + observed_point = np.mean(ref_pointcloud, axis=0) + + # Add the first image as reference + viewpoints_observations = [ref_observations] + viewpoints_covisibility = [ref_valid_fraction] + viewpoints_positions = [ref_position] + viewpoints_orientations = [quaternion.as_float_array(ref_orientation)] + viewpoints_clouds = [ref_pointcloud] + viewpoints_valid_fractions = [ref_valid_fraction] + + for _ in range(self.views_count - 1): + # Generate an other viewpoint using some dummy random walk + successful_sampling = False + for sampling_attempt in range(self.max_attempts_count): + position, rotation, _ = self.sample_other_random_viewpoint(observed_point, nav_point) + # Observation + other_viewpoint_observations = self.render_viewpoint(position, rotation) + other_pointcloud = compute_pointcloud(other_viewpoint_observations['depth'], self.hfov, position, rotation) + + is_valid, valid_fraction, covisibility = self.is_other_pointcloud_overlapping(ref_pointcloud, other_pointcloud) + if is_valid: + successful_sampling = True + break + if not successful_sampling: + print("WARNING: Maximum number of attempts reached.") + # Dirty hack, try using a novel original viewpoint + return self[0] + viewpoints_observations.append(other_viewpoint_observations) + viewpoints_covisibility.append(covisibility) + viewpoints_positions.append(position) + viewpoints_orientations.append(quaternion.as_float_array(rotation)) # WXYZ convention for the quaternion encoding. + viewpoints_clouds.append(other_pointcloud) + viewpoints_valid_fractions.append(valid_fraction) + + # Estimate relations between all pairs of images + pairwise_visibility_ratios = np.ones((len(viewpoints_observations), len(viewpoints_observations))) + for i in range(len(viewpoints_observations)): + pairwise_visibility_ratios[i,i] = viewpoints_valid_fractions[i] + for j in range(i+1, len(viewpoints_observations)): + overlap = compute_pointcloud_overlaps_scikit(viewpoints_clouds[i], viewpoints_clouds[j], self.distance_threshold, compute_symmetric=True) + pairwise_visibility_ratios[i,j] = overlap['intersection1'] / pixels_count + pairwise_visibility_ratios[j,i] = overlap['intersection2'] / pixels_count + + # IoU is relative to the image 0 + data = {"observations": viewpoints_observations, + "positions": np.asarray(viewpoints_positions), + "orientations": np.asarray(viewpoints_orientations), + "covisibility_ratios": np.asarray(viewpoints_covisibility), + "valid_fractions": np.asarray(viewpoints_valid_fractions, dtype=float), + "pairwise_visibility_ratios": np.asarray(pairwise_visibility_ratios, dtype=float), + } + + if self.transform is not None: + data = self.transform(data) + return data + + def generate_random_spiral_trajectory(self, images_count = 100, max_radius=0.5, half_turns=5, use_constant_orientation=False): + """ + Return a list of images corresponding to a spiral trajectory from a random starting point. + Useful to generate nice visualisations. 
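+        The trajectory is parametrized by alpha in [0, 1]: the radius
+        r = max_radius * |sin(alpha * pi)| grows and then shrinks back to zero,
+        while theta = alpha * half_turns * pi sweeps the spiral in the plane of
+        the reference camera.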
+ Use an even number of half turns to get a nice "C1-continuous" loop effect + """ + ref_position, ref_orientation, navpoint = self.sample_random_viewpoint() + ref_observations = self.render_viewpoint(ref_position, ref_orientation) + ref_pointcloud = compute_pointcloud(depthmap=ref_observations['depth'], hfov=self.hfov, + camera_position=ref_position, camera_rotation=ref_orientation) + pixels_count = self.resolution[0] * self.resolution[1] + if len(ref_pointcloud) / pixels_count < self.minimum_valid_fraction: + # Dirty hack: ensure that the valid part of the image is significant + return self.generate_random_spiral_trajectory(images_count, max_radius, half_turns, use_constant_orientation) + + # Pick an observed point in the point cloud + observed_point = np.mean(ref_pointcloud, axis=0) + ref_R, ref_t = compute_camera_pose_opencv_convention(ref_position, ref_orientation) + + images = [] + is_valid = [] + # Spiral trajectory, use_constant orientation + for i, alpha in enumerate(np.linspace(0, 1, images_count)): + r = max_radius * np.abs(np.sin(alpha * np.pi)) # Increase then decrease the radius + theta = alpha * half_turns * np.pi + x = r * np.cos(theta) + y = r * np.sin(theta) + z = 0.0 + position = ref_position + (ref_R @ np.asarray([x, y, z]).reshape(3,1)).flatten() + if use_constant_orientation: + orientation = ref_orientation + else: + # trajectory looking at a mean point in front of the ref observation + orientation, position = look_at_for_habitat(eye=position, center=observed_point, up=habitat_sim.geo.UP) + observations = self.render_viewpoint(position, orientation) + images.append(observations['color'][...,:3]) + _is_valid, valid_fraction, iou = self.is_other_viewpoint_overlapping(ref_pointcloud, observations, position, orientation) + is_valid.append(_is_valid) + return images, np.all(is_valid) \ No newline at end of file diff --git a/dynamic_predictor/croco/datasets/habitat_sim/pack_metadata_files.py b/dynamic_predictor/croco/datasets/habitat_sim/pack_metadata_files.py new file mode 100644 index 0000000000000000000000000000000000000000..10672a01f7dd615d3b4df37781f7f6f97e753ba6 --- /dev/null +++ b/dynamic_predictor/croco/datasets/habitat_sim/pack_metadata_files.py @@ -0,0 +1,69 @@ +# Copyright (C) 2022-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). +""" +Utility script to pack metadata files of the dataset in order to be able to re-generate it elsewhere. +""" +import os +import glob +from tqdm import tqdm +import shutil +import json +from datasets.habitat_sim.paths import * +import argparse +import collections + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("input_dir") + parser.add_argument("output_dir") + args = parser.parse_args() + + input_dirname = args.input_dir + output_dirname = args.output_dir + + input_metadata_filenames = glob.iglob(f"{input_dirname}/**/metadata.json", recursive=True) + + images_count = collections.defaultdict(lambda : 0) + + os.makedirs(output_dirname) + for input_filename in tqdm(input_metadata_filenames): + # Ignore empty files + with open(input_filename, "r") as f: + original_metadata = json.load(f) + if "multiviews" not in original_metadata or len(original_metadata["multiviews"]) == 0: + print("No views in", input_filename) + continue + + relpath = os.path.relpath(input_filename, input_dirname) + print(relpath) + + # Copy metadata, while replacing scene paths by generic keys depending on the dataset, for portability. 
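+        # (For instance, a scene under "./data/habitat-sim-data/scene_datasets/hm3d/"
+        # is rewritten as "hm3d/<relative path>" using the SCENES_DATASET mapping.)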
+        # Data paths are sorted by decreasing length to avoid potential bugs due to paths starting with the same string pattern.
+        scenes_dataset_paths = dict(sorted(SCENES_DATASET.items(), key=lambda x: len(x[1]), reverse=True))
+        metadata = dict()
+        for key, value in original_metadata.items():
+            if key in ("scene_dataset_config_file", "scene", "navmesh") and value != "":
+                known_path = False
+                for dataset, dataset_path in scenes_dataset_paths.items():
+                    if value.startswith(dataset_path):
+                        value = os.path.join(dataset, os.path.relpath(value, dataset_path))
+                        known_path = True
+                        break
+                if not known_path:
+                    raise KeyError("Unknown path: " + value)
+            metadata[key] = value
+
+        # Compile some general statistics while packing data
+        scene_split = metadata["scene"].split("/")
+        upper_level = "/".join(scene_split[:2]) if scene_split[0] == "hm3d" else scene_split[0]
+        images_count[upper_level] += len(metadata["multiviews"])
+
+        output_filename = os.path.join(output_dirname, relpath)
+        os.makedirs(os.path.dirname(output_filename), exist_ok=True)
+        with open(output_filename, "w") as f:
+            json.dump(metadata, f)
+
+    # Print statistics
+    print("Images count:")
+    for upper_level, count in images_count.items():
+        print(f"- {upper_level}: {count}")
\ No newline at end of file
diff --git a/dynamic_predictor/croco/datasets/habitat_sim/paths.py b/dynamic_predictor/croco/datasets/habitat_sim/paths.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d63b5fa29c274ddfeae084734a35ba66d7edee8
--- /dev/null
+++ b/dynamic_predictor/croco/datasets/habitat_sim/paths.py
@@ -0,0 +1,129 @@
+# Copyright (C) 2022-present Naver Corporation. All rights reserved.
+# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
+
+"""
+Paths to Habitat-Sim scenes
+"""
+
+import os
+import json
+import collections
+from tqdm import tqdm
+
+
+# Hardcoded paths to the different scene datasets
+SCENES_DATASET = {
+    "hm3d": "./data/habitat-sim-data/scene_datasets/hm3d/",
+    "gibson": "./data/habitat-sim-data/scene_datasets/gibson/",
+    "habitat-test-scenes": "./data/habitat-sim/scene_datasets/habitat-test-scenes/",
+    "replica_cad_baked_lighting": "./data/habitat-sim/scene_datasets/replica_cad_baked_lighting/",
+    "replica_cad": "./data/habitat-sim/scene_datasets/replica_cad/",
+    "replica": "./data/habitat-sim/scene_datasets/ReplicaDataset/",
+    "scannet": "./data/habitat-sim/scene_datasets/scannet/"
+}
+
+SceneData = collections.namedtuple("SceneData", ["scene_dataset_config_file", "scene", "navmesh", "output_dir"])
+
+def list_replicacad_scenes(base_output_dir, base_path=SCENES_DATASET["replica_cad"]):
+    scene_dataset_config_file = os.path.join(base_path, "replicaCAD.scene_dataset_config.json")
+    scenes = [f"apt_{i}" for i in range(6)] + ["empty_stage"]
+    navmeshes = [f"navmeshes/apt_{i}_static_furniture.navmesh" for i in range(6)] + ["empty_stage.navmesh"]
+    scenes_data = []
+    for idx in range(len(scenes)):
+        output_dir = os.path.join(base_output_dir, "ReplicaCAD", scenes[idx])
+        # Add scene
+        data = SceneData(scene_dataset_config_file=scene_dataset_config_file,
+                         scene = scenes[idx] + ".scene_instance.json",
+                         navmesh = os.path.join(base_path, navmeshes[idx]),
+                         output_dir = output_dir)
+        scenes_data.append(data)
+    return scenes_data
+
+def list_replica_cad_baked_lighting_scenes(base_output_dir, base_path=SCENES_DATASET["replica_cad_baked_lighting"]):
+    scene_dataset_config_file = os.path.join(base_path, "replicaCAD_baked.scene_dataset_config.json")
+    scenes = sum([[f"Baked_sc{i}_staging_{j:02}" for i in range(5)] for j in range(21)], [])
+    # The baked-lighting scenes ship without navmeshes, hence the empty navmesh below
+    scenes_data = []
+    for idx in range(len(scenes)):
+        output_dir = os.path.join(base_output_dir, "replica_cad_baked_lighting", scenes[idx])
+        data = SceneData(scene_dataset_config_file=scene_dataset_config_file,
+                         scene = scenes[idx],
+                         navmesh = "",
+                         output_dir = output_dir)
+        scenes_data.append(data)
+    return scenes_data
+
+def list_replica_scenes(base_output_dir, base_path):
+    scenes_data = []
+    for scene_id in os.listdir(base_path):
+        scene = os.path.join(base_path, scene_id, "mesh.ply")
+        navmesh = os.path.join(base_path, scene_id, "habitat/mesh_preseg_semantic.navmesh") # Note: unclear whether this navmesh should be used
+        scene_dataset_config_file = ""
+        output_dir = os.path.join(base_output_dir, scene_id)
+        # Add the scene
+        data = SceneData(scene_dataset_config_file = scene_dataset_config_file,
+                         scene = scene,
+                         navmesh = navmesh,
+                         output_dir = output_dir)
+        scenes_data.append(data)
+    return scenes_data
+
+
+def list_scenes(base_output_dir, base_path):
+    """
+    Generic method iterating through a base_path folder to find scenes.
+    """
+    scenes_data = []
+    for root, dirs, files in os.walk(base_path, followlinks=True):
+        folder_scenes_data = []
+        for file in files:
+            name, ext = os.path.splitext(file)
+            if ext == ".glb":
+                scene = os.path.join(root, name + ".glb")
+                navmesh = os.path.join(root, name + ".navmesh")
+                if not os.path.exists(navmesh):
+                    navmesh = ""
+                relpath = os.path.relpath(root, base_path)
+                output_dir = os.path.abspath(os.path.join(base_output_dir, relpath, name))
+                data = SceneData(scene_dataset_config_file="",
+                                 scene = scene,
+                                 navmesh = navmesh,
+                                 output_dir = output_dir)
+                folder_scenes_data.append(data)
+
+        # Specific check for HM3D:
+        # When two meshes xxxx.basis.glb and xxxx.glb are present, use the 'basis' version.
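+        # For instance, if a folder contains both "00001-abc.basis.glb" and "00001-abc.glb"
+        # (hypothetical names), only the SceneData entry for the '.basis' mesh is kept.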
+        basis_scenes = [data.scene[:-len(".basis.glb")] for data in folder_scenes_data if data.scene.endswith(".basis.glb")]
+        if len(basis_scenes) != 0:
+            folder_scenes_data = [data for data in folder_scenes_data if not (data.scene[:-len(".glb")] in basis_scenes)]
+
+        scenes_data.extend(folder_scenes_data)
+    return scenes_data
+
+def list_scenes_available(base_output_dir, scenes_dataset_paths=SCENES_DATASET):
+    scenes_data = []
+
+    # HM3D
+    for split in ("minival", "train", "val", "examples"):
+        scenes_data += list_scenes(base_output_dir=os.path.join(base_output_dir, f"hm3d/{split}/"),
+                                   base_path=f"{scenes_dataset_paths['hm3d']}/{split}")
+
+    # Gibson
+    scenes_data += list_scenes(base_output_dir=os.path.join(base_output_dir, "gibson"),
+                               base_path=scenes_dataset_paths["gibson"])
+
+    # Habitat test scenes (just a few)
+    scenes_data += list_scenes(base_output_dir=os.path.join(base_output_dir, "habitat-test-scenes"),
+                               base_path=scenes_dataset_paths["habitat-test-scenes"])
+
+    # ReplicaCAD (baked lighting)
+    scenes_data += list_replica_cad_baked_lighting_scenes(base_output_dir=base_output_dir)
+
+    # ScanNet
+    scenes_data += list_scenes(base_output_dir=os.path.join(base_output_dir, "scannet"),
+                               base_path=scenes_dataset_paths["scannet"])
+
+    # Replica
+    scenes_data += list_replica_scenes(base_output_dir=os.path.join(base_output_dir, "replica"),
+                                       base_path=scenes_dataset_paths["replica"])
+    return scenes_data
diff --git a/dynamic_predictor/croco/datasets/pairs_dataset.py b/dynamic_predictor/croco/datasets/pairs_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f107526b34e154d9013a9a7a0bde3d5ff6f581c
--- /dev/null
+++ b/dynamic_predictor/croco/datasets/pairs_dataset.py
@@ -0,0 +1,109 @@
+# Copyright (C) 2022-present Naver Corporation. All rights reserved.
+# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
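+#
+# Note on the cache format parsed below (illustrative, with hypothetical file names):
+# 'pairs.txt' contains one whitespace-separated pair of image paths per line, relative
+# to the dataset root, e.g.
+#     scene_0000/view_000042_1.jpeg scene_0000/view_000042_2.jpeg
+# while 'listing.txt' contains one prefix per line, to which '_1.jpg' and '_2.jpg'
+# are appended by load_pairs_from_list_file().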
+ +import os +from torch.utils.data import Dataset +from PIL import Image + +from datasets.transforms import get_pair_transforms + +def load_image(impath): + return Image.open(impath) + +def load_pairs_from_cache_file(fname, root=''): + assert os.path.isfile(fname), "cannot parse pairs from {:s}, file does not exist".format(fname) + with open(fname, 'r') as fid: + lines = fid.read().strip().splitlines() + pairs = [ (os.path.join(root,l.split()[0]), os.path.join(root,l.split()[1])) for l in lines] + return pairs + +def load_pairs_from_list_file(fname, root=''): + assert os.path.isfile(fname), "cannot parse pairs from {:s}, file does not exist".format(fname) + with open(fname, 'r') as fid: + lines = fid.read().strip().splitlines() + pairs = [ (os.path.join(root,l+'_1.jpg'), os.path.join(root,l+'_2.jpg')) for l in lines if not l.startswith('#')] + return pairs + + +def write_cache_file(fname, pairs, root=''): + if len(root)>0: + if not root.endswith('/'): root+='/' + assert os.path.isdir(root) + s = '' + for im1, im2 in pairs: + if len(root)>0: + assert im1.startswith(root), im1 + assert im2.startswith(root), im2 + s += '{:s} {:s}\n'.format(im1[len(root):], im2[len(root):]) + with open(fname, 'w') as fid: + fid.write(s[:-1]) + +def parse_and_cache_all_pairs(dname, data_dir='./data/'): + if dname=='habitat_release': + dirname = os.path.join(data_dir, 'habitat_release') + assert os.path.isdir(dirname), "cannot find folder for habitat_release pairs: "+dirname + cache_file = os.path.join(dirname, 'pairs.txt') + assert not os.path.isfile(cache_file), "cache file already exists: "+cache_file + + print('Parsing pairs for dataset: '+dname) + pairs = [] + for root, dirs, files in os.walk(dirname): + if 'val' in root: continue + dirs.sort() + pairs += [ (os.path.join(root,f), os.path.join(root,f[:-len('_1.jpeg')]+'_2.jpeg')) for f in sorted(files) if f.endswith('_1.jpeg')] + print('Found {:,} pairs'.format(len(pairs))) + print('Writing cache to: '+cache_file) + write_cache_file(cache_file, pairs, root=dirname) + + else: + raise NotImplementedError('Unknown dataset: '+dname) + +def dnames_to_image_pairs(dnames, data_dir='./data/'): + """ + dnames: list of datasets with image pairs, separated by + + """ + all_pairs = [] + for dname in dnames.split('+'): + if dname=='habitat_release': + dirname = os.path.join(data_dir, 'habitat_release') + assert os.path.isdir(dirname), "cannot find folder for habitat_release pairs: "+dirname + cache_file = os.path.join(dirname, 'pairs.txt') + assert os.path.isfile(cache_file), "cannot find cache file for habitat_release pairs, please first create the cache file, see instructions. "+cache_file + pairs = load_pairs_from_cache_file(cache_file, root=dirname) + elif dname in ['ARKitScenes', 'MegaDepth', '3DStreetView', 'IndoorVL']: + dirname = os.path.join(data_dir, dname+'_crops') + assert os.path.isdir(dirname), "cannot find folder for {:s} pairs: {:s}".format(dname, dirname) + list_file = os.path.join(dirname, 'listing.txt') + assert os.path.isfile(list_file), "cannot find list file for {:s} pairs, see instructions. 
{:s}".format(dname, list_file) + pairs = load_pairs_from_list_file(list_file, root=dirname) + print(' {:s}: {:,} pairs'.format(dname, len(pairs))) + all_pairs += pairs + if '+' in dnames: print(' Total: {:,} pairs'.format(len(all_pairs))) + return all_pairs + + +class PairsDataset(Dataset): + + def __init__(self, dnames, trfs='', totensor=True, normalize=True, data_dir='./data/'): + super().__init__() + self.image_pairs = dnames_to_image_pairs(dnames, data_dir=data_dir) + self.transforms = get_pair_transforms(transform_str=trfs, totensor=totensor, normalize=normalize) + + def __len__(self): + return len(self.image_pairs) + + def __getitem__(self, index): + im1path, im2path = self.image_pairs[index] + im1 = load_image(im1path) + im2 = load_image(im2path) + if self.transforms is not None: im1, im2 = self.transforms(im1, im2) + return im1, im2 + + +if __name__=="__main__": + import argparse + parser = argparse.ArgumentParser(prog="Computing and caching list of pairs for a given dataset") + parser.add_argument('--data_dir', default='./data/', type=str, help="path where data are stored") + parser.add_argument('--dataset', default='habitat_release', type=str, help="name of the dataset") + args = parser.parse_args() + parse_and_cache_all_pairs(dname=args.dataset, data_dir=args.data_dir) diff --git a/dynamic_predictor/croco/datasets/transforms.py b/dynamic_predictor/croco/datasets/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..216bac61f8254fd50e7f269ee80301f250a2d11e --- /dev/null +++ b/dynamic_predictor/croco/datasets/transforms.py @@ -0,0 +1,95 @@ +# Copyright (C) 2022-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). + +import torch +import torchvision.transforms +import torchvision.transforms.functional as F + +# "Pair": apply a transform on a pair +# "Both": apply the exact same transform to both images + +class ComposePair(torchvision.transforms.Compose): + def __call__(self, img1, img2): + for t in self.transforms: + img1, img2 = t(img1, img2) + return img1, img2 + +class NormalizeBoth(torchvision.transforms.Normalize): + def forward(self, img1, img2): + img1 = super().forward(img1) + img2 = super().forward(img2) + return img1, img2 + +class ToTensorBoth(torchvision.transforms.ToTensor): + def __call__(self, img1, img2): + img1 = super().__call__(img1) + img2 = super().__call__(img2) + return img1, img2 + +class RandomCropPair(torchvision.transforms.RandomCrop): + # the crop will be intentionally different for the two images with this class + def forward(self, img1, img2): + img1 = super().forward(img1) + img2 = super().forward(img2) + return img1, img2 + +class ColorJitterPair(torchvision.transforms.ColorJitter): + # can be symmetric (same for both images) or assymetric (different jitter params for each image) depending on assymetric_prob + def __init__(self, assymetric_prob, **kwargs): + super().__init__(**kwargs) + self.assymetric_prob = assymetric_prob + def jitter_one(self, img, fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor): + for fn_id in fn_idx: + if fn_id == 0 and brightness_factor is not None: + img = F.adjust_brightness(img, brightness_factor) + elif fn_id == 1 and contrast_factor is not None: + img = F.adjust_contrast(img, contrast_factor) + elif fn_id == 2 and saturation_factor is not None: + img = F.adjust_saturation(img, saturation_factor) + elif fn_id == 3 and hue_factor is not None: + img = F.adjust_hue(img, hue_factor) + return img + + def 
forward(self, img1, img2):
+
+        fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor = self.get_params(
+            self.brightness, self.contrast, self.saturation, self.hue
+        )
+        img1 = self.jitter_one(img1, fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor)
+        if torch.rand(1) < self.assymetric_prob: # asymmetric: resample the jitter parameters for the second image
+            fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor = self.get_params(
+                self.brightness, self.contrast, self.saturation, self.hue
+            )
+        img2 = self.jitter_one(img2, fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor)
+        return img1, img2
+
+def get_pair_transforms(transform_str, totensor=True, normalize=True):
+    # transform_str is eg crop224+color
+    trfs = []
+    for s in transform_str.split('+'):
+        if s.startswith('crop'):
+            size = int(s[len('crop'):])
+            trfs.append(RandomCropPair(size))
+        elif s=='acolor':
+            trfs.append(ColorJitterPair(assymetric_prob=1.0, brightness=(0.6, 1.4), contrast=(0.6, 1.4), saturation=(0.6, 1.4), hue=0.0))
+        elif s=='': # if transform_str was ""
+            pass
+        else:
+            raise NotImplementedError('Unknown augmentation: '+s)
+
+    if totensor:
+        trfs.append( ToTensorBoth() )
+    if normalize:
+        trfs.append( NormalizeBoth(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) )
+
+    if len(trfs)==0:
+        return None
+    elif len(trfs)==1:
+        return trfs[0] # a single pair transform is already callable on (img1, img2)
+    else:
+        return ComposePair(trfs)
+
+
+
+
+
diff --git a/dynamic_predictor/croco/demo.py b/dynamic_predictor/croco/demo.py
new file mode 100644
index 0000000000000000000000000000000000000000..91b80ccc5c98c18e20d1ce782511aa824ef28f77
--- /dev/null
+++ b/dynamic_predictor/croco/demo.py
@@ -0,0 +1,55 @@
+# Copyright (C) 2022-present Naver Corporation. All rights reserved.
+# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
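+#
+# Usage sketch (assumes the CroCo v2 checkpoint has been downloaded beforehand, to the
+# pretrained_models/CroCo_V2_ViTLarge_BaseDecoder.pth path hardcoded in main() below):
+#     python demo.py
+# This reconstructs a masked view of assets/Chateau1.png, using assets/Chateau2.png as
+# the reference view, and writes the visualization to demo_output.png.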
+
+import torch
+from models.croco import CroCoNet
+from PIL import Image
+import torchvision.transforms
+from torchvision.transforms import ToTensor, Normalize, Compose
+
+def main():
+    device = torch.device('cuda:0' if torch.cuda.is_available() and torch.cuda.device_count()>0 else 'cpu')
+
+    # load 224x224 images and transform them to tensor
+    imagenet_mean = [0.485, 0.456, 0.406]
+    imagenet_mean_tensor = torch.tensor(imagenet_mean).view(1,3,1,1).to(device, non_blocking=True)
+    imagenet_std = [0.229, 0.224, 0.225]
+    imagenet_std_tensor = torch.tensor(imagenet_std).view(1,3,1,1).to(device, non_blocking=True)
+    trfs = Compose([ToTensor(), Normalize(mean=imagenet_mean, std=imagenet_std)])
+    image1 = trfs(Image.open('assets/Chateau1.png').convert('RGB')).to(device, non_blocking=True).unsqueeze(0)
+    image2 = trfs(Image.open('assets/Chateau2.png').convert('RGB')).to(device, non_blocking=True).unsqueeze(0)
+
+    # load model
+    ckpt = torch.load('pretrained_models/CroCo_V2_ViTLarge_BaseDecoder.pth', 'cpu')
+    model = CroCoNet( **ckpt.get('croco_kwargs',{})).to(device)
+    model.eval()
+    msg = model.load_state_dict(ckpt['model'], strict=True)
+
+    # forward
+    with torch.inference_mode():
+        out, mask, target = model(image1, image2)
+
+    # the output is normalized, thus use the mean/std of the actual image to go back to RGB space
+    patchified = model.patchify(image1)
+    mean = patchified.mean(dim=-1, keepdim=True)
+    var = patchified.var(dim=-1, keepdim=True)
+    decoded_image = model.unpatchify(out * (var + 1.e-6)**.5 + mean)
+    # undo imagenet normalization, prepare masked image
+    decoded_image = decoded_image * imagenet_std_tensor + imagenet_mean_tensor
+    input_image = image1 * imagenet_std_tensor + imagenet_mean_tensor
+    ref_image = image2 * imagenet_std_tensor + imagenet_mean_tensor
+    image_masks = model.unpatchify(model.patchify(torch.ones_like(ref_image)) * mask[:,:,None])
+    masked_input_image = ((1 - image_masks) * input_image)
+
+    # make visualization
+    visualization = torch.cat((ref_image, masked_input_image, decoded_image, input_image), dim=3) # 4*(B, 3, H, W) -> B, 3, H, W*4
+    B, C, H, W = visualization.shape
+    visualization = visualization.permute(1, 0, 2, 3).reshape(C, B*H, W)
+    visualization = torchvision.transforms.functional.to_pil_image(torch.clamp(visualization, 0, 1))
+    fname = "demo_output.png"
+    visualization.save(fname)
+    print('Visualization saved in '+fname)
+
+
+if __name__=="__main__":
+    main()
diff --git a/dynamic_predictor/croco/interactive_demo.ipynb b/dynamic_predictor/croco/interactive_demo.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..6cfc960af5baac9a69029c29a16eea4e24123a71
--- /dev/null
+++ b/dynamic_predictor/croco/interactive_demo.ipynb
@@ -0,0 +1,271 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Interactive demo of Cross-view Completion."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Copyright (C) 2022-present Naver Corporation. All rights reserved.\n",
+    "# Licensed under CC BY-NC-SA 4.0 (non-commercial use only)."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "import numpy as np\n", + "from models.croco import CroCoNet\n", + "from ipywidgets import interact, interactive, fixed, interact_manual\n", + "import ipywidgets as widgets\n", + "import matplotlib.pyplot as plt\n", + "import quaternion\n", + "import models.masking" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Load CroCo model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ckpt = torch.load('pretrained_models/CroCo_V2_ViTLarge_BaseDecoder.pth', 'cpu')\n", + "model = CroCoNet( **ckpt.get('croco_kwargs',{}))\n", + "msg = model.load_state_dict(ckpt['model'], strict=True)\n", + "use_gpu = torch.cuda.is_available() and torch.cuda.device_count()>0\n", + "device = torch.device('cuda:0' if use_gpu else 'cpu')\n", + "model = model.eval()\n", + "model = model.to(device=device)\n", + "print(msg)\n", + "\n", + "def process_images(ref_image, target_image, masking_ratio, reconstruct_unmasked_patches=False):\n", + " \"\"\"\n", + " Perform Cross-View completion using two input images, specified using Numpy arrays.\n", + " \"\"\"\n", + " # Replace the mask generator\n", + " model.mask_generator = models.masking.RandomMask(model.patch_embed.num_patches, masking_ratio)\n", + "\n", + " # ImageNet-1k color normalization\n", + " imagenet_mean = torch.as_tensor([0.485, 0.456, 0.406]).reshape(1,3,1,1).to(device)\n", + " imagenet_std = torch.as_tensor([0.229, 0.224, 0.225]).reshape(1,3,1,1).to(device)\n", + "\n", + " normalize_input_colors = True\n", + " is_output_normalized = True\n", + " with torch.no_grad():\n", + " # Cast data to torch\n", + " target_image = (torch.as_tensor(target_image, dtype=torch.float, device=device).permute(2,0,1) / 255)[None]\n", + " ref_image = (torch.as_tensor(ref_image, dtype=torch.float, device=device).permute(2,0,1) / 255)[None]\n", + "\n", + " if normalize_input_colors:\n", + " ref_image = (ref_image - imagenet_mean) / imagenet_std\n", + " target_image = (target_image - imagenet_mean) / imagenet_std\n", + "\n", + " out, mask, _ = model(target_image, ref_image)\n", + " # # get target\n", + " if not is_output_normalized:\n", + " predicted_image = model.unpatchify(out)\n", + " else:\n", + " # The output only contains higher order information,\n", + " # we retrieve mean and standard deviation from the actual target image\n", + " patchified = model.patchify(target_image)\n", + " mean = patchified.mean(dim=-1, keepdim=True)\n", + " var = patchified.var(dim=-1, keepdim=True)\n", + " pred_renorm = out * (var + 1.e-6)**.5 + mean\n", + " predicted_image = model.unpatchify(pred_renorm)\n", + "\n", + " image_masks = model.unpatchify(model.patchify(torch.ones_like(ref_image)) * mask[:,:,None])\n", + " masked_target_image = (1 - image_masks) * target_image\n", + " \n", + " if not reconstruct_unmasked_patches:\n", + " # Replace unmasked patches by their actual values\n", + " predicted_image = predicted_image * image_masks + masked_target_image\n", + "\n", + " # Unapply color normalization\n", + " if normalize_input_colors:\n", + " predicted_image = predicted_image * imagenet_std + imagenet_mean\n", + " masked_target_image = masked_target_image * imagenet_std + imagenet_mean\n", + " \n", + " # Cast to Numpy\n", + " masked_target_image = np.asarray(torch.clamp(masked_target_image.squeeze(0).permute(1,2,0) * 255, 0, 255).cpu().numpy(), dtype=np.uint8)\n", + " 
predicted_image = np.asarray(torch.clamp(predicted_image.squeeze(0).permute(1,2,0) * 255, 0, 255).cpu().numpy(), dtype=np.uint8)\n", + " return masked_target_image, predicted_image" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Use the Habitat simulator to render images from arbitrary viewpoints (requires habitat_sim to be installed)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "os.environ[\"MAGNUM_LOG\"]=\"quiet\"\n", + "os.environ[\"HABITAT_SIM_LOG\"]=\"quiet\"\n", + "import habitat_sim\n", + "\n", + "scene = \"habitat-sim-data/scene_datasets/habitat-test-scenes/skokloster-castle.glb\"\n", + "navmesh = \"habitat-sim-data/scene_datasets/habitat-test-scenes/skokloster-castle.navmesh\"\n", + "\n", + "sim_cfg = habitat_sim.SimulatorConfiguration()\n", + "if use_gpu: sim_cfg.gpu_device_id = 0\n", + "sim_cfg.scene_id = scene\n", + "sim_cfg.load_semantic_mesh = False\n", + "rgb_sensor_spec = habitat_sim.CameraSensorSpec()\n", + "rgb_sensor_spec.uuid = \"color\"\n", + "rgb_sensor_spec.sensor_type = habitat_sim.SensorType.COLOR\n", + "rgb_sensor_spec.resolution = (224,224)\n", + "rgb_sensor_spec.hfov = 56.56\n", + "rgb_sensor_spec.position = [0.0, 0.0, 0.0]\n", + "rgb_sensor_spec.orientation = [0, 0, 0]\n", + "agent_cfg = habitat_sim.agent.AgentConfiguration(sensor_specifications=[rgb_sensor_spec])\n", + "\n", + "\n", + "cfg = habitat_sim.Configuration(sim_cfg, [agent_cfg])\n", + "sim = habitat_sim.Simulator(cfg)\n", + "if navmesh is not None:\n", + " sim.pathfinder.load_nav_mesh(navmesh)\n", + "agent = sim.initialize_agent(agent_id=0)\n", + "\n", + "def sample_random_viewpoint():\n", + " \"\"\" Sample a random viewpoint using the navmesh \"\"\"\n", + " nav_point = sim.pathfinder.get_random_navigable_point()\n", + " # Sample a random viewpoint height\n", + " viewpoint_height = np.random.uniform(1.0, 1.6)\n", + " viewpoint_position = nav_point + viewpoint_height * habitat_sim.geo.UP\n", + " viewpoint_orientation = quaternion.from_rotation_vector(np.random.uniform(-np.pi, np.pi) * habitat_sim.geo.UP)\n", + " return viewpoint_position, viewpoint_orientation\n", + "\n", + "def render_viewpoint(position, orientation):\n", + " agent_state = habitat_sim.AgentState()\n", + " agent_state.position = position\n", + " agent_state.rotation = orientation\n", + " agent.set_state(agent_state)\n", + " viewpoint_observations = sim.get_sensor_observations(agent_ids=0)\n", + " image = viewpoint_observations['color'][:,:,:3]\n", + " image = np.asarray(np.clip(1.5 * np.asarray(image, dtype=float), 0, 255), dtype=np.uint8)\n", + " return image" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Sample a random reference view" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ref_position, ref_orientation = sample_random_viewpoint()\n", + "ref_image = render_viewpoint(ref_position, ref_orientation)\n", + "plt.clf()\n", + "fig, axes = plt.subplots(1,1, squeeze=False, num=1)\n", + "axes[0,0].imshow(ref_image)\n", + "for ax in axes.flatten():\n", + " ax.set_xticks([])\n", + " ax.set_yticks([])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Interactive cross-view completion using CroCo" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "reconstruct_unmasked_patches = False\n", + "\n", + "def show_demo(masking_ratio, x, 
y, z, panorama, elevation):\n", + " R = quaternion.as_rotation_matrix(ref_orientation)\n", + " target_position = ref_position + x * R[:,0] + y * R[:,1] + z * R[:,2]\n", + " target_orientation = (ref_orientation\n", + " * quaternion.from_rotation_vector(-elevation * np.pi/180 * habitat_sim.geo.LEFT) \n", + " * quaternion.from_rotation_vector(-panorama * np.pi/180 * habitat_sim.geo.UP))\n", + " \n", + " ref_image = render_viewpoint(ref_position, ref_orientation)\n", + " target_image = render_viewpoint(target_position, target_orientation)\n", + "\n", + " masked_target_image, predicted_image = process_images(ref_image, target_image, masking_ratio, reconstruct_unmasked_patches)\n", + "\n", + " fig, axes = plt.subplots(1,4, squeeze=True, dpi=300)\n", + " axes[0].imshow(ref_image)\n", + " axes[0].set_xlabel(\"Reference\")\n", + " axes[1].imshow(masked_target_image)\n", + " axes[1].set_xlabel(\"Masked target\")\n", + " axes[2].imshow(predicted_image)\n", + " axes[2].set_xlabel(\"Reconstruction\") \n", + " axes[3].imshow(target_image)\n", + " axes[3].set_xlabel(\"Target\")\n", + " for ax in axes.flatten():\n", + " ax.set_xticks([])\n", + " ax.set_yticks([])\n", + "\n", + "interact(show_demo,\n", + " masking_ratio=widgets.FloatSlider(description='masking', value=0.9, min=0.0, max=1.0),\n", + " x=widgets.FloatSlider(value=0.0, min=-0.5, max=0.5, step=0.05),\n", + " y=widgets.FloatSlider(value=0.0, min=-0.5, max=0.5, step=0.05),\n", + " z=widgets.FloatSlider(value=0.0, min=-0.5, max=0.5, step=0.05),\n", + " panorama=widgets.FloatSlider(value=0.0, min=-20, max=20, step=0.5),\n", + " elevation=widgets.FloatSlider(value=0.0, min=-20, max=20, step=0.5));" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.13" + }, + "vscode": { + "interpreter": { + "hash": "f9237820cd248d7e07cb4fb9f0e4508a85d642f19d831560c0a4b61f3e907e67" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/dynamic_predictor/croco/models/blocks.py b/dynamic_predictor/croco/models/blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..9fc2d82c43dbbfa7990aa55a418c74c8e011bb15 --- /dev/null +++ b/dynamic_predictor/croco/models/blocks.py @@ -0,0 +1,252 @@ +# Copyright (C) 2022-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). 
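+#
+# Shape conventions used in this file (an illustrative note, not from the original
+# sources): token tensors are (B, N, C) and positions are integer (y, x) pairs, e.g.
+#     x = torch.randn(2, 196, 768)                # B=2, N=14*14 patches, C=768
+#     pos = PositionGetter()(2, 14, 14, x.device) # -> (2, 196, 2)
+#     y = Block(dim=768, num_heads=12)(x, pos)    # -> (2, 196, 768)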
+ + +# -------------------------------------------------------- +# Main encoder/decoder blocks +# -------------------------------------------------------- +# References: +# timm +# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py +# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/helpers.py +# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py +# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/mlp.py +# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/patch_embed.py + + +import torch +import torch.nn as nn + +from itertools import repeat +import collections.abc + + +def _ntuple(n): + def parse(x): + if isinstance(x, collections.abc.Iterable) and not isinstance(x, str): + return x + return tuple(repeat(x, n)) + return parse +to_2tuple = _ntuple(2) + +def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + """ + if drop_prob == 0. or not training: + return x + keep_prob = 1 - drop_prob + shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = x.new_empty(shape).bernoulli_(keep_prob) + if keep_prob > 0.0 and scale_by_keep: + random_tensor.div_(keep_prob) + return x * random_tensor + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + """ + def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + self.scale_by_keep = scale_by_keep + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training, self.scale_by_keep) + + def extra_repr(self): + return f'drop_prob={round(self.drop_prob,3):0.3f}' + +class Mlp(nn.Module): + """ MLP as used in Vision Transformer, MLP-Mixer and related networks""" + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, bias=True, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + bias = to_2tuple(bias) + drop_probs = to_2tuple(drop) + + self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0]) + self.act = act_layer() + self.drop1 = nn.Dropout(drop_probs[0]) + self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1]) + self.drop2 = nn.Dropout(drop_probs[1]) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop1(x) + x = self.fc2(x) + x = self.drop2(x) + return x + +class Attention(nn.Module): + + def __init__(self, dim, rope=None, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.rope = rope + + def forward(self, x, xpos): + B, N, C = x.shape + + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).transpose(1,3) + q, k, v = [qkv[:,:,i] for i in range(3)] + # q,k,v = qkv.unbind(2) # make torchscript happy (cannot use tensor as tuple) + + if self.rope is not None: + q = self.rope(q, xpos) + k = self.rope(k, xpos) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = 
attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, rope=None): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention(dim, rope=rope, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x, xpos): + x = x + self.drop_path(self.attn(self.norm1(x), xpos)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + +class CrossAttention(nn.Module): + + def __init__(self, dim, rope=None, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.projq = nn.Linear(dim, dim, bias=qkv_bias) + self.projk = nn.Linear(dim, dim, bias=qkv_bias) + self.projv = nn.Linear(dim, dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + self.rope = rope + + def forward(self, query, key, value, qpos, kpos): + B, Nq, C = query.shape + Nk = key.shape[1] + Nv = value.shape[1] + + q = self.projq(query).reshape(B,Nq,self.num_heads, C// self.num_heads).permute(0, 2, 1, 3) + k = self.projk(key).reshape(B,Nk,self.num_heads, C// self.num_heads).permute(0, 2, 1, 3) + v = self.projv(value).reshape(B,Nv,self.num_heads, C// self.num_heads).permute(0, 2, 1, 3) + + if self.rope is not None: + q = self.rope(q, qpos) + k = self.rope(k, kpos) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, Nq, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + +class DecoderBlock(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, norm_mem=True, rope=None): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention(dim, rope=rope, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.cross_attn = CrossAttention(dim, rope=rope, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + self.norm3 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + self.norm_y = norm_layer(dim) if norm_mem else nn.Identity() + + def forward(self, x, y, xpos, ypos): + x = x + self.drop_path(self.attn(self.norm1(x), xpos)) + y_ = self.norm_y(y) + x = x + self.drop_path(self.cross_attn(self.norm2(x), y_, y_, xpos, ypos)) + x = x + self.drop_path(self.mlp(self.norm3(x))) + return x, y + + +# patch embedding +class PositionGetter(object): + """ return positions of patches """ + + def __init__(self): + self.cache_positions = {} + + def __call__(self, b, h, w, device): + if not (h,w) in self.cache_positions: + x = torch.arange(w, device=device) + y = torch.arange(h, device=device) + self.cache_positions[h,w] = torch.cartesian_prod(y, x) # (h, w, 2) + pos = self.cache_positions[h,w].view(1, h*w, 2).expand(b, -1, 2).clone() + return pos + +class PatchEmbed(nn.Module): + """ just adding _init_weights + position getter compared to timm.models.layers.patch_embed.PatchEmbed""" + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True, init='xavier'): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + self.flatten = flatten + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() + + self.position_getter = PositionGetter() + self.init_type = init + + def forward(self, x): + B, C, H, W = x.shape + torch._assert(H == self.img_size[0], f"Input image height ({H}) doesn't match model ({self.img_size[0]}).") + torch._assert(W == self.img_size[1], f"Input image width ({W}) doesn't match model ({self.img_size[1]}).") + x = self.proj(x) + pos = self.position_getter(B, x.size(2), x.size(3), x.device) + if self.flatten: + x = x.flatten(2).transpose(1, 2) # BCHW -> BNC + x = self.norm(x) + return x, pos + + def _init_weights(self): + w = self.proj.weight.data + if self.init_type == 'xavier': + torch.nn.init.xavier_uniform_(w.view([w.shape[0], -1])) + elif self.init_type == 'kaiming': + torch.nn.init.kaiming_uniform_(w.view([w.shape[0], -1])) + elif self.init_type == 'zero': + torch.nn.init.zeros_(w) + bias = getattr(self.proj, 'bias', None) + if bias is not None: + torch.nn.init.zeros_(bias) + else: + raise ValueError(f"Unknown init type {self.init_type}") + diff --git a/dynamic_predictor/croco/models/criterion.py b/dynamic_predictor/croco/models/criterion.py new file mode 100644 index 0000000000000000000000000000000000000000..11696c40865344490f23796ea45e8fbd5e654731 --- /dev/null +++ b/dynamic_predictor/croco/models/criterion.py @@ -0,0 +1,37 @@ +# Copyright (C) 2022-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). 
+# +# -------------------------------------------------------- +# Criterion to train CroCo +# -------------------------------------------------------- +# References: +# MAE: https://github.com/facebookresearch/mae +# -------------------------------------------------------- + +import torch + +class MaskedMSE(torch.nn.Module): + + def __init__(self, norm_pix_loss=False, masked=True): + """ + norm_pix_loss: normalize each patch by their pixel mean and variance + masked: compute loss over the masked patches only + """ + super().__init__() + self.norm_pix_loss = norm_pix_loss + self.masked = masked + + def forward(self, pred, mask, target): + + if self.norm_pix_loss: + mean = target.mean(dim=-1, keepdim=True) + var = target.var(dim=-1, keepdim=True) + target = (target - mean) / (var + 1.e-6)**.5 + + loss = (pred - target) ** 2 + loss = loss.mean(dim=-1) # [N, L], mean loss per patch + if self.masked: + loss = (loss * mask).sum() / mask.sum() # mean loss on masked patches + else: + loss = loss.mean() # mean loss + return loss diff --git a/dynamic_predictor/croco/models/croco.py b/dynamic_predictor/croco/models/croco.py new file mode 100644 index 0000000000000000000000000000000000000000..14c68634152d75555b4c35c25af268394c5821fe --- /dev/null +++ b/dynamic_predictor/croco/models/croco.py @@ -0,0 +1,249 @@ +# Copyright (C) 2022-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). + + +# -------------------------------------------------------- +# CroCo model during pretraining +# -------------------------------------------------------- + + + +import torch +import torch.nn as nn +torch.backends.cuda.matmul.allow_tf32 = True # for gpu >= Ampere and pytorch >= 1.12 +from functools import partial + +from models.blocks import Block, DecoderBlock, PatchEmbed +from models.pos_embed import get_2d_sincos_pos_embed, RoPE2D +from models.masking import RandomMask + + +class CroCoNet(nn.Module): + + def __init__(self, + img_size=224, # input image size + patch_size=16, # patch_size + mask_ratio=0.9, # ratios of masked tokens + enc_embed_dim=768, # encoder feature dimension + enc_depth=12, # encoder depth + enc_num_heads=12, # encoder number of heads in the transformer block + dec_embed_dim=512, # decoder feature dimension + dec_depth=8, # decoder depth + dec_num_heads=16, # decoder number of heads in the transformer block + mlp_ratio=4, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + norm_im2_in_dec=True, # whether to apply normalization of the 'memory' = (second image) in the decoder + pos_embed='cosine', # positional embedding (either cosine or RoPE100) + ): + + super(CroCoNet, self).__init__() + + # patch embeddings (with initialization done as in MAE) + self._set_patch_embed(img_size, patch_size, enc_embed_dim) + + # mask generations + self._set_mask_generator(self.patch_embed.num_patches, mask_ratio) + + self.pos_embed = pos_embed + if pos_embed=='cosine': + # positional embedding of the encoder + enc_pos_embed = get_2d_sincos_pos_embed(enc_embed_dim, int(self.patch_embed.num_patches**.5), n_cls_token=0) + self.register_buffer('enc_pos_embed', torch.from_numpy(enc_pos_embed).float()) + # positional embedding of the decoder + dec_pos_embed = get_2d_sincos_pos_embed(dec_embed_dim, int(self.patch_embed.num_patches**.5), n_cls_token=0) + self.register_buffer('dec_pos_embed', torch.from_numpy(dec_pos_embed).float()) + # pos embedding in each block + self.rope = None # nothing for cosine + elif pos_embed.startswith('RoPE'): # eg RoPE100 + self.enc_pos_embed 
= None # nothing to add in the encoder with RoPE
+            self.dec_pos_embed = None # nothing to add in the decoder with RoPE
+            if RoPE2D is None: raise ImportError("Cannot find cuRoPE2D, please install it following the README instructions")
+            freq = float(pos_embed[len('RoPE'):])
+            self.rope = RoPE2D(freq=freq)
+        else:
+            raise NotImplementedError('Unknown pos_embed '+pos_embed)
+
+        # transformer for the encoder
+        self.enc_depth = enc_depth
+        self.enc_embed_dim = enc_embed_dim
+        self.enc_blocks = nn.ModuleList([
+            Block(enc_embed_dim, enc_num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer, rope=self.rope)
+            for i in range(enc_depth)])
+        self.enc_norm = norm_layer(enc_embed_dim)
+
+        # masked tokens
+        self._set_mask_token(dec_embed_dim)
+
+        # decoder
+        self._set_decoder(enc_embed_dim, dec_embed_dim, dec_num_heads, dec_depth, mlp_ratio, norm_layer, norm_im2_in_dec)
+
+        # prediction head
+        self._set_prediction_head(dec_embed_dim, patch_size)
+
+        # initialize weights
+        self.initialize_weights()
+
+    def _set_patch_embed(self, img_size=224, patch_size=16, enc_embed_dim=768):
+        self.patch_embed = PatchEmbed(img_size, patch_size, 3, enc_embed_dim)
+
+    def _set_mask_generator(self, num_patches, mask_ratio):
+        self.mask_generator = RandomMask(num_patches, mask_ratio)
+
+    def _set_mask_token(self, dec_embed_dim):
+        self.mask_token = nn.Parameter(torch.zeros(1, 1, dec_embed_dim))
+
+    def _set_decoder(self, enc_embed_dim, dec_embed_dim, dec_num_heads, dec_depth, mlp_ratio, norm_layer, norm_im2_in_dec):
+        self.dec_depth = dec_depth
+        self.dec_embed_dim = dec_embed_dim
+        # transfer from encoder to decoder
+        self.decoder_embed = nn.Linear(enc_embed_dim, dec_embed_dim, bias=True)
+        # transformer for the decoder
+        self.dec_blocks = nn.ModuleList([
+            DecoderBlock(dec_embed_dim, dec_num_heads, mlp_ratio=mlp_ratio, qkv_bias=True, norm_layer=norm_layer, norm_mem=norm_im2_in_dec, rope=self.rope)
+            for i in range(dec_depth)])
+        # final norm layer
+        self.dec_norm = norm_layer(dec_embed_dim)
+
+    def _set_prediction_head(self, dec_embed_dim, patch_size):
+        self.prediction_head = nn.Linear(dec_embed_dim, patch_size**2 * 3, bias=True)
+
+
+    def initialize_weights(self):
+        # patch embed
+        self.patch_embed._init_weights()
+        # mask tokens
+        if self.mask_token is not None: torch.nn.init.normal_(self.mask_token, std=.02)
+        # linears and layer norms
+        self.apply(self._init_weights)
+
+    def _init_weights(self, m):
+        if isinstance(m, nn.Linear):
+            # we use xavier_uniform following official JAX ViT:
+            torch.nn.init.xavier_uniform_(m.weight)
+            if isinstance(m, nn.Linear) and m.bias is not None:
+                nn.init.constant_(m.bias, 0)
+        elif isinstance(m, nn.LayerNorm):
+            nn.init.constant_(m.bias, 0)
+            nn.init.constant_(m.weight, 1.0)
+
+    def _encode_image(self, image, do_mask=False, return_all_blocks=False):
+        """
+        image has B x 3 x img_size x img_size
+        do_mask: whether to perform masking or not
+        return_all_blocks: if True, return the features at the end of every block
+                           instead of just the features from the last block (eg for some prediction heads)
+        """
+        # embed the image into patches (x has size B x Npatches x C)
+        # and get the position of each returned patch (pos has size B x Npatches x 2)
+        x, pos = self.patch_embed(image)
+        # add positional embedding without cls token
+        if self.enc_pos_embed is not None:
+            x = x + self.enc_pos_embed[None,...]
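+        # (illustrative note: with the default 224x224 input, 16x16 patches and
+        #  mask_ratio=0.9, N = 196 and only ~20 visible tokens are kept below)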
+ # apply masking + B,N,C = x.size() + if do_mask: + masks = self.mask_generator(x) + x = x[~masks].view(B, -1, C) + posvis = pos[~masks].view(B, -1, 2) + else: + B,N,C = x.size() + masks = torch.zeros((B,N), dtype=bool) + posvis = pos + # now apply the transformer encoder and normalization + if return_all_blocks: + out = [] + for blk in self.enc_blocks: + x = blk(x, posvis) + out.append(x) + out[-1] = self.enc_norm(out[-1]) + return out, pos, masks + else: + for blk in self.enc_blocks: + x = blk(x, posvis) + x = self.enc_norm(x) + return x, pos, masks + + def _decoder(self, feat1, pos1, masks1, feat2, pos2, return_all_blocks=False): + """ + return_all_blocks: if True, return the features at the end of every block + instead of just the features from the last block (eg for some prediction heads) + + masks1 can be None => assume image1 fully visible + """ + # encoder to decoder layer + visf1 = self.decoder_embed(feat1) + f2 = self.decoder_embed(feat2) + # append masked tokens to the sequence + B,Nenc,C = visf1.size() + if masks1 is None: # downstreams + f1_ = visf1 + else: # pretraining + Ntotal = masks1.size(1) + f1_ = self.mask_token.repeat(B, Ntotal, 1).to(dtype=visf1.dtype) + f1_[~masks1] = visf1.view(B * Nenc, C) + # add positional embedding + if self.dec_pos_embed is not None: + f1_ = f1_ + self.dec_pos_embed + f2 = f2 + self.dec_pos_embed + # apply Transformer blocks + out = f1_ + out2 = f2 + if return_all_blocks: + _out, out = out, [] + for blk in self.dec_blocks: + _out, out2 = blk(_out, out2, pos1, pos2) + out.append(_out) + out[-1] = self.dec_norm(out[-1]) + else: + for blk in self.dec_blocks: + out, out2 = blk(out, out2, pos1, pos2) + out = self.dec_norm(out) + return out + + def patchify(self, imgs): + """ + imgs: (B, 3, H, W) + x: (B, L, patch_size**2 *3) + """ + p = self.patch_embed.patch_size[0] + assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0 + + h = w = imgs.shape[2] // p + x = imgs.reshape(shape=(imgs.shape[0], 3, h, p, w, p)) + x = torch.einsum('nchpwq->nhwpqc', x) + x = x.reshape(shape=(imgs.shape[0], h * w, p**2 * 3)) + + return x + + def unpatchify(self, x, channels=3): + """ + x: (N, L, patch_size**2 *channels) + imgs: (N, 3, H, W) + """ + patch_size = self.patch_embed.patch_size[0] + h = w = int(x.shape[1]**.5) + assert h * w == x.shape[1] + x = x.reshape(shape=(x.shape[0], h, w, patch_size, patch_size, channels)) + x = torch.einsum('nhwpqc->nchpwq', x) + imgs = x.reshape(shape=(x.shape[0], channels, h * patch_size, h * patch_size)) + return imgs + + def forward(self, img1, img2): + """ + img1: tensor of size B x 3 x img_size x img_size + img2: tensor of size B x 3 x img_size x img_size + + out will be B x N x (3*patch_size*patch_size) + masks are also returned as B x N just in case + """ + # encoder of the masked first image + feat1, pos1, mask1 = self._encode_image(img1, do_mask=True) + # encoder of the second image + feat2, pos2, _ = self._encode_image(img2, do_mask=False) + # decoder + decfeat = self._decoder(feat1, pos1, mask1, feat2, pos2) + # prediction head + out = self.prediction_head(decfeat) + # get target + target = self.patchify(img1) + return out, mask1, target diff --git a/dynamic_predictor/croco/models/croco_downstream.py b/dynamic_predictor/croco/models/croco_downstream.py new file mode 100644 index 0000000000000000000000000000000000000000..159dfff4d2c1461bc235e21441b57ce1e2088f76 --- /dev/null +++ b/dynamic_predictor/croco/models/croco_downstream.py @@ -0,0 +1,122 @@ +# Copyright (C) 2022-present Naver Corporation. 
All rights reserved.
+# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
+
+# --------------------------------------------------------
+# CroCo model for downstream tasks
+# --------------------------------------------------------
+
+import torch
+
+from .croco import CroCoNet
+
+
+def croco_args_from_ckpt(ckpt):
+    if 'croco_kwargs' in ckpt: # CroCo v2 released models
+        return ckpt['croco_kwargs']
+    elif 'args' in ckpt and hasattr(ckpt['args'], 'model'): # pretrained using the official code release
+        s = ckpt['args'].model # eg "CroCoNet(enc_embed_dim=1024, enc_num_heads=16, enc_depth=24)"
+        assert s.startswith('CroCoNet(')
+        return eval('dict'+s[len('CroCoNet'):]) # transform it into the string of a dictionary and evaluate it
+    else: # CroCo v1 released models
+        return dict()
+
+class CroCoDownstreamMonocularEncoder(CroCoNet):
+
+    def __init__(self,
+                 head,
+                 **kwargs):
+        """ Build network for monocular downstream task, only using the encoder.
+        It takes an extra argument head, that is called with the features
+          and a dictionary img_info containing 'width' and 'height' keys
+        The head is setup with the croconet arguments in this init function
+        NOTE: it works by calling super().__init__() but with redefined setters
+        """
+        super(CroCoDownstreamMonocularEncoder, self).__init__(**kwargs)
+        head.setup(self)
+        self.head = head
+
+    def _set_mask_generator(self, *args, **kwargs):
+        """ No mask generator """
+        return
+
+    def _set_mask_token(self, *args, **kwargs):
+        """ No mask token """
+        self.mask_token = None
+        return
+
+    def _set_decoder(self, *args, **kwargs):
+        """ No decoder """
+        return
+
+    def _set_prediction_head(self, *args, **kwargs):
+        """ No 'prediction head' for downstream tasks."""
+        return
+
+    def forward(self, img):
+        """
+        img is of size batch_size x 3 x h x w
+        """
+        B, C, H, W = img.size()
+        img_info = {'height': H, 'width': W}
+        need_all_layers = hasattr(self.head, 'return_all_blocks') and self.head.return_all_blocks
+        out, _, _ = self._encode_image(img, do_mask=False, return_all_blocks=need_all_layers)
+        return self.head(out, img_info)
+
+
+class CroCoDownstreamBinocular(CroCoNet):
+
+    def __init__(self,
+                 head,
+                 **kwargs):
+        """ Build network for binocular downstream task
+        It takes an extra argument head, that is called with the features
+          and a dictionary img_info containing 'width' and 'height' keys
+        The head is setup with the croconet arguments in this init function
+        """
+        super(CroCoDownstreamBinocular, self).__init__(**kwargs)
+        head.setup(self)
+        self.head = head
+
+    def _set_mask_generator(self, *args, **kwargs):
+        """ No mask generator """
+        return
+
+    def _set_mask_token(self, *args, **kwargs):
+        """ No mask token """
+        self.mask_token = None
+        return
+
+    def _set_prediction_head(self, *args, **kwargs):
+        """ No prediction head for downstream tasks, define your own head """
+        return
+
+    def encode_image_pairs(self, img1, img2, return_all_blocks=False):
+        """ run encoder for a pair of images
+            it is actually ~5% faster to concatenate the images along the batch dimension
+            than to encode them separately
+        """
+        ## the two commented lines below are the naive version with separate encoding
+        #out, pos, _ = self._encode_image(img1, do_mask=False, return_all_blocks=return_all_blocks)
+        #out2, pos2, _ = self._encode_image(img2, do_mask=False, return_all_blocks=False)
+        ## and now the faster version
+        out, pos, _ = self._encode_image( torch.cat( (img1,img2), dim=0), do_mask=False, return_all_blocks=return_all_blocks )
+        if return_all_blocks:
+            out,out2 = 
list(map(list, zip(*[o.chunk(2, dim=0) for o in out])))
+            out2 = out2[-1]
+        else:
+            out,out2 = out.chunk(2, dim=0)
+        pos,pos2 = pos.chunk(2, dim=0)
+        return out, out2, pos, pos2
+
+    def forward(self, img1, img2):
+        B, C, H, W = img1.size()
+        img_info = {'height': H, 'width': W}
+        return_all_blocks = hasattr(self.head, 'return_all_blocks') and self.head.return_all_blocks
+        out, out2, pos, pos2 = self.encode_image_pairs(img1, img2, return_all_blocks=return_all_blocks)
+        if return_all_blocks:
+            decout = self._decoder(out[-1], pos, None, out2, pos2, return_all_blocks=return_all_blocks)
+            decout = out+decout
+        else:
+            decout = self._decoder(out, pos, None, out2, pos2, return_all_blocks=return_all_blocks)
+        return self.head(decout, img_info)
\ No newline at end of file
diff --git a/dynamic_predictor/croco/models/curope/__init__.py b/dynamic_predictor/croco/models/curope/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..25e3d48a162760260826080f6366838e83e26878
--- /dev/null
+++ b/dynamic_predictor/croco/models/curope/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (C) 2022-present Naver Corporation. All rights reserved.
+# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
+
+from .curope2d import cuRoPE2D
diff --git a/dynamic_predictor/croco/models/curope/curope.cpp b/dynamic_predictor/croco/models/curope/curope.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8fe9058e05aa1bf3f37b0d970edc7312bc68455b
--- /dev/null
+++ b/dynamic_predictor/croco/models/curope/curope.cpp
@@ -0,0 +1,69 @@
+/*
+  Copyright (C) 2022-present Naver Corporation. All rights reserved.
+  Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
+*/
+
+#include <torch/extension.h>
+
+// forward declaration
+void rope_2d_cuda( torch::Tensor tokens, const torch::Tensor pos, const float base, const float fwd );
+
+void rope_2d_cpu( torch::Tensor tokens, const torch::Tensor positions, const float base, const float fwd )
+{
+    const int B = tokens.size(0);
+    const int N = tokens.size(1);
+    const int H = tokens.size(2);
+    const int D = tokens.size(3) / 4;
+
+    auto tok = tokens.accessor<float, 4>();
+    auto pos = positions.accessor<int64_t, 3>();
+
+    for (int b = 0; b < B; b++) {
+        for (int x = 0; x < 2; x++) { // y and then x (2d)
+            for (int n = 0; n < N; n++) {
+
+                // grab the token position
+                const int p = pos[b][n][x];
+
+                for (int h = 0; h < H; h++) {
+                    for (int d = 0; d < D; d++) {
+                        // grab the two values
+                        float u = tok[b][n][h][d+0+x*2*D];
+                        float v = tok[b][n][h][d+D+x*2*D];
+
+                        // grab the cos,sin
+                        const float inv_freq = fwd * p / powf(base, d/float(D));
+                        float c = cosf(inv_freq);
+                        float s = sinf(inv_freq);
+
+                        // write the result
+                        tok[b][n][h][d+0+x*2*D] = u*c - v*s;
+                        tok[b][n][h][d+D+x*2*D] = v*c + u*s;
+                    }
+                }
+            }
+        }
+    }
+}
+
+void rope_2d( torch::Tensor tokens,          // B,N,H,D
+              const torch::Tensor positions, // B,N,2
+              const float base,
+              const float fwd )
+{
+    TORCH_CHECK(tokens.dim() == 4, "tokens must have 4 dimensions");
+    TORCH_CHECK(positions.dim() == 3, "positions must have 3 dimensions");
+    TORCH_CHECK(tokens.size(0) == positions.size(0), "batch size differs between tokens & positions");
+    TORCH_CHECK(tokens.size(1) == positions.size(1), "seq_length differs between tokens & positions");
+    TORCH_CHECK(positions.size(2) == 2, "positions.shape[2] must be equal to 2");
+    TORCH_CHECK(tokens.is_cuda() == positions.is_cuda(), "tokens and positions are not on the same device" );
+
+    if (tokens.is_cuda())
+        rope_2d_cuda( tokens, positions, base, fwd );
+    else
+        rope_2d_cpu( tokens, positions, base, fwd );
+}
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+    m.def("rope_2d", &rope_2d, "RoPE 2d forward/backward");
+}
diff --git a/dynamic_predictor/croco/models/curope/curope2d.py b/dynamic_predictor/croco/models/curope/curope2d.py
new file mode 100644
index 0000000000000000000000000000000000000000..a49c12f8c529e9a889b5ac20c5767158f238e17d
--- /dev/null
+++ b/dynamic_predictor/croco/models/curope/curope2d.py
@@ -0,0 +1,40 @@
+# Copyright (C) 2022-present Naver Corporation. All rights reserved.
+# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
+
+import torch
+
+try:
+    import curope as _kernels # run `python setup.py install`
+except ModuleNotFoundError:
+    from . import curope as _kernels # run `python setup.py build_ext --inplace`
+
+
+class cuRoPE2D_func (torch.autograd.Function):
+
+    @staticmethod
+    def forward(ctx, tokens, positions, base, F0=1):
+        ctx.save_for_backward(positions)
+        ctx.saved_base = base
+        ctx.saved_F0 = F0
+        # tokens = tokens.clone() # uncomment this if inplace doesn't work
+        _kernels.rope_2d( tokens, positions, base, F0 )
+        ctx.mark_dirty(tokens)
+        return tokens
+
+    @staticmethod
+    def backward(ctx, grad_res):
+        positions, base, F0 = ctx.saved_tensors[0], ctx.saved_base, ctx.saved_F0
+        _kernels.rope_2d( grad_res, positions, base, -F0 )
+        ctx.mark_dirty(grad_res)
+        return grad_res, None, None, None
+
+
+class cuRoPE2D(torch.nn.Module):
+    def __init__(self, freq=100.0, F0=1.0):
+        super().__init__()
+        self.base = freq
+        self.F0 = F0
+
+    def forward(self, tokens, positions):
+        cuRoPE2D_func.apply( tokens.transpose(1,2), positions, self.base, self.F0 )
+        return tokens
\ No newline at end of file
diff --git a/dynamic_predictor/croco/models/curope/kernels.cu b/dynamic_predictor/croco/models/curope/kernels.cu
new file mode 100644
index 0000000000000000000000000000000000000000..7156cd1bb935cb1f0be45e58add53f9c21505c20
--- /dev/null
+++ b/dynamic_predictor/croco/models/curope/kernels.cu
@@ -0,0 +1,108 @@
+/*
+  Copyright (C) 2022-present Naver Corporation. All rights reserved.
+  Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
+*/
+
+#include <torch/extension.h>
+#include <cuda.h>
+#include <cuda_runtime.h>
+#include <vector>
+
+#define CHECK_CUDA(tensor) {\
+    TORCH_CHECK((tensor).is_cuda(), #tensor " is not in cuda memory"); \
+    TORCH_CHECK((tensor).is_contiguous(), #tensor " is not contiguous"); }
+void CHECK_KERNEL() {auto error = cudaGetLastError(); TORCH_CHECK( error == cudaSuccess, cudaGetErrorString(error));}
+
+
+template < typename scalar_t >
+__global__ void rope_2d_cuda_kernel(
+        //scalar_t* __restrict__ tokens,
+        torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> tokens,
+        const int64_t* __restrict__ pos,
+        const float base,
+        const float fwd )
+        // const int N, const int H, const int D )
+{
+    // tokens shape = (B, N, H, D)
+    const int N = tokens.size(1);
+    const int H = tokens.size(2);
+    const int D = tokens.size(3);
+
+    // each block updates a single token, for all heads
+    // each thread takes care of a single output
+    extern __shared__ float shared[];
+    float* shared_inv_freq = shared + D;
+
+    const int b = blockIdx.x / N;
+    const int n = blockIdx.x % N;
+
+    const int Q = D / 4;
+    // one token = [0..Q : Q..2Q : 2Q..3Q : 3Q..D]
+    //               u_Y     v_Y     u_X     v_X
+
+    // shared memory: first, compute inv_freq
+    if (threadIdx.x < Q)
+        shared_inv_freq[threadIdx.x] = fwd / powf(base, threadIdx.x/float(Q));
+    __syncthreads();
+
+    // start of X or Y part
+    const int X = threadIdx.x < D/2 ? 
0 : 1; + const int m = (X*D/2) + (threadIdx.x % Q); // index of u_Y or u_X + + // grab the cos,sin appropriate for me + const float freq = pos[blockIdx.x*2+X] * shared_inv_freq[threadIdx.x % Q]; + const float cos = cosf(freq); + const float sin = sinf(freq); + /* + float* shared_cos_sin = shared + D + D/4; + if ((threadIdx.x % (D/2)) < Q) + shared_cos_sin[m+0] = cosf(freq); + else + shared_cos_sin[m+Q] = sinf(freq); + __syncthreads(); + const float cos = shared_cos_sin[m+0]; + const float sin = shared_cos_sin[m+Q]; + */ + + for (int h = 0; h < H; h++) + { + // then, load all the token for this head in shared memory + shared[threadIdx.x] = tokens[b][n][h][threadIdx.x]; + __syncthreads(); + + const float u = shared[m]; + const float v = shared[m+Q]; + + // write output + if ((threadIdx.x % (D/2)) < Q) + tokens[b][n][h][threadIdx.x] = u*cos - v*sin; + else + tokens[b][n][h][threadIdx.x] = v*cos + u*sin; + } +} + +void rope_2d_cuda( torch::Tensor tokens, const torch::Tensor pos, const float base, const float fwd ) +{ + const int B = tokens.size(0); // batch size + const int N = tokens.size(1); // sequence length + const int H = tokens.size(2); // number of heads + const int D = tokens.size(3); // dimension per head + + TORCH_CHECK(tokens.stride(3) == 1 && tokens.stride(2) == D, "tokens are not contiguous"); + TORCH_CHECK(pos.is_contiguous(), "positions are not contiguous"); + TORCH_CHECK(pos.size(0) == B && pos.size(1) == N && pos.size(2) == 2, "bad pos.shape"); + TORCH_CHECK(D % 4 == 0, "token dim must be multiple of 4"); + + // one block for each layer, one thread per local-max + const int THREADS_PER_BLOCK = D; + const int N_BLOCKS = B * N; // each block takes care of H*D values + const int SHARED_MEM = sizeof(float) * (D + D/4); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF(tokens.type(), "rope_2d_cuda", ([&] { + rope_2d_cuda_kernel<scalar_t> <<<N_BLOCKS, THREADS_PER_BLOCK, SHARED_MEM>>> ( + //tokens.data_ptr<scalar_t>(), + tokens.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(), + pos.data_ptr<int64_t>(), + base, fwd); //, N, H, D ); + })); +} diff --git a/dynamic_predictor/croco/models/curope/setup.py b/dynamic_predictor/croco/models/curope/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..230632ed05e309200e8f93a3a852072333975009 --- /dev/null +++ b/dynamic_predictor/croco/models/curope/setup.py @@ -0,0 +1,34 @@ +# Copyright (C) 2022-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
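+# +# Build note (paraphrasing the comments in curope2d.py): compile this optional +# extension either with `python setup.py install` (imported as the top-level +# `curope` module) or with `python setup.py build_ext --inplace` (imported +# relatively); if neither build is present, models/pos_embed.py falls back to +# a slower pure-PyTorch RoPE2D implementation.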
+ +from setuptools import setup +from torch import cuda +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +# compile for all possible CUDA architectures +all_cuda_archs = cuda.get_gencode_flags().replace('compute=','arch=').split() +# alternatively, you can list cuda archs that you want, eg: +# all_cuda_archs = [ + # '-gencode', 'arch=compute_70,code=sm_70', + # '-gencode', 'arch=compute_75,code=sm_75', + # '-gencode', 'arch=compute_80,code=sm_80', + # '-gencode', 'arch=compute_86,code=sm_86' +# ] + +setup( + name = 'curope', + ext_modules = [ + CUDAExtension( + name='curope', + sources=[ + "curope.cpp", + "kernels.cu", + ], + extra_compile_args = dict( + nvcc=['-O3','--ptxas-options=-v',"--use_fast_math"]+all_cuda_archs, + cxx=['-O3']) + ) + ], + cmdclass = { + 'build_ext': BuildExtension + }) diff --git a/dynamic_predictor/croco/models/dpt_block.py b/dynamic_predictor/croco/models/dpt_block.py new file mode 100644 index 0000000000000000000000000000000000000000..d4ddfb74e2769ceca88720d4c730e00afd71c763 --- /dev/null +++ b/dynamic_predictor/croco/models/dpt_block.py @@ -0,0 +1,450 @@ +# Copyright (C) 2022-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). + +# -------------------------------------------------------- +# DPT head for ViTs +# -------------------------------------------------------- +# References: +# https://github.com/isl-org/DPT +# https://github.com/EPFL-VILAB/MultiMAE/blob/main/multimae/output_adapters.py + +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange, repeat +from typing import Union, Tuple, Iterable, List, Optional, Dict + +def pair(t): + return t if isinstance(t, tuple) else (t, t) + +def make_scratch(in_shape, out_shape, groups=1, expand=False): + scratch = nn.Module() + + out_shape1 = out_shape + out_shape2 = out_shape + out_shape3 = out_shape + out_shape4 = out_shape + if expand == True: + out_shape1 = out_shape + out_shape2 = out_shape * 2 + out_shape3 = out_shape * 4 + out_shape4 = out_shape * 8 + + scratch.layer1_rn = nn.Conv2d( + in_shape[0], + out_shape1, + kernel_size=3, + stride=1, + padding=1, + bias=False, + groups=groups, + ) + scratch.layer2_rn = nn.Conv2d( + in_shape[1], + out_shape2, + kernel_size=3, + stride=1, + padding=1, + bias=False, + groups=groups, + ) + scratch.layer3_rn = nn.Conv2d( + in_shape[2], + out_shape3, + kernel_size=3, + stride=1, + padding=1, + bias=False, + groups=groups, + ) + scratch.layer4_rn = nn.Conv2d( + in_shape[3], + out_shape4, + kernel_size=3, + stride=1, + padding=1, + bias=False, + groups=groups, + ) + + scratch.layer_rn = nn.ModuleList([ + scratch.layer1_rn, + scratch.layer2_rn, + scratch.layer3_rn, + scratch.layer4_rn, + ]) + + return scratch + +class ResidualConvUnit_custom(nn.Module): + """Residual convolution module.""" + + def __init__(self, features, activation, bn): + """Init. + Args: + features (int): number of features + """ + super().__init__() + + self.bn = bn + + self.groups = 1 + + self.conv1 = nn.Conv2d( + features, + features, + kernel_size=3, + stride=1, + padding=1, + bias=not self.bn, + groups=self.groups, + ) + + self.conv2 = nn.Conv2d( + features, + features, + kernel_size=3, + stride=1, + padding=1, + bias=not self.bn, + groups=self.groups, + ) + + if self.bn == True: + self.bn1 = nn.BatchNorm2d(features) + self.bn2 = nn.BatchNorm2d(features) + + self.activation = activation + + self.skip_add = nn.quantized.FloatFunctional() + + def forward(self, x): + """Forward pass. 
+ Args: + x (tensor): input + Returns: + tensor: output + """ + + out = self.activation(x) + out = self.conv1(out) + if self.bn == True: + out = self.bn1(out) + + out = self.activation(out) + out = self.conv2(out) + if self.bn == True: + out = self.bn2(out) + + if self.groups > 1: + out = self.conv_merge(out) + + return self.skip_add.add(out, x) + +class FeatureFusionBlock_custom(nn.Module): + """Feature fusion block.""" + + def __init__( + self, + features, + activation, + deconv=False, + bn=False, + expand=False, + align_corners=True, + width_ratio=1, + ): + """Init. + Args: + features (int): number of features + """ + super(FeatureFusionBlock_custom, self).__init__() + self.width_ratio = width_ratio + + self.deconv = deconv + self.align_corners = align_corners + + self.groups = 1 + + self.expand = expand + out_features = features + if self.expand == True: + out_features = features // 2 + + self.out_conv = nn.Conv2d( + features, + out_features, + kernel_size=1, + stride=1, + padding=0, + bias=True, + groups=1, + ) + + self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn) + self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn) + + self.skip_add = nn.quantized.FloatFunctional() + + def forward(self, *xs): + """Forward pass. + Returns: + tensor: output + """ + output = xs[0] + + if len(xs) == 2: + res = self.resConfUnit1(xs[1]) + if self.width_ratio != 1: + res = F.interpolate(res, size=(output.shape[2], output.shape[3]), mode='bilinear') + + output = self.skip_add.add(output, res) + # output += res + + output = self.resConfUnit2(output) + + if self.width_ratio != 1: + # and output.shape[3] < self.width_ratio * output.shape[2] + #size=(image.shape[]) + if (output.shape[3] / output.shape[2]) < (2 / 3) * self.width_ratio: + shape = 3 * output.shape[3] + else: + shape = int(self.width_ratio * 2 * output.shape[2]) + output = F.interpolate(output, size=(2* output.shape[2], shape), mode='bilinear') + else: + output = nn.functional.interpolate(output, scale_factor=2, + mode="bilinear", align_corners=self.align_corners) + output = self.out_conv(output) + return output + +def make_fusion_block(features, use_bn, width_ratio=1): + return FeatureFusionBlock_custom( + features, + nn.ReLU(False), + deconv=False, + bn=use_bn, + expand=False, + align_corners=True, + width_ratio=width_ratio, + ) + +class Interpolate(nn.Module): + """Interpolation module.""" + + def __init__(self, scale_factor, mode, align_corners=False): + """Init. + Args: + scale_factor (float): scaling + mode (str): interpolation mode + """ + super(Interpolate, self).__init__() + + self.interp = nn.functional.interpolate + self.scale_factor = scale_factor + self.mode = mode + self.align_corners = align_corners + + def forward(self, x): + """Forward pass. + Args: + x (tensor): input + Returns: + tensor: interpolated data + """ + + x = self.interp( + x, + scale_factor=self.scale_factor, + mode=self.mode, + align_corners=self.align_corners, + ) + + return x + +class DPTOutputAdapter(nn.Module): + """DPT output adapter. + + :param num_cahnnels: Number of output channels + :param stride_level: tride level compared to the full-sized image. + E.g. 4 for 1/4th the size of the image. + :param patch_size_full: Int or tuple of the patch size over the full image size. + Patch size for smaller inputs will be computed accordingly. 
+ :param hooks: Index of intermediate layers + :param layer_dims: Dimension of intermediate layers + :param feature_dim: Feature dimension + :param last_dim: out_channels/in_channels for the last two Conv2d when head_type == regression + :param use_bn: If set to True, activates batch norm + :param dim_tokens_enc: Dimension of tokens coming from encoder + """ + + def __init__(self, + num_channels: int = 1, + stride_level: int = 1, + patch_size: Union[int, Tuple[int, int]] = 16, + main_tasks: Iterable[str] = ('rgb',), + hooks: List[int] = [2, 5, 8, 11], + layer_dims: List[int] = [96, 192, 384, 768], + feature_dim: int = 256, + last_dim: int = 32, + use_bn: bool = False, + dim_tokens_enc: Optional[int] = None, + head_type: str = 'regression', + output_width_ratio=1, + **kwargs): + super().__init__() + self.num_channels = num_channels + self.stride_level = stride_level + self.patch_size = pair(patch_size) + self.main_tasks = main_tasks + self.hooks = hooks + self.layer_dims = layer_dims + self.feature_dim = feature_dim + self.dim_tokens_enc = dim_tokens_enc * len(self.main_tasks) if dim_tokens_enc is not None else None + self.head_type = head_type + + # Actual patch height and width, taking into account stride of input + self.P_H = max(1, self.patch_size[0] // stride_level) + self.P_W = max(1, self.patch_size[1] // stride_level) + + self.scratch = make_scratch(layer_dims, feature_dim, groups=1, expand=False) + + self.scratch.refinenet1 = make_fusion_block(feature_dim, use_bn, output_width_ratio) + self.scratch.refinenet2 = make_fusion_block(feature_dim, use_bn, output_width_ratio) + self.scratch.refinenet3 = make_fusion_block(feature_dim, use_bn, output_width_ratio) + self.scratch.refinenet4 = make_fusion_block(feature_dim, use_bn, output_width_ratio) + + if self.head_type == 'regression': + # The "DPTDepthModel" head + self.head = nn.Sequential( + nn.Conv2d(feature_dim, feature_dim // 2, kernel_size=3, stride=1, padding=1), + Interpolate(scale_factor=2, mode="bilinear", align_corners=True), + nn.Conv2d(feature_dim // 2, last_dim, kernel_size=3, stride=1, padding=1), + nn.ReLU(True), + nn.Conv2d(last_dim, self.num_channels, kernel_size=1, stride=1, padding=0) + ) + elif self.head_type == 'semseg': + # The "DPTSegmentationModel" head + self.head = nn.Sequential( + nn.Conv2d(feature_dim, feature_dim, kernel_size=3, padding=1, bias=False), + nn.BatchNorm2d(feature_dim) if use_bn else nn.Identity(), + nn.ReLU(True), + nn.Dropout(0.1, False), + nn.Conv2d(feature_dim, self.num_channels, kernel_size=1), + Interpolate(scale_factor=2, mode="bilinear", align_corners=True), + ) + else: + raise ValueError('DPT head_type must be "regression" or "semseg".') + + if self.dim_tokens_enc is not None: + self.init(dim_tokens_enc=dim_tokens_enc) + + def init(self, dim_tokens_enc=768): + """ + Initialize parts of decoder that are dependent on dimension of encoder tokens. + Should be called when setting up MultiMAE. 
+ + :param dim_tokens_enc: Dimension of tokens coming from encoder + """ + #print(dim_tokens_enc) + + # Set up activation postprocessing layers + if isinstance(dim_tokens_enc, int): + dim_tokens_enc = 4 * [dim_tokens_enc] + + self.dim_tokens_enc = [dt * len(self.main_tasks) for dt in dim_tokens_enc] + + self.act_1_postprocess = nn.Sequential( + nn.Conv2d( + in_channels=self.dim_tokens_enc[0], + out_channels=self.layer_dims[0], + kernel_size=1, stride=1, padding=0, + ), + nn.ConvTranspose2d( + in_channels=self.layer_dims[0], + out_channels=self.layer_dims[0], + kernel_size=4, stride=4, padding=0, + bias=True, dilation=1, groups=1, + ) + ) + + self.act_2_postprocess = nn.Sequential( + nn.Conv2d( + in_channels=self.dim_tokens_enc[1], + out_channels=self.layer_dims[1], + kernel_size=1, stride=1, padding=0, + ), + nn.ConvTranspose2d( + in_channels=self.layer_dims[1], + out_channels=self.layer_dims[1], + kernel_size=2, stride=2, padding=0, + bias=True, dilation=1, groups=1, + ) + ) + + self.act_3_postprocess = nn.Sequential( + nn.Conv2d( + in_channels=self.dim_tokens_enc[2], + out_channels=self.layer_dims[2], + kernel_size=1, stride=1, padding=0, + ) + ) + + self.act_4_postprocess = nn.Sequential( + nn.Conv2d( + in_channels=self.dim_tokens_enc[3], + out_channels=self.layer_dims[3], + kernel_size=1, stride=1, padding=0, + ), + nn.Conv2d( + in_channels=self.layer_dims[3], + out_channels=self.layer_dims[3], + kernel_size=3, stride=2, padding=1, + ) + ) + + self.act_postprocess = nn.ModuleList([ + self.act_1_postprocess, + self.act_2_postprocess, + self.act_3_postprocess, + self.act_4_postprocess + ]) + + def adapt_tokens(self, encoder_tokens): + # Adapt tokens + x = [] + x.append(encoder_tokens[:, :]) + x = torch.cat(x, dim=-1) + return x + + def forward(self, encoder_tokens: List[torch.Tensor], image_size): + #input_info: Dict): + assert self.dim_tokens_enc is not None, 'Need to call init(dim_tokens_enc) function first' + H, W = image_size + + # Number of patches in height and width + N_H = H // (self.stride_level * self.P_H) + N_W = W // (self.stride_level * self.P_W) + + # Hook decoder onto 4 layers from specified ViT layers + layers = [encoder_tokens[hook] for hook in self.hooks] + + # Extract only task-relevant tokens and ignore global tokens. + layers = [self.adapt_tokens(l) for l in layers] + + # Reshape tokens to spatial representation + layers = [rearrange(l, 'b (nh nw) c -> b c nh nw', nh=N_H, nw=N_W) for l in layers] + + layers = [self.act_postprocess[idx](l) for idx, l in enumerate(layers)] + # Project layers to chosen feature dim + layers = [self.scratch.layer_rn[idx](l) for idx, l in enumerate(layers)] + + # Fuse layers using refinement stages + path_4 = self.scratch.refinenet4(layers[3]) + path_3 = self.scratch.refinenet3(path_4, layers[2]) + path_2 = self.scratch.refinenet2(path_3, layers[1]) + path_1 = self.scratch.refinenet1(path_2, layers[0]) + + # Output head + out = self.head(path_1) + + return out diff --git a/dynamic_predictor/croco/models/head_downstream.py b/dynamic_predictor/croco/models/head_downstream.py new file mode 100644 index 0000000000000000000000000000000000000000..bd40c91ba244d6c3522c6efd4ed4d724b7bdc650 --- /dev/null +++ b/dynamic_predictor/croco/models/head_downstream.py @@ -0,0 +1,58 @@ +# Copyright (C) 2022-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). 
+ +# -------------------------------------------------------- +# Heads for downstream tasks +# -------------------------------------------------------- + +""" +A head is a module where the __init__ defines only the head hyperparameters. +A method setup(croconet) takes a CroCoNet and sets all layers according to the head and croconet attributes. +The forward takes the features as well as a dictionary img_info containing the keys 'width' and 'height' +""" + +import torch +import torch.nn as nn +from .dpt_block import DPTOutputAdapter + + +class PixelwiseTaskWithDPT(nn.Module): + """ DPT module for CroCo. + by default, hooks_idx will be equal to: + * for encoder-only: 4 equally spread layers + * for encoder+decoder: last encoder + 3 equally spread layers of the decoder + """ + + def __init__(self, *, hooks_idx=None, layer_dims=[96,192,384,768], + output_width_ratio=1, num_channels=1, postprocess=None, **kwargs): + super(PixelwiseTaskWithDPT, self).__init__() + self.return_all_blocks = True # backbone needs to return all layers + self.postprocess = postprocess + self.output_width_ratio = output_width_ratio + self.num_channels = num_channels + self.hooks_idx = hooks_idx + self.layer_dims = layer_dims + + def setup(self, croconet): + dpt_args = {'output_width_ratio': self.output_width_ratio, 'num_channels': self.num_channels} + if self.hooks_idx is None: + if hasattr(croconet, 'dec_blocks'): # encoder + decoder + step = {8: 3, 12: 4, 24: 8}[croconet.dec_depth] + hooks_idx = [croconet.dec_depth+croconet.enc_depth-1-i*step for i in range(3,-1,-1)] + else: # encoder only + step = croconet.enc_depth//4 + hooks_idx = [croconet.enc_depth-1-i*step for i in range(3,-1,-1)] + self.hooks_idx = hooks_idx + print(f' PixelwiseTaskWithDPT: automatically setting hook_idxs={self.hooks_idx}') + dpt_args['hooks'] = self.hooks_idx + dpt_args['layer_dims'] = self.layer_dims + self.dpt = DPTOutputAdapter(**dpt_args) + dim_tokens = [croconet.enc_embed_dim if hook<croconet.enc_depth else croconet.dec_embed_dim for hook in self.hooks_idx] + self.dpt.init(dim_tokens_enc=dim_tokens) + + def forward(self, x, img_info): + out = self.dpt(x, image_size=(img_info['height'],img_info['width'])) + if self.postprocess: out = self.postprocess(out) + return out diff --git a/dynamic_predictor/croco/models/pos_embed.py b/dynamic_predictor/croco/models/pos_embed.py new file mode 100644 --- /dev/null +++ b/dynamic_predictor/croco/models/pos_embed.py +# Copyright (C) 2022-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). + +# -------------------------------------------------------- +# Position embedding utils +# -------------------------------------------------------- + +import numpy as np +import torch + +# -------------------------------------------------------- +# 2D sine-cosine position embedding +# References: +# MAE: https://github.com/facebookresearch/mae/blob/main/util/pos_embed.py +# -------------------------------------------------------- +def get_2d_sincos_pos_embed(embed_dim, grid_size, n_cls_token=0): + """ + grid_size: int of the grid height and width + return: + pos_embed: [grid_size*grid_size, embed_dim] or [n_cls_token+grid_size*grid_size, embed_dim] (w/ or w/o cls_token) + """ + grid_h = np.arange(grid_size, dtype=np.float32) + grid_w = np.arange(grid_size, dtype=np.float32) + grid = np.meshgrid(grid_w, grid_h) # here w goes first + grid = np.stack(grid, axis=0) + + grid = grid.reshape([2, 1, grid_size, grid_size]) + pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) + if n_cls_token>0: + pos_embed = np.concatenate([np.zeros([n_cls_token, embed_dim]), pos_embed], axis=0) + return pos_embed + + +def get_2d_sincos_pos_embed_from_grid(embed_dim, grid): + assert embed_dim % 2 == 0 + + # use half of dimensions to encode grid_h + emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2) + emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2) + + emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D) + return emb + + +def get_1d_sincos_pos_embed_from_grid(embed_dim, pos): + """ + embed_dim: output dimension for each position + pos: a list of positions to be encoded: size (M,) + out: (M, D) + """ + assert embed_dim % 2 == 0 + omega = np.arange(embed_dim // 2, dtype=float) + omega /= embed_dim / 2. + omega = 1.
/ 10000**omega # (D/2,) + + pos = pos.reshape(-1) # (M,) + out = np.einsum('m,d->md', pos, omega) # (M, D/2), outer product + + emb_sin = np.sin(out) # (M, D/2) + emb_cos = np.cos(out) # (M, D/2) + + emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D) + return emb + + +# -------------------------------------------------------- +# Interpolate position embeddings for high-resolution +# References: +# MAE: https://github.com/facebookresearch/mae/blob/main/util/pos_embed.py +# DeiT: https://github.com/facebookresearch/deit +# -------------------------------------------------------- +def interpolate_pos_embed(model, checkpoint_model): + if 'pos_embed' in checkpoint_model: + pos_embed_checkpoint = checkpoint_model['pos_embed'] + embedding_size = pos_embed_checkpoint.shape[-1] + num_patches = model.patch_embed.num_patches + num_extra_tokens = model.pos_embed.shape[-2] - num_patches + # height (== width) for the checkpoint position embedding + orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) + # height (== width) for the new position embedding + new_size = int(num_patches ** 0.5) + # class_token and dist_token are kept unchanged + if orig_size != new_size: + print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size)) + extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] + # only the position tokens are interpolated + pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] + pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) + pos_tokens = torch.nn.functional.interpolate( + pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) + pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) + new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) + checkpoint_model['pos_embed'] = new_pos_embed + + +#---------------------------------------------------------- +# RoPE2D: RoPE implementation in 2D +#---------------------------------------------------------- + +try: + from models.curope import cuRoPE2D + RoPE2D = cuRoPE2D +except ImportError: + print('Warning, cannot find cuda-compiled version of RoPE2D, using a slow pytorch version instead') + + class RoPE2D(torch.nn.Module): + + def __init__(self, freq=100.0, F0=1.0): + super().__init__() + self.base = freq + self.F0 = F0 + self.cache = {} + + def get_cos_sin(self, D, seq_len, device, dtype): + if (D,seq_len,device,dtype) not in self.cache: + inv_freq = 1.0 / (self.base ** (torch.arange(0, D, 2).float().to(device) / D)) + t = torch.arange(seq_len, device=device, dtype=inv_freq.dtype) + freqs = torch.einsum("i,j->ij", t, inv_freq).to(dtype) + freqs = torch.cat((freqs, freqs), dim=-1) + cos = freqs.cos() # (Seq, Dim) + sin = freqs.sin() + self.cache[D,seq_len,device,dtype] = (cos,sin) + return self.cache[D,seq_len,device,dtype] + + @staticmethod + def rotate_half(x): + x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + def apply_rope1d(self, tokens, pos1d, cos, sin): + assert pos1d.ndim==2 + cos = torch.nn.functional.embedding(pos1d, cos)[:, None, :, :] + sin = torch.nn.functional.embedding(pos1d, sin)[:, None, :, :] + return (tokens * cos) + (self.rotate_half(tokens) * sin) + + def forward(self, tokens, positions): + """ + input: + * tokens: batch_size x nheads x ntokens x dim + * positions: batch_size x ntokens x 2 (y and x position of each token) + output: + * tokens after appplying RoPE2D (batch_size x nheads x ntokens x dim) + """ + assert 
tokens.size(3)%2==0, "number of dimensions should be a multiple of two" + D = tokens.size(3) // 2 + assert positions.ndim==3 and positions.shape[-1] == 2 # Batch, Seq, 2 + cos, sin = self.get_cos_sin(D, int(positions.max())+1, tokens.device, tokens.dtype) + # split features into two along the feature dimension, and apply rope1d on each half + y, x = tokens.chunk(2, dim=-1) + y = self.apply_rope1d(y, positions[:,:,0], cos, sin) + x = self.apply_rope1d(x, positions[:,:,1], cos, sin) + tokens = torch.cat((y, x), dim=-1) + return tokens \ No newline at end of file diff --git a/dynamic_predictor/croco/pretrain.py b/dynamic_predictor/croco/pretrain.py new file mode 100644 index 0000000000000000000000000000000000000000..2c45e488015ef5380c71d0381ff453fdb860759e --- /dev/null +++ b/dynamic_predictor/croco/pretrain.py @@ -0,0 +1,254 @@ +# Copyright (C) 2022-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). +# +# -------------------------------------------------------- +# Pre-training CroCo +# -------------------------------------------------------- +# References: +# MAE: https://github.com/facebookresearch/mae +# DeiT: https://github.com/facebookresearch/deit +# BEiT: https://github.com/microsoft/unilm/tree/master/beit +# -------------------------------------------------------- +import argparse +import datetime +import json +import numpy as np +import os +import sys +import time +import math +from pathlib import Path +from typing import Iterable + +import torch +import torch.distributed as dist +import torch.backends.cudnn as cudnn +from torch.utils.tensorboard import SummaryWriter +import torchvision.transforms as transforms +import torchvision.datasets as datasets + +import utils.misc as misc +from utils.misc import NativeScalerWithGradNormCount as NativeScaler +from models.croco import CroCoNet +from models.criterion import MaskedMSE +from datasets.pairs_dataset import PairsDataset + + +def get_args_parser(): + parser = argparse.ArgumentParser('CroCo pre-training', add_help=False) + # model and criterion + parser.add_argument('--model', default='CroCoNet()', type=str, help="string containing the model to build") + parser.add_argument('--norm_pix_loss', default=1, choices=[0,1], help="apply per-patch mean/std normalization before applying the loss") + # dataset + parser.add_argument('--dataset', default='habitat_release', type=str, help="training set") + parser.add_argument('--transforms', default='crop224+acolor', type=str, help="transforms to apply") # in the paper, we also use some homography and rotation, but find later that they were not useful or even harmful + # training + parser.add_argument('--seed', default=0, type=int, help="Random seed") + parser.add_argument('--batch_size', default=64, type=int, help="Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus") + parser.add_argument('--epochs', default=800, type=int, help="Maximum number of epochs for the scheduler") + parser.add_argument('--max_epoch', default=400, type=int, help="Stop training at this epoch") + parser.add_argument('--accum_iter', default=1, type=int, help="Accumulate gradient iterations (for increasing the effective batch size under memory constraints)") + parser.add_argument('--weight_decay', type=float, default=0.05, help="weight decay (default: 0.05)") + parser.add_argument('--lr', type=float, default=None, metavar='LR', help='learning rate (absolute lr)') + parser.add_argument('--blr', type=float, default=1.5e-4, metavar='LR', 
help='base learning rate: absolute_lr = base_lr * total_batch_size / 256') + parser.add_argument('--min_lr', type=float, default=0., metavar='LR', help='lower lr bound for cyclic schedulers that hit 0') + parser.add_argument('--warmup_epochs', type=int, default=40, metavar='N', help='epochs to warmup LR') + parser.add_argument('--amp', type=int, default=1, choices=[0,1], help="Use Automatic Mixed Precision for pretraining") + # others + parser.add_argument('--num_workers', default=8, type=int) + parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes') + parser.add_argument('--local_rank', default=-1, type=int) + parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') + parser.add_argument('--save_freq', default=1, type=int, help='frequence (number of epochs) to save checkpoint in checkpoint-last.pth') + parser.add_argument('--keep_freq', default=20, type=int, help='frequence (number of epochs) to save checkpoint in checkpoint-%d.pth') + parser.add_argument('--print_freq', default=20, type=int, help='frequence (number of iterations) to print infos while training') + # paths + parser.add_argument('--output_dir', default='./output/', type=str, help="path where to save the output") + parser.add_argument('--data_dir', default='./data/', type=str, help="path where data are stored") + return parser + + + + +def main(args): + misc.init_distributed_mode(args) + global_rank = misc.get_rank() + world_size = misc.get_world_size() + + print("output_dir: "+args.output_dir) + if args.output_dir: + Path(args.output_dir).mkdir(parents=True, exist_ok=True) + + # auto resume + last_ckpt_fname = os.path.join(args.output_dir, f'checkpoint-last.pth') + args.resume = last_ckpt_fname if os.path.isfile(last_ckpt_fname) else None + + print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__)))) + print("{}".format(args).replace(', ', ',\n')) + + device = "cuda" if torch.cuda.is_available() else "cpu" + device = torch.device(device) + + # fix the seed + seed = args.seed + misc.get_rank() + torch.manual_seed(seed) + np.random.seed(seed) + + cudnn.benchmark = True + + ## training dataset and loader + print('Building dataset for {:s} with transforms {:s}'.format(args.dataset, args.transforms)) + dataset = PairsDataset(args.dataset, trfs=args.transforms, data_dir=args.data_dir) + if world_size>1: + sampler_train = torch.utils.data.DistributedSampler( + dataset, num_replicas=world_size, rank=global_rank, shuffle=True + ) + print("Sampler_train = %s" % str(sampler_train)) + else: + sampler_train = torch.utils.data.RandomSampler(dataset) + data_loader_train = torch.utils.data.DataLoader( + dataset, sampler=sampler_train, + batch_size=args.batch_size, + num_workers=args.num_workers, + pin_memory=True, + drop_last=True, + ) + + ## model + print('Loading model: {:s}'.format(args.model)) + model = eval(args.model) + print('Loading criterion: MaskedMSE(norm_pix_loss={:s})'.format(str(bool(args.norm_pix_loss)))) + criterion = MaskedMSE(norm_pix_loss=bool(args.norm_pix_loss)) + + model.to(device) + model_without_ddp = model + print("Model = %s" % str(model_without_ddp)) + + eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size() + if args.lr is None: # only base_lr is specified + args.lr = args.blr * eff_batch_size / 256 + print("base lr: %.2e" % (args.lr * 256 / eff_batch_size)) + print("actual lr: %.2e" % args.lr) + print("accumulate grad iterations: %d" % args.accum_iter) + print("effective batch size: %d" % 
eff_batch_size) + + if args.distributed: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True, static_graph=True) + model_without_ddp = model.module + + param_groups = misc.get_parameter_groups(model_without_ddp, args.weight_decay) # following timm: set wd as 0 for bias and norm layers + optimizer = torch.optim.AdamW(param_groups, lr=args.lr, betas=(0.9, 0.95)) + print(optimizer) + loss_scaler = NativeScaler() + + misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler) + + if global_rank == 0 and args.output_dir is not None: + log_writer = SummaryWriter(log_dir=args.output_dir) + else: + log_writer = None + + print(f"Start training until {args.max_epoch} epochs") + start_time = time.time() + for epoch in range(args.start_epoch, args.max_epoch): + if world_size>1: + data_loader_train.sampler.set_epoch(epoch) + + train_stats = train_one_epoch( + model, criterion, data_loader_train, + optimizer, device, epoch, loss_scaler, + log_writer=log_writer, + args=args + ) + + if args.output_dir and epoch % args.save_freq == 0 : + misc.save_model( + args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, + loss_scaler=loss_scaler, epoch=epoch, fname='last') + + if args.output_dir and (epoch % args.keep_freq == 0 or epoch + 1 == args.max_epoch) and (epoch>0 or args.max_epoch==1): + misc.save_model( + args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, + loss_scaler=loss_scaler, epoch=epoch) + + log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, + 'epoch': epoch,} + + if args.output_dir and misc.is_main_process(): + if log_writer is not None: + log_writer.flush() + with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f: + f.write(json.dumps(log_stats) + "\n") + + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('Training time {}'.format(total_time_str)) + + + + +def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, + data_loader: Iterable, optimizer: torch.optim.Optimizer, + device: torch.device, epoch: int, loss_scaler, + log_writer=None, + args=None): + model.train(True) + metric_logger = misc.MetricLogger(delimiter=" ") + metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}')) + header = 'Epoch: [{}]'.format(epoch) + accum_iter = args.accum_iter + + optimizer.zero_grad() + + if log_writer is not None: + print('log_dir: {}'.format(log_writer.log_dir)) + + for data_iter_step, (image1, image2) in enumerate(metric_logger.log_every(data_loader, args.print_freq, header)): + + # we use a per iteration lr scheduler + if data_iter_step % accum_iter == 0: + misc.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args) + + image1 = image1.to(device, non_blocking=True) + image2 = image2.to(device, non_blocking=True) + with torch.cuda.amp.autocast(enabled=bool(args.amp)): + out, mask, target = model(image1, image2) + loss = criterion(out, mask, target) + + loss_value = loss.item() + + if not math.isfinite(loss_value): + print("Loss is {}, stopping training".format(loss_value)) + sys.exit(1) + + loss /= accum_iter + loss_scaler(loss, optimizer, parameters=model.parameters(), + update_grad=(data_iter_step + 1) % accum_iter == 0) + if (data_iter_step + 1) % accum_iter == 0: + optimizer.zero_grad() + + torch.cuda.synchronize() + + metric_logger.update(loss=loss_value) + + lr = 
optimizer.param_groups[0]["lr"] + metric_logger.update(lr=lr) + + loss_value_reduce = misc.all_reduce_mean(loss_value) + if log_writer is not None and ((data_iter_step + 1) % (accum_iter*args.print_freq)) == 0: + # x-axis is based on epoch_1000x in the tensorboard, calibrating different curves when batch size changes + epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000) + log_writer.add_scalar('train_loss', loss_value_reduce, epoch_1000x) + log_writer.add_scalar('lr', lr, epoch_1000x) + + # gather the stats from all processes + metric_logger.synchronize_between_processes() + print("Averaged stats:", metric_logger) + return {k: meter.global_avg for k, meter in metric_logger.meters.items()} + + + +if __name__ == '__main__': + args = get_args_parser() + args = args.parse_args() + main(args) diff --git a/dynamic_predictor/croco/stereoflow/README.MD b/dynamic_predictor/croco/stereoflow/README.MD new file mode 100644 index 0000000000000000000000000000000000000000..81595380fadd274b523e0cf77921b1b65cbedb34 --- /dev/null +++ b/dynamic_predictor/croco/stereoflow/README.MD @@ -0,0 +1,318 @@ +## CroCo-Stereo and CroCo-Flow + +This README explains how to use CroCo-Stereo and CroCo-Flow as well as how they were trained. +All commands should be launched from the root directory. + +### Simple inference example + +We provide a simple inference example for CroCo-Stereo and CroCo-Flow in the notebook `croco-stereo-flow-demo.ipynb`. +Before running it, please download the trained models with: +``` +bash stereoflow/download_model.sh crocostereo.pth +bash stereoflow/download_model.sh crocoflow.pth +``` + +### Prepare data for training or evaluation + +Put the datasets used for training/evaluation in `./data/stereoflow` (or update the paths at the top of `stereoflow/datasets_stereo.py` and `stereoflow/datasets_flow.py`). +Please find below how the file structure should look for each dataset (a quick layout sanity check is sketched after the list):
+FlyingChairs + +``` +./data/stereoflow/FlyingChairs/ +└───chairs_split.txt +└───data/ + └─── ... +``` +
+ +
+MPI-Sintel + +``` +./data/stereoflow/MPI-Sintel/ +└───training/ +│ └───clean/ +│ └───final/ +│ └───flow/ +└───test/ + └───clean/ + └───final/ +``` +
+ +
+SceneFlow (including FlyingThings) + +``` +./data/stereoflow/SceneFlow/ +└───Driving/ +│ └───disparity/ +│ └───frames_cleanpass/ +│ └───frames_finalpass/ +└───FlyingThings/ +│ └───disparity/ +│ └───frames_cleanpass/ +│ └───frames_finalpass/ +│ └───optical_flow/ +└───Monkaa/ + └───disparity/ + └───frames_cleanpass/ + └───frames_finalpass/ +``` +
+ +
+TartanAir + +``` +./data/stereoflow/TartanAir/ +└───abandonedfactory/ +│ └───.../ +└───abandonedfactory_night/ +│ └───.../ +└───.../ +``` +
+ +
+Booster + +``` +./data/stereoflow/booster_gt/ +└───train/ + └───balanced/ + └───Bathroom/ + └───Bedroom/ + └───... +``` +
+ +
+CREStereo + +``` +./data/stereoflow/crenet_stereo_trainset/ +└───stereo_trainset/ + └───crestereo/ + └───hole/ + └───reflective/ + └───shapenet/ + └───tree/ +``` +
+ +
+ETH3D Two-view Low-res + +``` +./data/stereoflow/eth3d_lowres/ +└───test/ +│ └───lakeside_1l/ +│ └───... +└───train/ +│ └───delivery_area_1l/ +│ └───... +└───train_gt/ + └───delivery_area_1l/ + └───... +``` +
+ +
+KITTI 2012 + +``` +./data/stereoflow/kitti-stereo-2012/ +└───testing/ +│ └───colored_0/ +│ └───colored_1/ +└───training/ + └───colored_0/ + └───colored_1/ + └───disp_occ/ + └───flow_occ/ +``` +
+ +
+KITTI 2015 + +``` +./data/stereoflow/kitti-stereo-2015/ +└───testing/ +│ └───image_2/ +│ └───image_3/ +└───training/ + └───image_2/ + └───image_3/ + └───disp_occ_0/ + └───flow_occ/ +``` +
+ +
+Middlebury + +``` +./data/stereoflow/middlebury +└───2005/ +│ └───train/ +│ └───Art/ +│ └───... +└───2006/ +│ └───Aloe/ +│ └───Baby1/ +│ └───... +└───2014/ +│ └───Adirondack-imperfect/ +│ └───Adirondack-perfect/ +│ └───... +└───2021/ +│ └───data/ +│ └───artroom1/ +│ └───artroom2/ +│ └───... +└───MiddEval3_F/ + └───test/ + │ └───Australia/ + │ └───... + └───train/ + └───Adirondack/ + └───... +``` +
+ +
+Spring + +``` +./data/stereoflow/spring/ +└───test/ +│ └───0003/ +│ └───... +└───train/ + └───0001/ + └───... +``` +
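A quick way to catch layout mistakes early is to check a few of the paths above before launching anything; this small ad-hoc script (illustrative only, not part of the repository) assumes the default `./data/stereoflow` root:

```
# ad-hoc layout check for some of the trees listed above; dataset roots can
# instead be edited at the top of stereoflow/datasets_stereo.py / datasets_flow.py
import os

expected = {
    'FlyingChairs': ['chairs_split.txt', 'data'],
    'MPI-Sintel': ['training/clean', 'training/final', 'training/flow'],
    'kitti-stereo-2015': ['training/image_2', 'training/disp_occ_0'],
    'spring': ['train', 'test'],
}
root = './data/stereoflow'
for name, subs in expected.items():
    for sub in subs:
        p = os.path.join(root, name, sub)
        print(('ok      ' if os.path.exists(p) else 'MISSING ') + p)
```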
+ + +### CroCo-Stereo + +##### Main model + +The main training of CroCo-Stereo was performed on a series of datasets, and it was used as is for the Middlebury v3 benchmark. + +``` +# Download the model +bash stereoflow/download_model.sh crocostereo.pth +# Middlebury v3 submission +python stereoflow/test.py --model stereoflow_models/crocostereo.pth --dataset "MdEval3('all_full')" --save submission --tile_overlap 0.9 +# Training command that was used, using checkpoint-last.pth +python -u stereoflow/train.py stereo --criterion "LaplacianLossBounded2()" --dataset "CREStereo('train')+SceneFlow('train_allpass')+30*ETH3DLowRes('train')+50*Md05('train')+50*Md06('train')+50*Md14('train')+50*Md21('train')+50*MdEval3('train_full')+Booster('train_balanced')" --val_dataset "SceneFlow('test1of100_finalpass')+SceneFlow('test1of100_cleanpass')+ETH3DLowRes('subval')+Md05('subval')+Md06('subval')+Md14('subval')+Md21('subval')+MdEval3('subval_full')+Booster('subval_balanced')" --lr 3e-5 --batch_size 6 --epochs 32 --pretrained pretrained_models/CroCo_V2_ViTLarge_BaseDecoder.pth --output_dir xps/crocostereo/main/ +# or it can be launched on multiple gpus (while maintaining the effective batch size), e.g. on 3 gpus: +torchrun --nproc_per_node 3 stereoflow/train.py stereo --criterion "LaplacianLossBounded2()" --dataset "CREStereo('train')+SceneFlow('train_allpass')+30*ETH3DLowRes('train')+50*Md05('train')+50*Md06('train')+50*Md14('train')+50*Md21('train')+50*MdEval3('train_full')+Booster('train_balanced')" --val_dataset "SceneFlow('test1of100_finalpass')+SceneFlow('test1of100_cleanpass')+ETH3DLowRes('subval')+Md05('subval')+Md06('subval')+Md14('subval')+Md21('subval')+MdEval3('subval_full')+Booster('subval_balanced')" --lr 3e-5 --batch_size 2 --epochs 32 --pretrained pretrained_models/CroCo_V2_ViTLarge_BaseDecoder.pth --output_dir xps/crocostereo/main/ +``` + +For evaluation on the validation sets, we also provide the model trained on the `subtrain` subset of the training sets. + +``` +# Download the model +bash stereoflow/download_model.sh crocostereo_subtrain.pth +# Evaluation on validation sets +python stereoflow/test.py --model stereoflow_models/crocostereo_subtrain.pth --dataset "MdEval3('subval_full')+ETH3DLowRes('subval')+SceneFlow('test_finalpass')+SceneFlow('test_cleanpass')" --save metrics --tile_overlap 0.9 +# Training command that was used (same as above but on subtrain, using checkpoint-best.pth), can also be launched on multiple gpus +python -u stereoflow/train.py stereo --criterion "LaplacianLossBounded2()" --dataset "CREStereo('train')+SceneFlow('train_allpass')+30*ETH3DLowRes('subtrain')+50*Md05('subtrain')+50*Md06('subtrain')+50*Md14('subtrain')+50*Md21('subtrain')+50*MdEval3('subtrain_full')+Booster('subtrain_balanced')" --val_dataset "SceneFlow('test1of100_finalpass')+SceneFlow('test1of100_cleanpass')+ETH3DLowRes('subval')+Md05('subval')+Md06('subval')+Md14('subval')+Md21('subval')+MdEval3('subval_full')+Booster('subval_balanced')" --lr 3e-5 --batch_size 6 --epochs 32 --pretrained pretrained_models/CroCo_V2_ViTLarge_BaseDecoder.pth --output_dir xps/crocostereo/main_subtrain/ +```
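The `--dataset` strings above are composed programmatically: an integer multiplier like `30*ETH3DLowRes('train')` oversamples a dataset by repeating its pair list (see `__rmul__` in `stereoflow/datasets_flow.py` further below), and `+` is plain `torch.utils.data.Dataset` concatenation. A minimal sketch of these mechanics (illustrative only, not repo code):

```
# toy dataset reproducing the __rmul__ oversampling trick used below
import torch.utils.data as td

class ToyPairs(td.Dataset):
    def __init__(self, pairnames):
        self.pairnames = list(pairnames)
    def __len__(self):
        return len(self.pairnames)
    def __getitem__(self, i):
        return self.pairnames[i]
    def __rmul__(self, v):
        self.pairnames = v * self.pairnames  # 30*Dataset repeats every pair 30 times
        return self

mixed = 3 * ToyPairs(['eth3d_0', 'eth3d_1']) + ToyPairs(['crestereo_0'])
print(len(mixed))  # 7: the first dataset is oversampled 3x, '+' concatenates
```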
+ Model for ETH3D + The model used for the submission on ETH3D is trained with the same command but using an unbounded Laplacian loss. + + # Download the model + bash stereoflow/download_model.sh crocostereo_eth3d.pth + # ETH3D submission + python stereoflow/test.py --model stereoflow_models/crocostereo_eth3d.pth --dataset "ETH3DLowRes('all')" --save submission --tile_overlap 0.9 + # Training command that was used + python -u stereoflow/train.py stereo --criterion "LaplacianLoss()" --tile_conf_mode conf_expbeta3 --dataset "CREStereo('train')+SceneFlow('train_allpass')+30*ETH3DLowRes('train')+50*Md05('train')+50*Md06('train')+50*Md14('train')+50*Md21('train')+50*MdEval3('train_full')+Booster('train_balanced')" --val_dataset "SceneFlow('test1of100_finalpass')+SceneFlow('test1of100_cleanpass')+ETH3DLowRes('subval')+Md05('subval')+Md06('subval')+Md14('subval')+Md21('subval')+MdEval3('subval_full')+Booster('subval_balanced')" --lr 3e-5 --batch_size 6 --epochs 32 --pretrained pretrained_models/CroCo_V2_ViTLarge_BaseDecoder.pth --output_dir xps/crocostereo/main_eth3d/ + +
+ +
+ Main model finetuned on Kitti + + # Download the model + bash stereoflow/download_model.sh crocostereo_finetune_kitti.pth + # Kitti submission + python stereoflow/test.py --model stereoflow_models/crocostereo_finetune_kitti.pth --dataset "Kitti15('test')" --save submission --tile_overlap 0.9 + # Training that was used + python -u stereoflow/train.py stereo --crop 352 1216 --criterion "LaplacianLossBounded2()" --dataset "Kitti12('train')+Kitti15('train')" --lr 3e-5 --batch_size 1 --accum_iter 6 --epochs 20 --pretrained pretrained_models/CroCo_V2_ViTLarge_BaseDecoder.pth --start_from stereoflow_models/crocostereo.pth --output_dir xps/crocostereo/finetune_kitti/ --save_every 5 +
+ +
+ Main model finetuned on Spring + + # Download the model + bash stereoflow/download_model.sh crocostereo_finetune_spring.pth + # Spring submission + python stereoflow/test.py --model stereoflow_models/crocostereo_finetune_spring.pth --dataset "Spring('test')" --save submission --tile_overlap 0.9 + # Training command that was used + python -u stereoflow/train.py stereo --criterion "LaplacianLossBounded2()" --dataset "Spring('train')" --lr 3e-5 --batch_size 6 --epochs 8 --pretrained pretrained_models/CroCo_V2_ViTLarge_BaseDecoder.pth --start_from stereoflow_models/crocostereo.pth --output_dir xps/crocostereo/finetune_spring/ +
+ +
+ Smaller models + To train CroCo-Stereo with smaller CroCo pretrained models, simply replace the --pretrained argument (see the example command below). To download the smaller CroCo-Stereo models based on CroCo v2 pretraining with a ViT-Base encoder and a Small decoder, use bash stereoflow/download_model.sh crocostereo_subtrain_vitb_smalldecoder.pth, and for the model with a ViT-Base encoder and a Base decoder, use bash stereoflow/download_model.sh crocostereo_subtrain_vitb_basedecoder.pth. +
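As an illustration, assuming the CroCo v2 checkpoint naming follows the `CroCo_V2_ViTLarge_BaseDecoder.pth` pattern used above, the ViT-Base/Small-decoder variant would be trained by swapping only the `--pretrained` flag (the checkpoint filename below is hypothetical; dataset strings elided):

```
# hypothetical variant of the main training command: only --pretrained changes
python -u stereoflow/train.py stereo --criterion "LaplacianLossBounded2()" \
    --dataset "..." --val_dataset "..." --lr 3e-5 --batch_size 6 --epochs 32 \
    --pretrained pretrained_models/CroCo_V2_ViTBase_SmallDecoder.pth \
    --output_dir xps/crocostereo/main_vitb_smalldecoder/
```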
+ + +### CroCo-Flow + +##### Main model + +The main training of CroCo-Flow was performed on the FlyingThings, FlyingChairs, MPI-Sintel and TartanAir datasets. +It was used for our submission to the MPI-Sintel benchmark. + +``` +# Download the model +bash stereoflow/download_model.sh crocoflow.pth +# Evaluation +python stereoflow/test.py --model stereoflow_models/crocoflow.pth --dataset "MPISintel('subval_cleanpass')+MPISintel('subval_finalpass')" --save metrics --tile_overlap 0.9 +# Sintel submission +python stereoflow/test.py --model stereoflow_models/crocoflow.pth --dataset "MPISintel('test_allpass')" --save submission --tile_overlap 0.9 +# Training command that was used, with checkpoint-best.pth +python -u stereoflow/train.py flow --criterion "LaplacianLossBounded()" --dataset "40*MPISintel('subtrain_cleanpass')+40*MPISintel('subtrain_finalpass')+4*FlyingThings('train_allpass')+4*FlyingChairs('train')+TartanAir('train')" --val_dataset "MPISintel('subval_cleanpass')+MPISintel('subval_finalpass')" --lr 2e-5 --batch_size 8 --epochs 240 --img_per_epoch 30000 --pretrained pretrained_models/CroCo_V2_ViTLarge_BaseDecoder.pth --output_dir xps/crocoflow/main/ +``` + +##### Other models + +
+ Main model finetuned on Kitti + + # Download the model + bash stereoflow/download_model.sh crocoflow_finetune_kitti.pth + # Kitti submission + python stereoflow/test.py --model stereoflow_models/crocoflow_finetune_kitti.pth --dataset "Kitti15('test')" --save submission --tile_overlap 0.99 + # Training that was used, with checkpoint-last.pth + python -u stereoflow/train.py flow --crop 352 1216 --criterion "LaplacianLossBounded()" --dataset "Kitti15('train')+Kitti12('train')" --lr 2e-5 --batch_size 1 --accum_iter 8 --epochs 150 --save_every 5 --pretrained pretrained_models/CroCo_V2_ViTLarge_BaseDecoder.pth --start_from stereoflow_models/crocoflow.pth --output_dir xps/crocoflow/finetune_kitti/ +
+ +
+ Main model finetuned on Spring + + # Download the model + bash stereoflow/download_model.sh crocoflow_finetune_spring.pth + # Spring submission + python stereoflow/test.py --model stereoflow_models/crocoflow_finetune_spring.pth --dataset "Spring('test')" --save submission --tile_overlap 0.9 + # Training command that was used, with checkpoint-last.pth + python -u stereoflow/train.py flow --criterion "LaplacianLossBounded()" --dataset "Spring('train')" --lr 2e-5 --batch_size 8 --epochs 12 --pretrained pretrained_models/CroCo_V2_ViTLarge_BaseDecoder.pth --start_from stereoflow_models/crocoflow.pth --output_dir xps/crocoflow/finetune_spring/ +
+ +
+ Smaller models + To train CroCo-Flow with smaller CroCo pretrained models, simply replace the --pretrained argument. To download the smaller CroCo-Flow models based on CroCo v2 pretraining with a ViT-Base encoder and a Small decoder, use bash stereoflow/download_model.sh crocoflow_vitb_smalldecoder.pth, and for the model with a ViT-Base encoder and a Base decoder, use bash stereoflow/download_model.sh crocoflow_vitb_basedecoder.pth. +
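Across the commands above, the single-GPU, multi-GPU and gradient-accumulation variants are chosen so that the effective batch size stays constant; a small illustration of the bookkeeping (same formula as in croco/pretrain.py, where eff_batch_size = batch_size * accum_iter * world_size):

```
# effective batch size bookkeeping for the training commands in this README
def eff_batch_size(batch_size, accum_iter=1, n_gpus=1):
    return batch_size * accum_iter * n_gpus

# CroCo-Stereo main model: 6 on one gpu == 2 on three gpus
assert eff_batch_size(6) == eff_batch_size(2, n_gpus=3) == 6
# Kitti finetuning keeps memory low via gradient accumulation instead
print(eff_batch_size(1, accum_iter=6))  # 6, CroCo-Stereo Kitti finetune
print(eff_batch_size(1, accum_iter=8))  # 8, CroCo-Flow Kitti finetune
```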
diff --git a/dynamic_predictor/croco/stereoflow/augmentor.py b/dynamic_predictor/croco/stereoflow/augmentor.py new file mode 100644 index 0000000000000000000000000000000000000000..69e6117151988d94cbc4b385e0d88e982133bf10 --- /dev/null +++ b/dynamic_predictor/croco/stereoflow/augmentor.py @@ -0,0 +1,290 @@ +# Copyright (C) 2022-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). + +# -------------------------------------------------------- +# Data augmentation for training stereo and flow +# -------------------------------------------------------- + +# References +# https://github.com/autonomousvision/unimatch/blob/master/dataloader/stereo/transforms.py +# https://github.com/autonomousvision/unimatch/blob/master/dataloader/flow/transforms.py + + +import numpy as np +import random +from PIL import Image + +import cv2 +cv2.setNumThreads(0) +cv2.ocl.setUseOpenCL(False) + +import torch +from torchvision.transforms import ColorJitter +import torchvision.transforms.functional as FF + +class StereoAugmentor(object): + + def __init__(self, crop_size, scale_prob=0.5, scale_xonly=True, lhth=800., lminscale=0.0, lmaxscale=1.0, hminscale=-0.2, hmaxscale=0.4, scale_interp_nearest=True, rightjitterprob=0.5, v_flip_prob=0.5, color_aug_asym=True, color_choice_prob=0.5): + self.crop_size = crop_size + self.scale_prob = scale_prob + self.scale_xonly = scale_xonly + self.lhth = lhth + self.lminscale = lminscale + self.lmaxscale = lmaxscale + self.hminscale = hminscale + self.hmaxscale = hmaxscale + self.scale_interp_nearest = scale_interp_nearest + self.rightjitterprob = rightjitterprob + self.v_flip_prob = v_flip_prob + self.color_aug_asym = color_aug_asym + self.color_choice_prob = color_choice_prob + + def _random_scale(self, img1, img2, disp): + ch,cw = self.crop_size + h,w = img1.shape[:2] + if self.scale_prob>0. and np.random.rand()1.: + scale_x = clip_scale + scale_y = scale_x if not self.scale_xonly else 1.0 + img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR) + img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR) + disp = cv2.resize(disp, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR if not self.scale_interp_nearest else cv2.INTER_NEAREST) * scale_x + return img1, img2, disp + + def _random_crop(self, img1, img2, disp): + h,w = img1.shape[:2] + ch,cw = self.crop_size + assert ch<=h and cw<=w, (img1.shape, h,w,ch,cw) + offset_x = np.random.randint(w - cw + 1) + offset_y = np.random.randint(h - ch + 1) + img1 = img1[offset_y:offset_y+ch,offset_x:offset_x+cw] + img2 = img2[offset_y:offset_y+ch,offset_x:offset_x+cw] + disp = disp[offset_y:offset_y+ch,offset_x:offset_x+cw] + return img1, img2, disp + + def _random_vflip(self, img1, img2, disp): + # vertical flip + if self.v_flip_prob>0 and np.random.rand() < self.v_flip_prob: + img1 = np.copy(np.flipud(img1)) + img2 = np.copy(np.flipud(img2)) + disp = np.copy(np.flipud(disp)) + return img1, img2, disp + + def _random_rotate_shift_right(self, img2): + if self.rightjitterprob>0. 
and np.random.rand() 0) & (xx < wd1) & (yy > 0) & (yy < ht1) + xx = xx[v] + yy = yy[v] + flow1 = flow1[v] + + flow = np.inf * np.ones([ht1, wd1, 2], dtype=np.float32) # invalid value every where, before we fill it with the correct ones + flow[yy, xx] = flow1 + return flow + + def spatial_transform(self, img1, img2, flow, dname): + + if np.random.rand() < self.spatial_aug_prob: + # randomly sample scale + ht, wd = img1.shape[:2] + clip_min_scale = np.maximum( + (self.crop_size[0] + 8) / float(ht), + (self.crop_size[1] + 8) / float(wd)) + min_scale, max_scale = self.min_scale, self.max_scale + scale = 2 ** np.random.uniform(self.min_scale, self.max_scale) + scale_x = scale + scale_y = scale + if np.random.rand() < self.stretch_prob: + scale_x *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch) + scale_y *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch) + scale_x = np.clip(scale_x, clip_min_scale, None) + scale_y = np.clip(scale_y, clip_min_scale, None) + # rescale the images + img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR) + img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR) + flow = self._resize_flow(flow, scale_x, scale_y, factor=2.0 if dname=='Spring' else 1.0) + elif dname=="Spring": + flow = self._resize_flow(flow, 1.0, 1.0, factor=2.0) + + if self.h_flip_prob>0. and np.random.rand() < self.h_flip_prob: # h-flip + img1 = img1[:, ::-1] + img2 = img2[:, ::-1] + flow = flow[:, ::-1] * [-1.0, 1.0] + + if self.v_flip_prob>0. and np.random.rand() < self.v_flip_prob: # v-flip + img1 = img1[::-1, :] + img2 = img2[::-1, :] + flow = flow[::-1, :] * [1.0, -1.0] + + # In case no cropping + if img1.shape[0] - self.crop_size[0] > 0: + y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0]) + else: + y0 = 0 + if img1.shape[1] - self.crop_size[1] > 0: + x0 = np.random.randint(0, img1.shape[1] - self.crop_size[1]) + else: + x0 = 0 + + img1 = img1[y0:y0 + self.crop_size[0], x0:x0 + self.crop_size[1]] + img2 = img2[y0:y0 + self.crop_size[0], x0:x0 + self.crop_size[1]] + flow = flow[y0:y0 + self.crop_size[0], x0:x0 + self.crop_size[1]] + + return img1, img2, flow + + def __call__(self, img1, img2, flow, dname): + img1, img2, flow = self.spatial_transform(img1, img2, flow, dname) + img1, img2 = self.color_transform(img1, img2) + img1 = np.ascontiguousarray(img1) + img2 = np.ascontiguousarray(img2) + flow = np.ascontiguousarray(flow) + return img1, img2, flow \ No newline at end of file diff --git a/dynamic_predictor/croco/stereoflow/criterion.py b/dynamic_predictor/croco/stereoflow/criterion.py new file mode 100644 index 0000000000000000000000000000000000000000..57792ebeeee34827b317a4d32b7445837bb33f17 --- /dev/null +++ b/dynamic_predictor/croco/stereoflow/criterion.py @@ -0,0 +1,251 @@ +# Copyright (C) 2022-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). 
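+# +# The confidence-weighted losses below follow the Laplacian negative +# log-likelihood |gt - pred| / exp(conf) + conf (up to a constant log 2), +# where conf is a predicted log-scale; the bounded variants differ only in +# how they constrain conf.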
+ +# -------------------------------------------------------- +# Losses, metrics per batch, metrics per dataset +# -------------------------------------------------------- + +import torch +from torch import nn +import torch.nn.functional as F + +def _get_gtnorm(gt): + if gt.size(1)==1: # stereo + return gt + # flow + return torch.sqrt(torch.sum(gt**2, dim=1, keepdims=True)) # Bx1xHxW + +############ losses without confidence + +class L1Loss(nn.Module): + + def __init__(self, max_gtnorm=None): + super().__init__() + self.max_gtnorm = max_gtnorm + self.with_conf = False + + def _error(self, gt, predictions): + return torch.abs(gt-predictions) + + def forward(self, predictions, gt, inspect=False): + mask = torch.isfinite(gt) + if self.max_gtnorm is not None: + mask *= _get_gtnorm(gt).expand(-1,gt.size(1),-1,-1)<self.max_gtnorm + if inspect: + return self._error(gt, predictions) + return self._error(gt[mask], predictions[mask]).mean() + +############ losses with confidence + +class LaplacianLoss(nn.Module): # used for CroCo-Stereo on ETH3D ; body assumed: standard confidence-weighted Laplacian NLL with unbounded log-scale conf + + def __init__(self, max_gtnorm=None): + super().__init__() + self.max_gtnorm = max_gtnorm + self.with_conf = True + + def forward(self, predictions, gt, conf): + mask = torch.isfinite(gt) + mask = mask[:,0,:,:] + if self.max_gtnorm is not None: mask *= _get_gtnorm(gt)[:,0,:,:]<self.max_gtnorm + conf = conf.squeeze(1) + return torch.mean(torch.abs(predictions-gt).sum(dim=1)[mask] / torch.exp(conf[mask]) + conf[mask]) # + log(2) => which is a constant + + +class LaplacianLossBounded(nn.Module): # used for CroCo-Flow ; in the equation of the paper, we have a=1/b + def __init__(self, max_gtnorm=10000., a=0.25, b=4.): + super().__init__() + self.max_gtnorm = max_gtnorm + self.with_conf = True + self.a, self.b = a, b + + def forward(self, predictions, gt, conf): + mask = torch.isfinite(gt) + mask = mask[:,0,:,:] + if self.max_gtnorm is not None: mask *= _get_gtnorm(gt)[:,0,:,:]<self.max_gtnorm + conf = self.b * torch.tanh(self.a * conf.squeeze(1)) # assumed tanh bounding of the predicted log-scale + return torch.mean(torch.abs(predictions-gt).sum(dim=1)[mask] / torch.exp(conf[mask]) + conf[mask]) # + log(2) => which is a constant + +class LaplacianLossBounded2(nn.Module): # used for CroCo-Stereo (except for ETH3D) ; in the equation of the paper, we have a=b + def __init__(self, max_gtnorm=None, a=3.0, b=3.0): + super().__init__() + self.max_gtnorm = max_gtnorm + self.with_conf = True + self.a, self.b = a, b + + def forward(self, predictions, gt, conf): + mask = torch.isfinite(gt) + mask = mask[:,0,:,:] + if self.max_gtnorm is not None: mask *= _get_gtnorm(gt)[:,0,:,:]<self.max_gtnorm + conf = self.b * torch.tanh(self.a * conf.squeeze(1)) # assumed tanh bounding of the predicted log-scale + return torch.mean(torch.abs(predictions-gt).sum(dim=1)[mask] / torch.exp(conf[mask]) + conf[mask]) # + log(2) => which is a constant + +############## metrics per batch + +class StereoMetrics(nn.Module): + + def __init__(self, do_quantile=False): + super().__init__() + self.bad_ths = [0.5,1,2,3] + self.do_quantile = do_quantile + + def forward(self, predictions, gt): + B = predictions.size(0) + metrics = {} + gtcopy = gt.clone() + mask = torch.isfinite(gtcopy) + gtcopy[~mask] = 999999.0 # we make a copy and put a non-infinite value, such that it does not become nan once multiplied by the mask value 0 + Npx = mask.view(B,-1).sum(dim=1) + L1error = (torch.abs(gtcopy-predictions)*mask).view(B,-1) + L2error = (torch.square(gtcopy-predictions)*mask).view(B,-1) + # avgerr + metrics['avgerr'] = torch.mean(L1error.sum(dim=1)/Npx ) + # rmse + metrics['rmse'] = torch.sqrt(L2error.sum(dim=1)/Npx).mean(dim=0) + # err > t for t in [0.5,1,2,3] + for ths in self.bad_ths: + metrics['bad@{:.1f}'.format(ths)] = (((L1error>ths)* mask.view(B,-1)).sum(dim=1)/Npx).mean(dim=0) * 100 + return metrics + +class FlowMetrics(nn.Module): + def __init__(self): + super().__init__() + self.bad_ths = [1,3,5] + + def forward(self, predictions, gt): + B = predictions.size(0) + metrics = {} + mask = torch.isfinite(gt[:,0,:,:]) # both x and y would be infinite + Npx = mask.view(B,-1).sum(dim=1) + gtcopy = gt.clone() # to compute L1/L2 error, we need to have non-infinite values; the error computed at these locations will be ignored + gtcopy[:,0,:,:][~mask] = 999999.0 + gtcopy[:,1,:,:][~mask] = 999999.0 + L1error = (torch.abs(gtcopy-predictions).sum(dim=1)*mask).view(B,-1) + L2error = (torch.sqrt(torch.sum(torch.square(gtcopy-predictions),dim=1))*mask).view(B,-1) + metrics['L1err'] = torch.mean(L1error.sum(dim=1)/Npx ) + metrics['EPE'] = torch.mean(L2error.sum(dim=1)/Npx ) + for ths in self.bad_ths: + metrics['bad@{:.1f}'.format(ths)] =
(((L2error>ths)* mask.view(B,-1)).sum(dim=1)/Npx).mean(dim=0) * 100 + return metrics + +############## metrics per dataset +## we update the average and maintain the number of pixels while adding data batch per batch +## at the beggining, call reset() +## after each batch, call add_batch(...) +## at the end: call get_results() + +class StereoDatasetMetrics(nn.Module): + + def __init__(self): + super().__init__() + self.bad_ths = [0.5,1,2,3] + + def reset(self): + self.agg_N = 0 # number of pixels so far + self.agg_L1err = torch.tensor(0.0) # L1 error so far + self.agg_Nbad = [0 for _ in self.bad_ths] # counter of bad pixels + self._metrics = None + + def add_batch(self, predictions, gt): + assert predictions.size(1)==1, predictions.size() + assert gt.size(1)==1, gt.size() + if gt.size(2)==predictions.size(2)*2 and gt.size(3)==predictions.size(3)*2: # special case for Spring ... + L1err = torch.minimum( torch.minimum( torch.minimum( + torch.sum(torch.abs(gt[:,:,0::2,0::2]-predictions),dim=1), + torch.sum(torch.abs(gt[:,:,1::2,0::2]-predictions),dim=1)), + torch.sum(torch.abs(gt[:,:,0::2,1::2]-predictions),dim=1)), + torch.sum(torch.abs(gt[:,:,1::2,1::2]-predictions),dim=1)) + valid = torch.isfinite(L1err) + else: + valid = torch.isfinite(gt[:,0,:,:]) # both x and y would be infinite + L1err = torch.sum(torch.abs(gt-predictions),dim=1) + N = valid.sum() + Nnew = self.agg_N + N + self.agg_L1err = float(self.agg_N)/Nnew * self.agg_L1err + L1err[valid].mean().cpu() * float(N)/Nnew + self.agg_N = Nnew + for i,th in enumerate(self.bad_ths): + self.agg_Nbad[i] += (L1err[valid]>th).sum().cpu() + + def _compute_metrics(self): + if self._metrics is not None: return + out = {} + out['L1err'] = self.agg_L1err.item() + for i,th in enumerate(self.bad_ths): + out['bad@{:.1f}'.format(th)] = (float(self.agg_Nbad[i]) / self.agg_N).item() * 100.0 + self._metrics = out + + def get_results(self): + self._compute_metrics() # to avoid recompute them multiple times + return self._metrics + +class FlowDatasetMetrics(nn.Module): + + def __init__(self): + super().__init__() + self.bad_ths = [0.5,1,3,5] + self.speed_ths = [(0,10),(10,40),(40,torch.inf)] + + def reset(self): + self.agg_N = 0 # number of pixels so far + self.agg_L1err = torch.tensor(0.0) # L1 error so far + self.agg_L2err = torch.tensor(0.0) # L2 (=EPE) error so far + self.agg_Nbad = [0 for _ in self.bad_ths] # counter of bad pixels + self.agg_EPEspeed = [torch.tensor(0.0) for _ in self.speed_ths] # EPE per speed bin so far + self.agg_Nspeed = [0 for _ in self.speed_ths] # N pixels per speed bin so far + self._metrics = None + self.pairname_results = {} + + def add_batch(self, predictions, gt): + assert predictions.size(1)==2, predictions.size() + assert gt.size(1)==2, gt.size() + if gt.size(2)==predictions.size(2)*2 and gt.size(3)==predictions.size(3)*2: # special case for Spring ... 
+            L1err = torch.minimum( torch.minimum( torch.minimum(
+                torch.sum(torch.abs(gt[:,:,0::2,0::2]-predictions),dim=1),
+                torch.sum(torch.abs(gt[:,:,1::2,0::2]-predictions),dim=1)),
+                torch.sum(torch.abs(gt[:,:,0::2,1::2]-predictions),dim=1)),
+                torch.sum(torch.abs(gt[:,:,1::2,1::2]-predictions),dim=1))
+            L2err = torch.minimum( torch.minimum( torch.minimum(
+                torch.sqrt(torch.sum(torch.square(gt[:,:,0::2,0::2]-predictions),dim=1)),
+                torch.sqrt(torch.sum(torch.square(gt[:,:,1::2,0::2]-predictions),dim=1))),
+                torch.sqrt(torch.sum(torch.square(gt[:,:,0::2,1::2]-predictions),dim=1))),
+                torch.sqrt(torch.sum(torch.square(gt[:,:,1::2,1::2]-predictions),dim=1)))
+            valid = torch.isfinite(L1err)
+            gtspeed = (torch.sqrt(torch.sum(torch.square(gt[:,:,0::2,0::2]),dim=1)) + torch.sqrt(torch.sum(torch.square(gt[:,:,0::2,1::2]),dim=1)) +\
+                       torch.sqrt(torch.sum(torch.square(gt[:,:,1::2,0::2]),dim=1)) + torch.sqrt(torch.sum(torch.square(gt[:,:,1::2,1::2]),dim=1)) ) / 4.0 # let's just average them
+        else:
+            valid = torch.isfinite(gt[:,0,:,:]) # both x and y would be infinite
+            L1err = torch.sum(torch.abs(gt-predictions),dim=1)
+            L2err = torch.sqrt(torch.sum(torch.square(gt-predictions),dim=1))
+            gtspeed = torch.sqrt(torch.sum(torch.square(gt),dim=1))
+        N = valid.sum()
+        Nnew = self.agg_N + N
+        self.agg_L1err = float(self.agg_N)/Nnew * self.agg_L1err + L1err[valid].mean().cpu() * float(N)/Nnew
+        self.agg_L2err = float(self.agg_N)/Nnew * self.agg_L2err + L2err[valid].mean().cpu() * float(N)/Nnew
+        self.agg_N = Nnew
+        for i,th in enumerate(self.bad_ths):
+            self.agg_Nbad[i] += (L2err[valid]>th).sum().cpu()
+        for i,(th1,th2) in enumerate(self.speed_ths):
+            vv = (gtspeed[valid]>=th1) * (gtspeed[valid]<th2)
+            iN = vv.sum()
+            if iN>0: # update the running EPE of this speed bin, mirroring the global averages above
+                iNnew = self.agg_Nspeed[i] + iN
+                self.agg_EPEspeed[i] = float(self.agg_Nspeed[i])/iNnew * self.agg_EPEspeed[i] + L2err[valid][vv].mean().cpu() * float(iN)/iNnew
+                self.agg_Nspeed[i] = iNnew
+
+    def _compute_metrics(self):
+        if self._metrics is not None: return
+        out = {}
+        out['L1err'] = self.agg_L1err.item()
+        out['EPE'] = self.agg_L2err.item()
+        for i,th in enumerate(self.bad_ths):
+            out['bad@{:.1f}'.format(th)] = (float(self.agg_Nbad[i]) / self.agg_N).item() * 100.0
+        for i,(th1,th2) in enumerate(self.speed_ths):
+            out['EPE_s{:g}-{:g}'.format(th1,th2)] = self.agg_EPEspeed[i].item() # per-speed-bin EPE (key format illustrative)
+        self._metrics = out
+
+    def get_results(self):
+        self._compute_metrics() # to avoid recomputing them multiple times
+        return self._metrics
diff --git a/dynamic_predictor/croco/stereoflow/datasets_flow.py b/dynamic_predictor/croco/stereoflow/datasets_flow.py
new file mode 100644
--- /dev/null
+++ b/dynamic_predictor/croco/stereoflow/datasets_flow.py
+# Copyright (C) 2022-present Naver Corporation. All rights reserved.
+# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
+
+# --------------------------------------------------------
+# Dataset structure for flow
+# --------------------------------------------------------
+
+import sys, os
+import os.path as osp
+import pickle
+import struct
+import numpy as np
+from PIL import Image
+import h5py
+
+import torch
+from torch.utils import data
+
+from .augmentor import FlowAugmentor
+
+# root paths follow the same convention as in datasets_stereo.py (adjust to your data layout)
+dataset_to_root = {
+    'TartanAir': './data/stereoflow/TartanAir/',
+    'FlyingChairs': './data/stereoflow/FlyingChairs/',
+    'FlyingThings': './data/stereoflow/FlyingThings/',
+    'MPISintel': './data/stereoflow/MPI-Sintel/',
+    'Spring': './data/stereoflow/spring/',
+    'Kitti15': './data/stereoflow/kitti-stereo-2015/',
+    'Kitti12': './data/stereoflow/kitti-stereo-2012/',
+}
+cache_dir = "./data/stereoflow/datasets_flow_cache/"
+
+# image/flow-to-tensor helpers, mirroring their counterparts in datasets_stereo.py
+in1k_mean = torch.tensor([0.485, 0.456, 0.406]).view(3,1,1)
+in1k_std = torch.tensor([0.229, 0.224, 0.225]).view(3,1,1)
+def img_to_tensor(img):
+    img = torch.from_numpy(img).permute(2, 0, 1).float() / 255.
+    img = (img-in1k_mean)/in1k_std
+    return img
+def flow_to_tensor(flow):
+    return torch.from_numpy(flow).permute(2, 0, 1).float()
+def _read_img(filename):
+    return np.asarray(Image.open(filename).convert('RGB'))
+
+class FlowDataset(data.Dataset):
+
+    def __init__(self, split, augmentor=False, crop_size=None, totensor=True):
+        self.split = split
+        if not augmentor: assert crop_size is None
+        if crop_size: assert augmentor
+        self.crop_size = crop_size
+        self.augmentor_str = augmentor
+        self.augmentor = FlowAugmentor(crop_size) if augmentor else None
+        self.totensor = totensor
+        self.rmul = 1 # keep track of rmul
+        self.has_constant_resolution = True # whether the dataset has constant resolution or not (=> don't use batch_size>1 at test time)
+        self._prepare_data()
+        self._load_or_build_cache()
+
+    def _prepare_data(self):
+        """
+        to be defined for each dataset
+        """
+        raise NotImplementedError
+
+    def __len__(self):
+        return len(self.pairnames) # each pairname is typically of the form (str, int1, int2)
+
+    def __getitem__(self, index):
+        pairname = self.pairnames[index]
+
+        # get filenames
+        img1name = self.pairname_to_img1name(pairname)
+        img2name = self.pairname_to_img2name(pairname)
+        flowname = self.pairname_to_flowname(pairname) if self.pairname_to_flowname is not None else None
+
+        # load images and flow
+        img1 = _read_img(img1name)
+        img2 = _read_img(img2name)
+        flow = self.load_flow(flowname) if flowname is not None else None
+
+        # apply augmentations
+        if self.augmentor is not None:
+            img1, img2, flow = self.augmentor(img1, img2, flow, self.name)
+
+        if self.totensor:
+            img1 = img_to_tensor(img1)
+            img2 = img_to_tensor(img2)
+            if flow is not None:
+                flow = flow_to_tensor(flow)
+            else:
+                flow = torch.tensor([]) # to allow dataloader batching with the default collate_fn
+            pairname = str(pairname) # transform potential tuple to str to be able to batch it
+
+        return img1, img2, flow, pairname
+
+    def __rmul__(self, v):
+        self.rmul *= v
+        self.pairnames = v * self.pairnames
+        return self
+
+    def __str__(self):
+        return f'{self.__class__.__name__}_{self.split}'
+
+    def __repr__(self):
+        s = f'{self.__class__.__name__}(split={self.split}, augmentor={self.augmentor_str}, crop_size={str(self.crop_size)}, totensor={self.totensor})'
+        if self.rmul==1:
+            s+=f'\n\tnum pairs: {len(self.pairnames)}'
+        else:
+            s+=f'\n\tnum pairs: {len(self.pairnames)} ({len(self.pairnames)//self.rmul}x{self.rmul})'
+        return s
+
+    def _set_root(self):
+        self.root = dataset_to_root[self.name]
+        assert os.path.isdir(self.root), f"could not find root directory for dataset {self.name}: {self.root}"
+
+    def _load_or_build_cache(self):
+        cache_file = osp.join(cache_dir, self.name+'.pkl')
+        if osp.isfile(cache_file):
+            with open(cache_file, 'rb') as fid:
+                self.pairnames = pickle.load(fid)[self.split]
+        else:
+            tosave = self._build_cache()
+            os.makedirs(cache_dir, exist_ok=True)
+            with open(cache_file, 'wb') as fid:
+                pickle.dump(tosave, fid)
+            self.pairnames = tosave[self.split]
+
+class TartanAirDataset(FlowDataset):
+
+    def _prepare_data(self):
+        self.name = "TartanAir"
+        self._set_root()
+        assert self.split in ['train']
+        self.pairname_to_img1name = lambda pairname: osp.join(self.root, pairname[0], 'image_left/{:06d}_left.png'.format(pairname[1]))
+        self.pairname_to_img2name = lambda pairname: osp.join(self.root, pairname[0], 'image_left/{:06d}_left.png'.format(pairname[2]))
+        self.pairname_to_flowname = lambda pairname: osp.join(self.root, pairname[0], 'flow/{:06d}_{:06d}_flow.npy'.format(pairname[1],pairname[2]))
+        self.pairname_to_str = lambda pairname: os.path.join(pairname[0][pairname[0].find('/')+1:], '{:06d}_{:06d}'.format(pairname[1], pairname[2]))
+        self.load_flow = _read_numpy_flow
+
+    def _build_cache(self):
+        seqs = sorted(os.listdir(self.root))
+        pairs = [(osp.join(s,s,difficulty,Pxxx),int(a[:6]),int(a[:6])+1) for s in seqs for difficulty in ['Easy','Hard'] for Pxxx in sorted(os.listdir(osp.join(self.root,s,s,difficulty))) for a in sorted(os.listdir(osp.join(self.root,s,s,difficulty,Pxxx,'image_left/')))[:-1]]
+        assert len(pairs)==306268, "incorrect parsing of pairs in TartanAir"
+        tosave = {'train': pairs}
+        return tosave
+
+class FlyingChairsDataset(FlowDataset):
+
+    def _prepare_data(self):
+        self.name = "FlyingChairs"
+        self._set_root()
+        assert self.split in ['train','val']
+        self.pairname_to_img1name = lambda pairname: osp.join(self.root, 'data', pairname+'_img1.ppm')
+        self.pairname_to_img2name = lambda pairname: osp.join(self.root, 'data', pairname+'_img2.ppm')
+        self.pairname_to_flowname = lambda pairname: osp.join(self.root, 'data', pairname+'_flow.flo')
+        self.pairname_to_str = lambda pairname: pairname
+        self.load_flow = _read_flo_file
+
+    def _build_cache(self):
+        split_file = osp.join(self.root, 'chairs_split.txt')
+        split_list = np.loadtxt(split_file, dtype=np.int32)
+        trainpairs = ['{:05d}'.format(i) for i in np.where(split_list==1)[0]+1]
+        valpairs = ['{:05d}'.format(i) for i in np.where(split_list==2)[0]+1]
+        assert len(trainpairs)==22232 and len(valpairs)==640, "incorrect parsing of pairs in FlyingChairs"
+        tosave = {'train': trainpairs, 'val': valpairs}
+        return tosave
+
+class FlyingThingsDataset(FlowDataset):
+
+    def _prepare_data(self):
+        self.name = "FlyingThings"
+        self._set_root()
+        assert self.split in [f'{set_}_{pass_}pass{camstr}' for set_ in ['train','test','test1024'] for camstr in ['','_rightcam'] for pass_ in ['clean','final','all']]
+        self.pairname_to_img1name = lambda pairname: osp.join(self.root, f'frames_{pairname[3]}pass', pairname[0].replace('into_future','').replace('into_past',''), '{:04d}.png'.format(pairname[1]))
+        self.pairname_to_img2name = lambda pairname: osp.join(self.root, f'frames_{pairname[3]}pass', pairname[0].replace('into_future','').replace('into_past',''), '{:04d}.png'.format(pairname[2]))
+        self.pairname_to_flowname = lambda pairname: osp.join(self.root, 'optical_flow', pairname[0], 'OpticalFlowInto{f:s}_{i:04d}_{c:s}.pfm'.format(f='Future' if 'future' in pairname[0] else 'Past', i=pairname[1], c='L' if 'left' in pairname[0] else 'R' ))
+        self.pairname_to_str = lambda pairname: os.path.join(pairname[3]+'pass', pairname[0], 'Into{f:s}_{i:04d}_{c:s}'.format(f='Future' if 'future' in pairname[0] else 'Past', i=pairname[1], c='L' if 'left' in pairname[0] else 'R' ))
+        self.load_flow = _read_pfm_flow
+
+    def _build_cache(self):
+        tosave = {}
+        # train and test splits for the different passes
+        for set_ in ['train', 'test']:
+            sroot = osp.join(self.root, 'optical_flow', set_.upper())
+            fname_to_i = lambda f: int(f[len('OpticalFlowIntoFuture_'):-len('_L.pfm')])
+            pp = [(osp.join(set_.upper(), d, s, 'into_future/left'),fname_to_i(fname)) for d in sorted(os.listdir(sroot)) for s in sorted(os.listdir(osp.join(sroot,d))) for fname in sorted(os.listdir(osp.join(sroot,d, s, 'into_future/left')))[:-1]]
+            pairs = [(a,i,i+1) for a,i in pp]
+            pairs += [(a.replace('into_future','into_past'),i+1,i) for a,i in pp]
+            assert len(pairs)=={'train': 40302, 'test': 7866}[set_], "incorrect parsing of pairs in FlyingThings"
+            for cam in ['left','right']:
+                camstr = '' if cam=='left' else f'_{cam}cam'
+                for pass_ in ['final', 'clean']:
+                    tosave[f'{set_}_{pass_}pass{camstr}'] = [(a.replace('left',cam),i,j,pass_) for a,i,j in pairs]
+                tosave[f'{set_}_allpass{camstr}'] = tosave[f'{set_}_cleanpass{camstr}'] + tosave[f'{set_}_finalpass{camstr}']
+        # test1024: this is the same split as unimatch 'validation' split
+        # see https://github.com/autonomousvision/unimatch/blob/master/dataloader/flow/datasets.py#L229
+        test1024_nsamples = 1024
+        alltest_nsamples = len(tosave['test_cleanpass']) # 7866
+        stride = alltest_nsamples // test1024_nsamples
+        remove = alltest_nsamples % test1024_nsamples
+        for cam in ['left','right']:
+            camstr = '' if cam=='left' else f'_{cam}cam'
+            for pass_ in ['final','clean']:
+                tosave[f'test1024_{pass_}pass{camstr}'] = sorted(tosave[f'test_{pass_}pass{camstr}'])[:-remove][::stride] # warning, it was not sorted before
+            assert len(tosave['test1024_cleanpass'])==1024, "incorrect parsing of pairs in FlyingThings"
+            tosave[f'test1024_allpass{camstr}'] = tosave[f'test1024_cleanpass{camstr}'] + tosave[f'test1024_finalpass{camstr}']
+        return tosave
+
+
+class MPISintelDataset(FlowDataset):
+
+    def _prepare_data(self):
+        self.name = "MPISintel"
+        self._set_root()
+        assert self.split in [s+'_'+p for s in ['train','test','subval','subtrain'] for p in ['cleanpass','finalpass','allpass']]
+        self.pairname_to_img1name = lambda pairname: osp.join(self.root, pairname[0], 'frame_{:04d}.png'.format(pairname[1]))
+        self.pairname_to_img2name = lambda pairname: osp.join(self.root, pairname[0], 'frame_{:04d}.png'.format(pairname[1]+1))
+        self.pairname_to_flowname = lambda pairname: None if pairname[0].startswith('test/') else osp.join(self.root, pairname[0].replace('/clean/','/flow/').replace('/final/','/flow/'), 'frame_{:04d}.flo'.format(pairname[1]))
+        self.pairname_to_str = lambda pairname: osp.join(pairname[0], 'frame_{:04d}'.format(pairname[1]))
+        self.load_flow = _read_flo_file
+
+    def _build_cache(self):
+        trainseqs = sorted(os.listdir(self.root+'training/clean'))
+        trainpairs = [ (osp.join('training/clean', s),i) for s in trainseqs for i in range(1, len(os.listdir(self.root+'training/clean/'+s)))]
+        subvalseqs = ['temple_2','temple_3']
+        subtrainseqs = [s for s in trainseqs if s not in subvalseqs]
+        subvalpairs = [ (p,i) for p,i in trainpairs if any(s in p for s in subvalseqs)]
+        subtrainpairs = [ (p,i) for p,i in trainpairs if any(s in p for s in subtrainseqs)]
+        testseqs = sorted(os.listdir(self.root+'test/clean'))
+        testpairs = [ (osp.join('test/clean', s),i) for s in testseqs for i in range(1, len(os.listdir(self.root+'test/clean/'+s)))]
+        assert len(trainpairs)==1041 and len(testpairs)==552 and len(subvalpairs)==98 and len(subtrainpairs)==943, "incorrect parsing of pairs in MPI-Sintel"
+        tosave = {}
+        tosave['train_cleanpass'] = trainpairs
+        tosave['test_cleanpass'] = testpairs
+        tosave['subval_cleanpass'] = subvalpairs
+        tosave['subtrain_cleanpass'] = subtrainpairs
+        for t in ['train','test','subval','subtrain']:
+            tosave[t+'_finalpass'] = [(p.replace('/clean/','/final/'),i) for p,i in tosave[t+'_cleanpass']]
+            tosave[t+'_allpass'] = tosave[t+'_cleanpass'] + tosave[t+'_finalpass']
+        return tosave
+
+    def submission_save_pairname(self, pairname, prediction, outdir, _time):
+        assert prediction.shape[2]==2
+        outfile = os.path.join(outdir, 'submission', self.pairname_to_str(pairname)+'.flo')
+        os.makedirs( os.path.dirname(outfile), exist_ok=True)
+        writeFlowFile(prediction, outfile)
+
+    def finalize_submission(self, outdir):
+        assert self.split == 'test_allpass'
+        bundle_exe = "/nfs/data/ffs-3d/datasets/StereoFlow/MPI-Sintel/bundler/linux-x64/bundler" # eg
+        if os.path.isfile(bundle_exe):
+            cmd = f'{bundle_exe} "{outdir}/submission/test/clean/" "{outdir}/submission/test/final" "{outdir}/submission/bundled.lzma"'
+            print(cmd)
+            os.system(cmd)
+            print(f'Done. Submission file at: "{outdir}/submission/bundled.lzma"')
+        else:
+            print('Could not find bundler executable for submission.')
+            print('Please download it and run:')
+            print(f'<bundle_exe> "{outdir}/submission/test/clean/" "{outdir}/submission/test/final" "{outdir}/submission/bundled.lzma"')
+
+class SpringDataset(FlowDataset):
+
+    def _prepare_data(self):
+        self.name = "Spring"
+        self._set_root()
+        assert self.split in ['train','test','subtrain','subval']
+        self.pairname_to_img1name = lambda pairname: osp.join(self.root, pairname[0], pairname[1], 'frame_'+pairname[3], 'frame_{:s}_{:04d}.png'.format(pairname[3], pairname[4]))
+        self.pairname_to_img2name = lambda pairname: osp.join(self.root, pairname[0], pairname[1], 'frame_'+pairname[3], 'frame_{:s}_{:04d}.png'.format(pairname[3], pairname[4]+(1 if pairname[2]=='FW' else -1)))
+        self.pairname_to_flowname = lambda pairname: None if pairname[0]=='test' else osp.join(self.root, pairname[0], pairname[1], f'flow_{pairname[2]}_{pairname[3]}', f'flow_{pairname[2]}_{pairname[3]}_{pairname[4]:04d}.flo5')
+        self.pairname_to_str = lambda pairname: osp.join(pairname[0], pairname[1], f'flow_{pairname[2]}_{pairname[3]}', f'flow_{pairname[2]}_{pairname[3]}_{pairname[4]:04d}')
+        self.load_flow = _read_hdf5_flow
+
+    def _build_cache(self):
+        # train
+        trainseqs = sorted(os.listdir( osp.join(self.root,'train')))
+        trainpairs = []
+        for leftright in ['left','right']:
+            for fwbw in ['FW','BW']:
+                trainpairs += [('train',s,fwbw,leftright,int(f[len(f'flow_{fwbw}_{leftright}_'):-len('.flo5')])) for s in trainseqs for f in sorted(os.listdir(osp.join(self.root,'train',s,f'flow_{fwbw}_{leftright}')))]
+        # test
+        testseqs = sorted(os.listdir( osp.join(self.root,'test')))
+        testpairs = []
+        for leftright in ['left','right']:
+            testpairs += [('test',s,'FW',leftright,int(f[len(f'frame_{leftright}_'):-len('.png')])) for s in testseqs for f in sorted(os.listdir(osp.join(self.root,'test',s,f'frame_{leftright}')))[:-1]]
+            testpairs += [('test',s,'BW',leftright,int(f[len(f'frame_{leftright}_'):-len('.png')])+1) for s in testseqs for f in sorted(os.listdir(osp.join(self.root,'test',s,f'frame_{leftright}')))[:-1]]
+        # subtrain / subval
+        subtrainpairs = [p for p in trainpairs if p[1]!='0041']
+        subvalpairs = [p for p in trainpairs if p[1]=='0041']
+        assert len(trainpairs)==19852 and len(testpairs)==3960 and len(subtrainpairs)==19472 and len(subvalpairs)==380, "incorrect parsing of pairs in Spring"
+        tosave = {'train': trainpairs, 'test': testpairs, 'subtrain': subtrainpairs, 'subval': subvalpairs}
+        return tosave
+
+    def submission_save_pairname(self, pairname, prediction, outdir, time):
+        assert prediction.ndim==3
+        assert prediction.shape[2]==2
+        assert prediction.dtype==np.float32
+        outfile = osp.join(outdir, pairname[0], pairname[1], f'flow_{pairname[2]}_{pairname[3]}', f'flow_{pairname[2]}_{pairname[3]}_{pairname[4]:04d}.flo5')
+        os.makedirs( os.path.dirname(outfile), exist_ok=True)
+        writeFlo5File(prediction, outfile)
+
+    def finalize_submission(self, outdir):
+        assert self.split=='test'
+        exe = f"{self.root}/flow_subsampling"
+        if os.path.isfile(exe):
+            cmd = f'cd "{outdir}/test"; {exe} .'
+            print(cmd)
+            os.system(cmd)
+            print(f'Done. Submission file at {outdir}/test/flow_submission.hdf5')
+        else:
+            print('Could not find flow_subsampling executable for submission.')
+            print('Please download it and run:')
+            print(f'cd "{outdir}/test"; <flow_subsampling_exe> .')
+
+
+class Kitti12Dataset(FlowDataset):
+
+    def _prepare_data(self):
+        self.name = "Kitti12"
+        self._set_root()
+        assert self.split in ['train','test']
+        self.pairname_to_img1name = lambda pairname: osp.join(self.root, pairname+'_10.png')
+        self.pairname_to_img2name = lambda pairname: osp.join(self.root, pairname+'_11.png')
+        self.pairname_to_flowname = None if self.split=='test' else lambda pairname: osp.join(self.root, pairname.replace('/colored_0/','/flow_occ/')+'_10.png')
+        self.pairname_to_str = lambda pairname: pairname.replace('/colored_0/','/')
+        self.load_flow = _read_kitti_flow
+
+    def _build_cache(self):
+        trainseqs = ["training/colored_0/%06d"%(i) for i in range(194)]
+        testseqs = ["testing/colored_0/%06d"%(i) for i in range(195)]
+        assert len(trainseqs)==194 and len(testseqs)==195, "incorrect parsing of pairs in Kitti12"
+        tosave = {'train': trainseqs, 'test': testseqs}
+        return tosave
+
+    def submission_save_pairname(self, pairname, prediction, outdir, time):
+        assert prediction.ndim==3
+        assert prediction.shape[2]==2
+        outfile = os.path.join(outdir, pairname.split('/')[-1]+'_10.png')
+        os.makedirs( os.path.dirname(outfile), exist_ok=True)
+        writeFlowKitti(outfile, prediction)
+
+    def finalize_submission(self, outdir):
+        assert self.split=='test'
+        cmd = f'cd {outdir}/; zip -r "kitti12_flow_results.zip" .'
+        print(cmd)
+        os.system(cmd)
+        print(f'Done. Submission file at {outdir}/kitti12_flow_results.zip')
+
+
+class Kitti15Dataset(FlowDataset):
+
+    def _prepare_data(self):
+        self.name = "Kitti15"
+        self._set_root()
+        assert self.split in ['train','subtrain','subval','test']
+        self.pairname_to_img1name = lambda pairname: osp.join(self.root, pairname+'_10.png')
+        self.pairname_to_img2name = lambda pairname: osp.join(self.root, pairname+'_11.png')
+        self.pairname_to_flowname = None if self.split=='test' else lambda pairname: osp.join(self.root, pairname.replace('/image_2/','/flow_occ/')+'_10.png')
+        self.pairname_to_str = lambda pairname: pairname.replace('/image_2/','/')
+        self.load_flow = _read_kitti_flow
+
+    def _build_cache(self):
+        trainseqs = ["training/image_2/%06d"%(i) for i in range(200)]
+        subtrainseqs = trainseqs[:-10]
+        subvalseqs = trainseqs[-10:]
+        testseqs = ["testing/image_2/%06d"%(i) for i in range(200)]
+        assert len(trainseqs)==200 and len(subtrainseqs)==190 and len(subvalseqs)==10 and len(testseqs)==200, "incorrect parsing of pairs in Kitti15"
+        tosave = {'train': trainseqs, 'subtrain': subtrainseqs, 'subval': subvalseqs, 'test': testseqs}
+        return tosave
+
+    def submission_save_pairname(self, pairname, prediction, outdir, time):
+        assert prediction.ndim==3
+        assert prediction.shape[2]==2
+        outfile = os.path.join(outdir, 'flow', pairname.split('/')[-1]+'_10.png')
+        os.makedirs( os.path.dirname(outfile), exist_ok=True)
+        writeFlowKitti(outfile, prediction)
+
+    def finalize_submission(self, outdir):
+        assert self.split=='test'
+        cmd = f'cd {outdir}/; zip -r "kitti15_flow_results.zip" flow'
+        print(cmd)
+        os.system(cmd)
+        print(f'Done. Submission file at {outdir}/kitti15_flow_results.zip')
+
+
+import cv2
+def _read_numpy_flow(filename):
+    return np.load(filename)
+
+def _read_pfm_flow(filename):
+    f, _ = _read_pfm(filename)
+    assert np.all(f[:,:,2]==0.0)
+    return np.ascontiguousarray(f[:,:,:2])
+
+TAG_FLOAT = 202021.25 # tag to check the sanity of the file
+TAG_STRING = 'PIEH' # string containing the tag
+MIN_WIDTH = 1
+MAX_WIDTH = 99999
+MIN_HEIGHT = 1
+MAX_HEIGHT = 99999
+def readFlowFile(filename):
+    """
+    readFlowFile(<filename>) reads a flow file <filename> into a 2-band np.array.
+    if <filename> does not exist, an IOError is raised.
+    if <filename> does not finish by '.flo' or the tag, the width, the height or the file's size is illegal, an Exception is raised.
+    ---- PARAMETERS ----
+    filename: string containing the name of the file to read a flow
+    ---- OUTPUTS ----
+    a np.array of dimension (height x width x 2) containing the flow of type 'float32'
+    """
+
+    # check filename
+    if not filename.endswith(".flo"):
+        raise Exception("readFlowFile({:s}): filename must finish with '.flo'".format(filename))
+
+    # open the file and read it
+    with open(filename,'rb') as f:
+        # check tag
+        tag = struct.unpack('f',f.read(4))[0]
+        if tag != TAG_FLOAT:
+            raise Exception("flow_utils.readFlowFile({:s}): wrong tag".format(filename))
+        # read dimension
+        w,h = struct.unpack('ii',f.read(8))
+        if w < MIN_WIDTH or w > MAX_WIDTH:
+            raise Exception("flow_utils.readFlowFile({:s}): illegal width {:d}".format(filename,w))
+        if h < MIN_HEIGHT or h > MAX_HEIGHT:
+            raise Exception("flow_utils.readFlowFile({:s}): illegal height {:d}".format(filename,h))
+        flow = np.fromfile(f,'float32')
+        if not flow.shape == (h*w*2,):
+            raise Exception("flow_utils.readFlowFile({:s}): illegal size of the file".format(filename))
+        flow.shape = (h,w,2)
+        return flow
+
+def writeFlowFile(flow,filename):
+    """
+    writeFlowFile(flow,<filename>) write flow to the file <filename>.
+    if <filename> does not exist, an IOError is raised.
+    if <filename> does not finish with '.flo' or the flow has not 2 bands, an Exception is raised.
+    ---- PARAMETERS ----
+    flow: np.array of dimension (height x width x 2) containing the flow to write
+    filename: string containing the name of the file to write a flow
+    """
+
+    # check filename
+    if not filename.endswith(".flo"):
+        raise Exception("flow_utils.writeFlowFile(<flow>,{:s}): filename must finish with '.flo'".format(filename))
+
+    if not flow.shape[2:] == (2,):
+        raise Exception("flow_utils.writeFlowFile(<flow>,{:s}): <flow> must have 2 bands".format(filename))
+
+
+    # open the file and write it
+    with open(filename,'wb') as f:
+        # write TAG
+        f.write( TAG_STRING.encode('utf-8') )
+        # write dimension
+        f.write( struct.pack('ii',flow.shape[1],flow.shape[0]) )
+        # write the flow
+        flow.astype(np.float32).tofile(f)
+
+_read_flo_file = readFlowFile
+
+def _read_kitti_flow(filename):
+    flow = cv2.imread(filename, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_COLOR)
+    flow = flow[:, :, ::-1].astype(np.float32)
+    valid = flow[:, :, 2]>0
+    flow = flow[:, :, :2]
+    flow = (flow - 2 ** 15) / 64.0
+    flow[~valid,0] = np.inf
+    flow[~valid,1] = np.inf
+    return flow
+_read_hd1k_flow = _read_kitti_flow
+
+
+def writeFlowKitti(filename, uv):
+    uv = 64.0 * uv + 2 ** 15
+    valid = np.ones([uv.shape[0], uv.shape[1], 1])
+    uv = np.concatenate([uv, valid], axis=-1).astype(np.uint16)
+    cv2.imwrite(filename, uv[..., ::-1])
+
+def writeFlo5File(flow, filename):
+    with h5py.File(filename, "w") as f:
+        f.create_dataset("flow", data=flow, compression="gzip", compression_opts=5)
+
+def _read_hdf5_flow(filename):
+    flow = np.asarray(h5py.File(filename)['flow'])
+    flow[np.isnan(flow)] = np.inf # make invalid values +inf
+    return flow.astype(np.float32)
+
+# flow visualization
+RY = 15
+YG = 6
+GC = 4
+CB = 11
+BM = 13
+MR = 6
+UNKNOWN_THRESH = 1e9
+
+def colorTest():
+    """
+    flow_utils.colorTest(): display an example of image showing the color encoding scheme
+    """
+    import matplotlib.pylab as plt
+    truerange = 1
+    h,w = 151,151
+    trange = truerange*1.04
+    s2 = round(h/2)
+    x,y = np.meshgrid(range(w),range(h))
+    u = x*trange/s2-trange
+    v = y*trange/s2-trange
+    img = _computeColor(np.concatenate((u[:,:,np.newaxis],v[:,:,np.newaxis]),2)/trange/np.sqrt(2))
+    plt.imshow(img)
+    plt.axis('off')
+    plt.axhline(round(h/2),color='k')
+    plt.axvline(round(w/2),color='k')
+
+def flowToColor(flow, maxflow=None, maxmaxflow=None, saturate=False):
+    """
+    flow_utils.flowToColor(flow): return a color code flow field, normalized based on the maximum l2-norm of the flow
+    flow_utils.flowToColor(flow,maxflow): return a color code flow field, normalized by maxflow
+    ---- PARAMETERS ----
+    flow: flow to display of shape (height x width x 2)
+    maxflow (default:None): if given, normalize the flow by its value, otherwise by the flow norm
+    maxmaxflow (default:None): if given, cap the normalization value at it
+    ---- OUTPUT ----
+    an np.array of shape (height x width x 3) of type uint8 containing a color code of the flow
+    """
+    h,w,n = flow.shape
+    # check size of flow
+    assert n == 2, "flow_utils.flowToColor(flow): flow must have 2 bands"
+    # fix unknown flow
+    unknown_idx = np.max(np.abs(flow),2)>UNKNOWN_THRESH
+    flow[unknown_idx] = 0.0
+    # compute max flow if needed
+    if maxflow is None:
+        maxflow = flowMaxNorm(flow)
+    if maxmaxflow is not None:
+        maxflow = min(maxmaxflow, maxflow)
+    # normalize flow
+    eps = np.spacing(1) # minimum positive float value to avoid division by 0
+    # compute the flow
+    img = _computeColor(flow/(maxflow+eps), saturate=saturate)
+    # put black pixels in unknown location
+    img[ np.tile( unknown_idx[:,:,np.newaxis],[1,1,3]) ] = 0.0
+    return img
+
+def flowMaxNorm(flow):
+    """
+    flow_utils.flowMaxNorm(flow): return the maximum of the l2-norm of the given flow
+    ---- PARAMETERS ----
+    flow: the flow
+
+    ---- OUTPUT ----
+    a float containing the maximum of the l2-norm of the flow
+    """
+    return np.max( np.sqrt( np.sum( np.square( flow ) , 2) ) )
+
+def _computeColor(flow, saturate=True):
+    """
+    flow_utils._computeColor(flow): compute color codes for the flow field flow
+
+    ---- PARAMETERS ----
+    flow: np.array of dimension (height x width x 2) containing the flow to display
+    ---- OUTPUTS ----
+    an np.array of dimension (height x width x 3) containing the color conversion of the flow
+    """
+    # set nan to 0
+    nanidx = np.isnan(flow[:,:,0])
+    flow[nanidx] = 0.0
+
+    # colorwheel
+    ncols = RY + YG + GC + CB + BM + MR
+    nchans = 3
+    colorwheel = np.zeros((ncols,nchans),'uint8')
+    col = 0
+    # RY
+    colorwheel[:RY,0] = 255
+    colorwheel[:RY,1] = [(255*i) // RY for i in range(RY)]
+    col += RY
+    # YG
+    colorwheel[col:col+YG,0] = [255 - (255*i) // YG for i in range(YG)]
+    colorwheel[col:col+YG,1] = 255
+    col += YG
+    # GC
+    colorwheel[col:col+GC,1] = 255
+    colorwheel[col:col+GC,2] = [(255*i) // GC for i in range(GC)]
+    col += GC
+    # CB
+    colorwheel[col:col+CB,1] = [255 - (255*i) // CB for i in range(CB)]
+    colorwheel[col:col+CB,2] = 255
+    col += CB
+    # BM
+    colorwheel[col:col+BM,0] = [(255*i) // BM for i in range(BM)]
+    colorwheel[col:col+BM,2] = 255
+    col += BM
+    # MR
+    colorwheel[col:col+MR,0] = 255
+    colorwheel[col:col+MR,2] = [255 - (255*i) // MR for i in range(MR)]
+
+    # compute utility variables
+    rad = np.sqrt( np.sum( np.square(flow) , 2) ) # magnitude
+    a = np.arctan2( -flow[:,:,1] , -flow[:,:,0]) / np.pi # angle
+    fk = (a+1)/2 * (ncols-1) # map [-1,1] to [0,ncols-1]
+    k0 = np.floor(fk).astype('int')
+    k1 = k0+1
+    k1[k1==ncols] = 0
+    f = fk-k0
+
+    if not saturate:
+        rad = np.minimum(rad,1)
+
+    # compute the image
+    img = np.zeros( (flow.shape[0],flow.shape[1],nchans), 'uint8' )
+    for i in range(nchans):
+        tmp = colorwheel[:,i].astype('float')
+        col0 = tmp[k0]/255
+        col1 = tmp[k1]/255
+        col = (1-f)*col0 + f*col1
+        idx = (rad <= 1)
+        col[idx] = 1-rad[idx]*(1-col[idx]) # increase saturation with radius
+        col[~idx] *= 0.75 # out of range
+        img[:,:,i] = (255*col*(1-nanidx.astype('float'))).astype('uint8')
+
+    return img
+
+# flow dataset getter
+
+def get_train_dataset_flow(dataset_str, augmentor=True, crop_size=None):
+    dataset_str = dataset_str.replace('(','Dataset(')
+    if augmentor:
+        dataset_str = dataset_str.replace(')',', augmentor=True)')
+    if crop_size is not None:
+        dataset_str = dataset_str.replace(')',', crop_size={:s})'.format(str(crop_size)))
+    return eval(dataset_str)
+
+def get_test_datasets_flow(dataset_str):
+    dataset_str = dataset_str.replace('(','Dataset(')
+    return [eval(s) for s in dataset_str.split('+')]
\ No newline at end of file
diff --git a/dynamic_predictor/croco/stereoflow/datasets_stereo.py b/dynamic_predictor/croco/stereoflow/datasets_stereo.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbdf841a6650afa71ae5782702902c79eba31a5c
--- /dev/null
+++ b/dynamic_predictor/croco/stereoflow/datasets_stereo.py
@@ -0,0 +1,674 @@
+# Copyright (C) 2022-present Naver Corporation. All rights reserved.
+# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
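+#
+# Note: datasets are selected via small spec strings that the getters at the end of
+# this file eval(), e.g. (an illustrative spec, not a value taken from this repo):
+#   get_train_dataset_stereo("CREStereo('train')+SceneFlow('train_allpass')", crop_size=(704,352))
+# which expands each name into <Name>Dataset(..., augmentor=True, crop_size=...) before eval.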
+
+# --------------------------------------------------------
+# Dataset structure for stereo
+# --------------------------------------------------------
+
+import sys, os
+import os.path as osp
+import pickle
+import numpy as np
+from PIL import Image
+import json
+import h5py
+from glob import glob
+import cv2
+
+import torch
+from torch.utils import data
+
+from .augmentor import StereoAugmentor
+
+
+
+dataset_to_root = {
+    'CREStereo': './data/stereoflow//crenet_stereo_trainset/stereo_trainset/crestereo/',
+    'SceneFlow': './data/stereoflow//SceneFlow/',
+    'ETH3DLowRes': './data/stereoflow/eth3d_lowres/',
+    'Booster': './data/stereoflow/booster_gt/',
+    'Middlebury2021': './data/stereoflow/middlebury/2021/data/',
+    'Middlebury2014': './data/stereoflow/middlebury/2014/',
+    'Middlebury2006': './data/stereoflow/middlebury/2006/',
+    'Middlebury2005': './data/stereoflow/middlebury/2005/train/',
+    'MiddleburyEval3': './data/stereoflow/middlebury/MiddEval3/',
+    'Spring': './data/stereoflow/spring/',
+    'Kitti15': './data/stereoflow/kitti-stereo-2015/',
+    'Kitti12': './data/stereoflow/kitti-stereo-2012/',
+}
+cache_dir = "./data/stereoflow/datasets_stereo_cache/"
+
+
+in1k_mean = torch.tensor([0.485, 0.456, 0.406]).view(3,1,1)
+in1k_std = torch.tensor([0.229, 0.224, 0.225]).view(3,1,1)
+def img_to_tensor(img):
+    img = torch.from_numpy(img).permute(2, 0, 1).float() / 255.
+    img = (img-in1k_mean)/in1k_std
+    return img
+def disp_to_tensor(disp):
+    return torch.from_numpy(disp)[None,:,:]
+
+class StereoDataset(data.Dataset):
+
+    def __init__(self, split, augmentor=False, crop_size=None, totensor=True):
+        self.split = split
+        if not augmentor: assert crop_size is None
+        if crop_size: assert augmentor
+        self.crop_size = crop_size
+        self.augmentor_str = augmentor
+        self.augmentor = StereoAugmentor(crop_size) if augmentor else None
+        self.totensor = totensor
+        self.rmul = 1 # keep track of rmul
+        self.has_constant_resolution = True # whether the dataset has constant resolution or not (=> don't use batch_size>1 at test time)
+        self._prepare_data()
+        self._load_or_build_cache()
+
+    def _prepare_data(self):
+        """
+        to be defined for each dataset
+        """
+        raise NotImplementedError
+
+    def __len__(self):
+        return len(self.pairnames)
+
+    def __getitem__(self, index):
+        pairname = self.pairnames[index]
+
+        # get filenames
+        Limgname = self.pairname_to_Limgname(pairname)
+        Rimgname = self.pairname_to_Rimgname(pairname)
+        Ldispname = self.pairname_to_Ldispname(pairname) if self.pairname_to_Ldispname is not None else None
+
+        # load images and disparities
+        Limg = _read_img(Limgname)
+        Rimg = _read_img(Rimgname)
+        disp = self.load_disparity(Ldispname) if Ldispname is not None else None
+
+        # sanity check
+        if disp is not None: assert np.all(disp>0) or self.name=="Spring", (self.name, pairname, Ldispname)
+
+        # apply augmentations
+        if self.augmentor is not None:
+            Limg, Rimg, disp = self.augmentor(Limg, Rimg, disp, self.name)
+
+        if self.totensor:
+            Limg = img_to_tensor(Limg)
+            Rimg = img_to_tensor(Rimg)
+            if disp is None:
+                disp = torch.tensor([]) # to allow dataloader batching with the default collate_fn
+            else:
+                disp = disp_to_tensor(disp)
+
+        return Limg, Rimg, disp, str(pairname)
+
+    def __rmul__(self, v):
+        self.rmul *= v
+        self.pairnames = v * self.pairnames
+        return self
+
+    def __str__(self):
+        return f'{self.__class__.__name__}_{self.split}'
+
+    def __repr__(self):
+        s = f'{self.__class__.__name__}(split={self.split}, augmentor={self.augmentor_str}, crop_size={str(self.crop_size)}, totensor={self.totensor})'
+ if self.rmul==1: + s+=f'\n\tnum pairs: {len(self.pairnames)}' + else: + s+=f'\n\tnum pairs: {len(self.pairnames)} ({len(self.pairnames)//self.rmul}x{self.rmul})' + return s + + def _set_root(self): + self.root = dataset_to_root[self.name] + assert os.path.isdir(self.root), f"could not find root directory for dataset {self.name}: {self.root}" + + def _load_or_build_cache(self): + cache_file = osp.join(cache_dir, self.name+'.pkl') + if osp.isfile(cache_file): + with open(cache_file, 'rb') as fid: + self.pairnames = pickle.load(fid)[self.split] + else: + tosave = self._build_cache() + os.makedirs(cache_dir, exist_ok=True) + with open(cache_file, 'wb') as fid: + pickle.dump(tosave, fid) + self.pairnames = tosave[self.split] + +class CREStereoDataset(StereoDataset): + + def _prepare_data(self): + self.name = 'CREStereo' + self._set_root() + assert self.split in ['train'] + self.pairname_to_Limgname = lambda pairname: osp.join(self.root, pairname+'_left.jpg') + self.pairname_to_Rimgname = lambda pairname: osp.join(self.root, pairname+'_right.jpg') + self.pairname_to_Ldispname = lambda pairname: osp.join(self.root, pairname+'_left.disp.png') + self.pairname_to_str = lambda pairname: pairname + self.load_disparity = _read_crestereo_disp + + + def _build_cache(self): + allpairs = [s+'/'+f[:-len('_left.jpg')] for s in sorted(os.listdir(self.root)) for f in sorted(os.listdir(self.root+'/'+s)) if f.endswith('_left.jpg')] + assert len(allpairs)==200000, "incorrect parsing of pairs in CreStereo" + tosave = {'train': allpairs} + return tosave + +class SceneFlowDataset(StereoDataset): + + def _prepare_data(self): + self.name = "SceneFlow" + self._set_root() + assert self.split in ['train_finalpass','train_cleanpass','train_allpass','test_finalpass','test_cleanpass','test_allpass','test1of100_cleanpass','test1of100_finalpass'] + self.pairname_to_Limgname = lambda pairname: osp.join(self.root, pairname) + self.pairname_to_Rimgname = lambda pairname: osp.join(self.root, pairname).replace('/left/','/right/') + self.pairname_to_Ldispname = lambda pairname: osp.join(self.root, pairname).replace('/frames_finalpass/','/disparity/').replace('/frames_cleanpass/','/disparity/')[:-4]+'.pfm' + self.pairname_to_str = lambda pairname: pairname[:-4] + self.load_disparity = _read_sceneflow_disp + + def _build_cache(self): + trainpairs = [] + # driving + pairs = sorted(glob(self.root+'Driving/frames_finalpass/*/*/*/left/*.png')) + pairs = list(map(lambda x: x[len(self.root):], pairs)) + assert len(pairs) == 4400, "incorrect parsing of pairs in SceneFlow" + trainpairs += pairs + # monkaa + pairs = sorted(glob(self.root+'Monkaa/frames_finalpass/*/left/*.png')) + pairs = list(map(lambda x: x[len(self.root):], pairs)) + assert len(pairs) == 8664, "incorrect parsing of pairs in SceneFlow" + trainpairs += pairs + # flyingthings + pairs = sorted(glob(self.root+'FlyingThings/frames_finalpass/TRAIN/*/*/left/*.png')) + pairs = list(map(lambda x: x[len(self.root):], pairs)) + assert len(pairs) == 22390, "incorrect parsing of pairs in SceneFlow" + trainpairs += pairs + assert len(trainpairs) == 35454, "incorrect parsing of pairs in SceneFlow" + testpairs = sorted(glob(self.root+'FlyingThings/frames_finalpass/TEST/*/*/left/*.png')) + testpairs = list(map(lambda x: x[len(self.root):], testpairs)) + assert len(testpairs) == 4370, "incorrect parsing of pairs in SceneFlow" + test1of100pairs = testpairs[::100] + assert len(test1of100pairs) == 44, "incorrect parsing of pairs in SceneFlow" + # all + tosave = {'train_finalpass': trainpairs, 
+ 'train_cleanpass': list(map(lambda x: x.replace('frames_finalpass','frames_cleanpass'), trainpairs)), + 'test_finalpass': testpairs, + 'test_cleanpass': list(map(lambda x: x.replace('frames_finalpass','frames_cleanpass'), testpairs)), + 'test1of100_finalpass': test1of100pairs, + 'test1of100_cleanpass': list(map(lambda x: x.replace('frames_finalpass','frames_cleanpass'), test1of100pairs)), + } + tosave['train_allpass'] = tosave['train_finalpass']+tosave['train_cleanpass'] + tosave['test_allpass'] = tosave['test_finalpass']+tosave['test_cleanpass'] + return tosave + +class Md21Dataset(StereoDataset): + + def _prepare_data(self): + self.name = "Middlebury2021" + self._set_root() + assert self.split in ['train','subtrain','subval'] + self.pairname_to_Limgname = lambda pairname: osp.join(self.root, pairname) + self.pairname_to_Rimgname = lambda pairname: osp.join(self.root, pairname.replace('/im0','/im1')) + self.pairname_to_Ldispname = lambda pairname: osp.join(self.root, pairname.split('/')[0], 'disp0.pfm') + self.pairname_to_str = lambda pairname: pairname[:-4] + self.load_disparity = _read_middlebury_disp + + def _build_cache(self): + seqs = sorted(os.listdir(self.root)) + trainpairs = [] + for s in seqs: + #trainpairs += [s+'/im0.png'] # we should remove it, it is included as such in other lightings + trainpairs += [s+'/ambient/'+b+'/'+a for b in sorted(os.listdir(osp.join(self.root,s,'ambient'))) for a in sorted(os.listdir(osp.join(self.root,s,'ambient',b))) if a.startswith('im0')] + assert len(trainpairs)==355 + subtrainpairs = [p for p in trainpairs if any(p.startswith(s+'/') for s in seqs[:-2])] + subvalpairs = [p for p in trainpairs if any(p.startswith(s+'/') for s in seqs[-2:])] + assert len(subtrainpairs)==335 and len(subvalpairs)==20, "incorrect parsing of pairs in Middlebury 2021" + tosave = {'train': trainpairs, 'subtrain': subtrainpairs, 'subval': subvalpairs} + return tosave + +class Md14Dataset(StereoDataset): + + def _prepare_data(self): + self.name = "Middlebury2014" + self._set_root() + assert self.split in ['train','subtrain','subval'] + self.pairname_to_Limgname = lambda pairname: osp.join(self.root, osp.dirname(pairname), 'im0.png') + self.pairname_to_Rimgname = lambda pairname: osp.join(self.root, pairname) + self.pairname_to_Ldispname = lambda pairname: osp.join(self.root, osp.dirname(pairname), 'disp0.pfm') + self.pairname_to_str = lambda pairname: pairname[:-4] + self.load_disparity = _read_middlebury_disp + self.has_constant_resolution = False + + def _build_cache(self): + seqs = sorted(os.listdir(self.root)) + trainpairs = [] + for s in seqs: + trainpairs += [s+'/im1.png',s+'/im1E.png',s+'/im1L.png'] + assert len(trainpairs)==138 + valseqs = ['Umbrella-imperfect','Vintage-perfect'] + assert all(s in seqs for s in valseqs) + subtrainpairs = [p for p in trainpairs if not any(p.startswith(s+'/') for s in valseqs)] + subvalpairs = [p for p in trainpairs if any(p.startswith(s+'/') for s in valseqs)] + assert len(subtrainpairs)==132 and len(subvalpairs)==6, "incorrect parsing of pairs in Middlebury 2014" + tosave = {'train': trainpairs, 'subtrain': subtrainpairs, 'subval': subvalpairs} + return tosave + +class Md06Dataset(StereoDataset): + + def _prepare_data(self): + self.name = "Middlebury2006" + self._set_root() + assert self.split in ['train','subtrain','subval'] + self.pairname_to_Limgname = lambda pairname: osp.join(self.root, pairname) + self.pairname_to_Rimgname = lambda pairname: osp.join(self.root, osp.dirname(pairname), 'view5.png') + 
self.pairname_to_Ldispname = lambda pairname: osp.join(self.root, pairname.split('/')[0], 'disp1.png') + self.load_disparity = _read_middlebury20052006_disp + self.has_constant_resolution = False + + def _build_cache(self): + seqs = sorted(os.listdir(self.root)) + trainpairs = [] + for s in seqs: + for i in ['Illum1','Illum2','Illum3']: + for e in ['Exp0','Exp1','Exp2']: + trainpairs.append(osp.join(s,i,e,'view1.png')) + assert len(trainpairs)==189 + valseqs = ['Rocks1','Wood2'] + assert all(s in seqs for s in valseqs) + subtrainpairs = [p for p in trainpairs if not any(p.startswith(s+'/') for s in valseqs)] + subvalpairs = [p for p in trainpairs if any(p.startswith(s+'/') for s in valseqs)] + assert len(subtrainpairs)==171 and len(subvalpairs)==18, "incorrect parsing of pairs in Middlebury 2006" + tosave = {'train': trainpairs, 'subtrain': subtrainpairs, 'subval': subvalpairs} + return tosave + +class Md05Dataset(StereoDataset): + + def _prepare_data(self): + self.name = "Middlebury2005" + self._set_root() + assert self.split in ['train','subtrain','subval'] + self.pairname_to_Limgname = lambda pairname: osp.join(self.root, pairname) + self.pairname_to_Rimgname = lambda pairname: osp.join(self.root, osp.dirname(pairname), 'view5.png') + self.pairname_to_Ldispname = lambda pairname: osp.join(self.root, pairname.split('/')[0], 'disp1.png') + self.pairname_to_str = lambda pairname: pairname[:-4] + self.load_disparity = _read_middlebury20052006_disp + + def _build_cache(self): + seqs = sorted(os.listdir(self.root)) + trainpairs = [] + for s in seqs: + for i in ['Illum1','Illum2','Illum3']: + for e in ['Exp0','Exp1','Exp2']: + trainpairs.append(osp.join(s,i,e,'view1.png')) + assert len(trainpairs)==54, "incorrect parsing of pairs in Middlebury 2005" + valseqs = ['Reindeer'] + assert all(s in seqs for s in valseqs) + subtrainpairs = [p for p in trainpairs if not any(p.startswith(s+'/') for s in valseqs)] + subvalpairs = [p for p in trainpairs if any(p.startswith(s+'/') for s in valseqs)] + assert len(subtrainpairs)==45 and len(subvalpairs)==9, "incorrect parsing of pairs in Middlebury 2005" + tosave = {'train': trainpairs, 'subtrain': subtrainpairs, 'subval': subvalpairs} + return tosave + +class MdEval3Dataset(StereoDataset): + + def _prepare_data(self): + self.name = "MiddleburyEval3" + self._set_root() + assert self.split in [s+'_'+r for s in ['train','subtrain','subval','test','all'] for r in ['full','half','quarter']] + if self.split.endswith('_full'): + self.root = self.root.replace('/MiddEval3','/MiddEval3_F') + elif self.split.endswith('_half'): + self.root = self.root.replace('/MiddEval3','/MiddEval3_H') + else: + assert self.split.endswith('_quarter') + self.pairname_to_Limgname = lambda pairname: osp.join(self.root, pairname, 'im0.png') + self.pairname_to_Rimgname = lambda pairname: osp.join(self.root, pairname, 'im1.png') + self.pairname_to_Ldispname = lambda pairname: None if pairname.startswith('test') else osp.join(self.root, pairname, 'disp0GT.pfm') + self.pairname_to_str = lambda pairname: pairname + self.load_disparity = _read_middlebury_disp + # for submission only + self.submission_methodname = "CroCo-Stereo" + self.submission_sresolution = 'F' if self.split.endswith('_full') else ('H' if self.split.endswith('_half') else 'Q') + + def _build_cache(self): + trainpairs = ['train/'+s for s in sorted(os.listdir(self.root+'train/'))] + testpairs = ['test/'+s for s in sorted(os.listdir(self.root+'test/'))] + subvalpairs = trainpairs[-1:] + subtrainpairs = trainpairs[:-1] + 
allpairs = trainpairs+testpairs + assert len(trainpairs)==15 and len(testpairs)==15 and len(subvalpairs)==1 and len(subtrainpairs)==14 and len(allpairs)==30, "incorrect parsing of pairs in Middlebury Eval v3" + tosave = {} + for r in ['full','half','quarter']: + tosave.update(**{'train_'+r: trainpairs, 'subtrain_'+r: subtrainpairs, 'subval_'+r: subvalpairs, 'test_'+r: testpairs, 'all_'+r: allpairs}) + return tosave + + def submission_save_pairname(self, pairname, prediction, outdir, time): + assert prediction.ndim==2 + assert prediction.dtype==np.float32 + outfile = os.path.join(outdir, pairname.split('/')[0].replace('train','training')+self.submission_sresolution, pairname.split('/')[1], 'disp0'+self.submission_methodname+'.pfm') + os.makedirs( os.path.dirname(outfile), exist_ok=True) + writePFM(outfile, prediction) + timefile = os.path.join( os.path.dirname(outfile), "time"+self.submission_methodname+'.txt') + with open(timefile, 'w') as fid: + fid.write(str(time)) + + def finalize_submission(self, outdir): + cmd = f'cd {outdir}/; zip -r "{self.submission_methodname}.zip" .' + print(cmd) + os.system(cmd) + print(f'Done. Submission file at {outdir}/{self.submission_methodname}.zip') + +class ETH3DLowResDataset(StereoDataset): + + def _prepare_data(self): + self.name = "ETH3DLowRes" + self._set_root() + assert self.split in ['train','test','subtrain','subval','all'] + self.pairname_to_Limgname = lambda pairname: osp.join(self.root, pairname, 'im0.png') + self.pairname_to_Rimgname = lambda pairname: osp.join(self.root, pairname, 'im1.png') + self.pairname_to_Ldispname = None if self.split=='test' else lambda pairname: None if pairname.startswith('test/') else osp.join(self.root, pairname.replace('train/','train_gt/'), 'disp0GT.pfm') + self.pairname_to_str = lambda pairname: pairname + self.load_disparity = _read_eth3d_disp + self.has_constant_resolution = False + + def _build_cache(self): + trainpairs = ['train/' + s for s in sorted(os.listdir(self.root+'train/'))] + testpairs = ['test/' + s for s in sorted(os.listdir(self.root+'test/'))] + assert len(trainpairs) == 27 and len(testpairs) == 20, "incorrect parsing of pairs in ETH3D Low Res" + subvalpairs = ['train/delivery_area_3s','train/electro_3l','train/playground_3l'] + assert all(p in trainpairs for p in subvalpairs) + subtrainpairs = [p for p in trainpairs if not p in subvalpairs] + assert len(subvalpairs)==3 and len(subtrainpairs)==24, "incorrect parsing of pairs in ETH3D Low Res" + tosave = {'train': trainpairs, 'test': testpairs, 'subtrain': subtrainpairs, 'subval': subvalpairs, 'all': trainpairs+testpairs} + return tosave + + def submission_save_pairname(self, pairname, prediction, outdir, time): + assert prediction.ndim==2 + assert prediction.dtype==np.float32 + outfile = os.path.join(outdir, 'low_res_two_view', pairname.split('/')[1]+'.pfm') + os.makedirs( os.path.dirname(outfile), exist_ok=True) + writePFM(outfile, prediction) + timefile = outfile[:-4]+'.txt' + with open(timefile, 'w') as fid: + fid.write('runtime '+str(time)) + + def finalize_submission(self, outdir): + cmd = f'cd {outdir}/; zip -r "eth3d_low_res_two_view_results.zip" low_res_two_view' + print(cmd) + os.system(cmd) + print(f'Done. 
Submission file at {outdir}/eth3d_low_res_two_view_results.zip')
+
+class BoosterDataset(StereoDataset):
+
+    def _prepare_data(self):
+        self.name = "Booster"
+        self._set_root()
+        assert self.split in ['train_balanced','test_balanced','subtrain_balanced','subval_balanced'] # we use only the balanced version
+        self.pairname_to_Limgname = lambda pairname: osp.join(self.root, pairname)
+        self.pairname_to_Rimgname = lambda pairname: osp.join(self.root, pairname).replace('/camera_00/','/camera_02/')
+        self.pairname_to_Ldispname = lambda pairname: osp.join(self.root, osp.dirname(pairname), '../disp_00.npy') # same images with different colors, same gt per sequence
+        self.pairname_to_str = lambda pairname: pairname[:-4].replace('/camera_00/','/')
+        self.load_disparity = _read_booster_disp
+
+
+    def _build_cache(self):
+        trainseqs = sorted(os.listdir(self.root+'train/balanced'))
+        trainpairs = ['train/balanced/'+s+'/camera_00/'+imname for s in trainseqs for imname in sorted(os.listdir(self.root+'train/balanced/'+s+'/camera_00/'))]
+        testpairs = ['test/balanced/'+s+'/camera_00/'+imname for s in sorted(os.listdir(self.root+'test/balanced')) for imname in sorted(os.listdir(self.root+'test/balanced/'+s+'/camera_00/'))]
+        assert len(trainpairs) == 228 and len(testpairs) == 191
+        subtrainpairs = [p for p in trainpairs if any(s in p for s in trainseqs[:-2])]
+        subvalpairs = [p for p in trainpairs if any(s in p for s in trainseqs[-2:])]
+        # warning: if we do validation split, we should split scenes!!!
+        tosave = {'train_balanced': trainpairs, 'test_balanced': testpairs, 'subtrain_balanced': subtrainpairs, 'subval_balanced': subvalpairs,}
+        return tosave
+
+class SpringDataset(StereoDataset):
+
+    def _prepare_data(self):
+        self.name = "Spring"
+        self._set_root()
+        assert self.split in ['train', 'test', 'subtrain', 'subval']
+        self.pairname_to_Limgname = lambda pairname: osp.join(self.root, pairname+'.png')
+        self.pairname_to_Rimgname = lambda pairname: osp.join(self.root, pairname+'.png').replace('frame_right','<frame_right>').replace('frame_left','frame_right').replace('<frame_right>','frame_left') # swap left and right via a temporary token
+        self.pairname_to_Ldispname = lambda pairname: None if pairname.startswith('test') else osp.join(self.root, pairname+'.dsp5').replace('frame_left','disp1_left').replace('frame_right','disp1_right')
+        self.pairname_to_str = lambda pairname: pairname
+        self.load_disparity = _read_hdf5_disp
+
+    def _build_cache(self):
+        trainseqs = sorted(os.listdir( osp.join(self.root,'train')))
+        trainpairs = [osp.join('train',s,'frame_left',f[:-4]) for s in trainseqs for f in sorted(os.listdir(osp.join(self.root,'train',s,'frame_left')))]
+        testseqs = sorted(os.listdir( osp.join(self.root,'test')))
+        testpairs = [osp.join('test',s,'frame_left',f[:-4]) for s in testseqs for f in sorted(os.listdir(osp.join(self.root,'test',s,'frame_left')))]
+        testpairs += [p.replace('frame_left','frame_right') for p in testpairs]
+        """maxnorm = {'0001': 32.88, '0002': 228.5, '0004': 298.2, '0005': 142.5, '0006': 113.6, '0007': 27.3, '0008': 554.5, '0009': 155.6, '0010': 126.1, '0011': 87.6, '0012': 303.2, '0013': 24.14, '0014': 82.56, '0015': 98.44, '0016': 156.9, '0017': 28.17, '0018': 21.03, '0020': 178.0, '0021': 58.06, '0022': 354.2, '0023': 8.79, '0024': 97.06, '0025': 55.16, '0026': 91.9, '0027': 156.6, '0030': 200.4, '0032': 58.66, '0033': 373.5, '0036': 149.4, '0037': 5.625, '0038': 37.0, '0039': 12.2, '0041': 453.5, '0043': 457.0, '0044': 379.5, '0045': 161.8, '0047': 105.44} # => let's use 0041"""
+        subtrainpairs = [p for p in trainpairs if p.split('/')[1]!='0041']
+        subvalpairs = [p for p in trainpairs if p.split('/')[1]=='0041']
+        assert len(trainpairs)==5000 and len(testpairs)==2000 and len(subtrainpairs)==4904 and len(subvalpairs)==96, "incorrect parsing of pairs in Spring"
+        tosave = {'train': trainpairs, 'test': testpairs, 'subtrain': subtrainpairs, 'subval': subvalpairs}
+        return tosave
+
+    def submission_save_pairname(self, pairname, prediction, outdir, time):
+        assert prediction.ndim==2
+        assert prediction.dtype==np.float32
+        outfile = os.path.join(outdir, pairname+'.dsp5').replace('frame_left','disp1_left').replace('frame_right','disp1_right')
+        os.makedirs( os.path.dirname(outfile), exist_ok=True)
+        writeDsp5File(prediction, outfile)
+
+    def finalize_submission(self, outdir):
+        assert self.split=='test'
+        exe = f"{self.root}/disp1_subsampling"
+        if os.path.isfile(exe):
+            cmd = f'cd "{outdir}/test"; {exe} .'
+            print(cmd)
+            os.system(cmd)
+        else:
+            print('Could not find disp1_subsampling executable for submission.')
+            print('Please download it and run:')
+            print(f'cd "{outdir}/test"; <disp1_subsampling_exe> .')
+
+class Kitti12Dataset(StereoDataset):
+
+    def _prepare_data(self):
+        self.name = "Kitti12"
+        self._set_root()
+        assert self.split in ['train','test']
+        self.pairname_to_Limgname = lambda pairname: osp.join(self.root, pairname+'_10.png')
+        self.pairname_to_Rimgname = lambda pairname: osp.join(self.root, pairname.replace('/colored_0/','/colored_1/')+'_10.png')
+        self.pairname_to_Ldispname = None if self.split=='test' else lambda pairname: osp.join(self.root, pairname.replace('/colored_0/','/disp_occ/')+'_10.png')
+        self.pairname_to_str = lambda pairname: pairname.replace('/colored_0/','/')
+        self.load_disparity = _read_kitti_disp
+
+    def _build_cache(self):
+        trainseqs = ["training/colored_0/%06d"%(i) for i in range(194)]
+        testseqs = ["testing/colored_0/%06d"%(i) for i in range(195)]
+        assert len(trainseqs)==194 and len(testseqs)==195, "incorrect parsing of pairs in Kitti12"
+        tosave = {'train': trainseqs, 'test': testseqs}
+        return tosave
+
+    def submission_save_pairname(self, pairname, prediction, outdir, time):
+        assert prediction.ndim==2
+        assert prediction.dtype==np.float32
+        outfile = os.path.join(outdir, pairname.split('/')[-1]+'_10.png')
+        os.makedirs( os.path.dirname(outfile), exist_ok=True)
+        img = (prediction * 256).astype('uint16')
+        Image.fromarray(img).save(outfile)
+
+    def finalize_submission(self, outdir):
+        assert self.split=='test'
+        cmd = f'cd {outdir}/; zip -r "kitti12_results.zip" .'
+        print(cmd)
+        os.system(cmd)
+        print(f'Done. Submission file at {outdir}/kitti12_results.zip')
+
+class Kitti15Dataset(StereoDataset):
+
+    def _prepare_data(self):
+        self.name = "Kitti15"
+        self._set_root()
+        assert self.split in ['train','subtrain','subval','test']
+        self.pairname_to_Limgname = lambda pairname: osp.join(self.root, pairname+'_10.png')
+        self.pairname_to_Rimgname = lambda pairname: osp.join(self.root, pairname.replace('/image_2/','/image_3/')+'_10.png')
+        self.pairname_to_Ldispname = None if self.split=='test' else lambda pairname: osp.join(self.root, pairname.replace('/image_2/','/disp_occ_0/')+'_10.png')
+        self.pairname_to_str = lambda pairname: pairname.replace('/image_2/','/')
+        self.load_disparity = _read_kitti_disp
+
+    def _build_cache(self):
+        trainseqs = ["training/image_2/%06d"%(i) for i in range(200)]
+        subtrainseqs = trainseqs[:-5]
+        subvalseqs = trainseqs[-5:]
+        testseqs = ["testing/image_2/%06d"%(i) for i in range(200)]
+        assert len(trainseqs)==200 and len(subtrainseqs)==195 and len(subvalseqs)==5 and len(testseqs)==200, "incorrect parsing of pairs in Kitti15"
+        tosave = {'train': trainseqs, 'subtrain': subtrainseqs, 'subval': subvalseqs, 'test': testseqs}
+        return tosave
+
+    def submission_save_pairname(self, pairname, prediction, outdir, time):
+        assert prediction.ndim==2
+        assert prediction.dtype==np.float32
+        outfile = os.path.join(outdir, 'disp_0', pairname.split('/')[-1]+'_10.png')
+        os.makedirs( os.path.dirname(outfile), exist_ok=True)
+        img = (prediction * 256).astype('uint16')
+        Image.fromarray(img).save(outfile)
+
+    def finalize_submission(self, outdir):
+        assert self.split=='test'
+        cmd = f'cd {outdir}/; zip -r "kitti15_results.zip" disp_0'
+        print(cmd)
+        os.system(cmd)
+        print(f'Done. Submission file at {outdir}/kitti15_results.zip')
+
+
+### auxiliary functions
+
+def _read_img(filename):
+    # convert to RGB for scene flow finalpass data
+    img = np.asarray(Image.open(filename).convert('RGB'))
+    return img
+
+def _read_booster_disp(filename):
+    disp = np.load(filename)
+    disp[disp==0.0] = np.inf
+    return disp
+
+def _read_png_disp(filename, coef=1.0):
+    disp = np.asarray(Image.open(filename))
+    disp = disp.astype(np.float32) / coef
+    disp[disp==0.0] = np.inf
+    return disp
+
+def _read_pfm_disp(filename):
+    disp = np.ascontiguousarray(_read_pfm(filename)[0])
+    disp[disp<=0] = np.inf # eg /nfs/data/ffs-3d/datasets/middlebury/2014/Shopvac-imperfect/disp0.pfm
+    return disp
+
+def _read_npy_disp(filename):
+    return np.load(filename)
+
+def _read_crestereo_disp(filename): return _read_png_disp(filename, coef=32.0)
+def _read_middlebury20052006_disp(filename): return _read_png_disp(filename, coef=1.0)
+def _read_kitti_disp(filename): return _read_png_disp(filename, coef=256.0)
+_read_sceneflow_disp = _read_pfm_disp
+_read_eth3d_disp = _read_pfm_disp
+_read_middlebury_disp = _read_pfm_disp
+_read_carla_disp = _read_pfm_disp
+_read_tartanair_disp = _read_npy_disp
+
+def _read_hdf5_disp(filename):
+    disp = np.asarray(h5py.File(filename)['disparity'])
+    disp[np.isnan(disp)] = np.inf # make invalid values +inf
+    #disp[disp==0.0] = np.inf # make invalid values +inf
+    return disp.astype(np.float32)
+
+import re
+def _read_pfm(file):
+    file = open(file, 'rb')
+
+    color = None
+    width = None
+    height = None
+    scale = None
+    endian = None
+
+    header = file.readline().rstrip()
+    if header.decode("ascii") == 'PF':
+        color = True
+    elif header.decode("ascii") == 'Pf':
+        color = False
+    else:
+        raise Exception('Not a PFM file.')
+
+    dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode("ascii"))
+    if dim_match:
+        width, height = list(map(int, dim_match.groups()))
+    else:
+        raise Exception('Malformed PFM header.')
+
+    scale = float(file.readline().decode("ascii").rstrip())
+    if scale < 0: # little-endian
+        endian = '<'
+        scale = -scale
+    else:
+        endian = '>' # big-endian
+
+    data = np.fromfile(file, endian + 'f')
+    shape = (height, width, 3) if color else (height, width)
+
+    data = np.reshape(data, shape)
+    data = np.flipud(data)
+    return data, scale
+
+def writePFM(file, image, scale=1):
+    file = open(file, 'wb')
+
+    color = None
+
+    if image.dtype.name != 'float32':
+        raise Exception('Image dtype must be float32.')
+
+    image = np.flipud(image)
+
+    if len(image.shape) == 3 and image.shape[2] == 3: # color image
+        color = True
+    elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1: # greyscale
+        color = False
+    else:
+        raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
+
+    file.write(('PF\n' if color else 'Pf\n').encode()) # encode whichever header line is chosen (the parentheses matter)
+    file.write('%d %d\n'.encode() % (image.shape[1], image.shape[0]))
+
+    endian = image.dtype.byteorder
+
+    if endian == '<' or endian == '=' and sys.byteorder == 'little':
+        scale = -scale
+
+    file.write('%f\n'.encode() % scale)
+
+    image.tofile(file)
+
+def writeDsp5File(disp, filename):
+    with h5py.File(filename, "w") as f:
+        f.create_dataset("disparity", data=disp, compression="gzip", compression_opts=5)
+
+
+# disp visualization
+
+def vis_disparity(disp, m=None, M=None):
+    if m is None: m = disp.min()
+    if M is None: M = disp.max()
+    disp_vis = (disp - m) / (M-m) * 255.0
+    disp_vis = disp_vis.astype("uint8")
+    disp_vis = cv2.applyColorMap(disp_vis, cv2.COLORMAP_INFERNO)
+    return disp_vis
+
+# dataset getter
+
+def get_train_dataset_stereo(dataset_str, augmentor=True, crop_size=None):
+    dataset_str = dataset_str.replace('(','Dataset(')
+    if augmentor:
+        dataset_str = dataset_str.replace(')',', augmentor=True)')
+    if crop_size is not None:
+        dataset_str = dataset_str.replace(')',', crop_size={:s})'.format(str(crop_size)))
+    return eval(dataset_str)
+
+def get_test_datasets_stereo(dataset_str):
+    dataset_str = dataset_str.replace('(','Dataset(')
+    return [eval(s) for s in dataset_str.split('+')]
\ No newline at end of file
diff --git a/dynamic_predictor/croco/stereoflow/download_model.sh b/dynamic_predictor/croco/stereoflow/download_model.sh
new file mode 100644
index 0000000000000000000000000000000000000000..533119609108c5ec3c22ff79b10e9215c1ac5098
--- /dev/null
+++ b/dynamic_predictor/croco/stereoflow/download_model.sh
@@ -0,0 +1,12 @@
+# Copyright (C) 2022-present Naver Corporation. All rights reserved.
+# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
+
+model=$1
+outfile="stereoflow_models/${model}"
+if [[ ! -f $outfile ]]
+then
+	mkdir -p stereoflow_models/;
+	wget https://download.europe.naverlabs.com/ComputerVision/CroCo/StereoFlow_models/$1 -P stereoflow_models/;
+else
+	echo "Model ${model} already downloaded in ${outfile}."
+fi
\ No newline at end of file
diff --git a/dynamic_predictor/croco/stereoflow/engine.py b/dynamic_predictor/croco/stereoflow/engine.py
new file mode 100644
index 0000000000000000000000000000000000000000..c057346b99143bf6b9c4666a58215b2b91aca7a6
--- /dev/null
+++ b/dynamic_predictor/croco/stereoflow/engine.py
@@ -0,0 +1,280 @@
+# Copyright (C) 2022-present Naver Corporation. All rights reserved.
+# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
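+#
+# Convention used below (inferred from split_prediction_conf): the network outputs
+# B x C(+1) x H x W maps; when the criterion has with_conf=True, the extra last
+# channel is a per-pixel confidence that is split off before computing metrics.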
+ +# -------------------------------------------------------- +# Main function for training one epoch or testing +# -------------------------------------------------------- + +import math +import sys +from typing import Iterable +import numpy as np +import torch +import torchvision + +from utils import misc as misc + + +def split_prediction_conf(predictions, with_conf=False): + if not with_conf: + return predictions, None + conf = predictions[:,-1:,:,:] + predictions = predictions[:,:-1,:,:] + return predictions, conf + +def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, metrics: torch.nn.Module, + data_loader: Iterable, optimizer: torch.optim.Optimizer, + device: torch.device, epoch: int, loss_scaler, + log_writer=None, print_freq = 20, + args=None): + model.train(True) + metric_logger = misc.MetricLogger(delimiter=" ") + metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}')) + header = 'Epoch: [{}]'.format(epoch) + + accum_iter = args.accum_iter + + optimizer.zero_grad() + + details = {} + + if log_writer is not None: + print('log_dir: {}'.format(log_writer.log_dir)) + + if args.img_per_epoch: + iter_per_epoch = args.img_per_epoch // args.batch_size + int(args.img_per_epoch % args.batch_size > 0) + assert len(data_loader) >= iter_per_epoch, 'Dataset is too small for so many iterations' + len_data_loader = iter_per_epoch + else: + len_data_loader, iter_per_epoch = len(data_loader), None + + for data_iter_step, (image1, image2, gt, pairname) in enumerate(metric_logger.log_every(data_loader, print_freq, header, max_iter=iter_per_epoch)): + + image1 = image1.to(device, non_blocking=True) + image2 = image2.to(device, non_blocking=True) + gt = gt.to(device, non_blocking=True) + + # we use a per iteration (instead of per epoch) lr scheduler + if data_iter_step % accum_iter == 0: + misc.adjust_learning_rate(optimizer, data_iter_step / len_data_loader + epoch, args) + + with torch.cuda.amp.autocast(enabled=bool(args.amp)): + prediction = model(image1, image2) + prediction, conf = split_prediction_conf(prediction, criterion.with_conf) + batch_metrics = metrics(prediction.detach(), gt) + loss = criterion(prediction, gt) if conf is None else criterion(prediction, gt, conf) + + loss_value = loss.item() + if not math.isfinite(loss_value): + print("Loss is {}, stopping training".format(loss_value)) + sys.exit(1) + + loss /= accum_iter + loss_scaler(loss, optimizer, parameters=model.parameters(), + update_grad=(data_iter_step + 1) % accum_iter == 0) + if (data_iter_step + 1) % accum_iter == 0: + optimizer.zero_grad() + + torch.cuda.synchronize() + + metric_logger.update(loss=loss_value) + for k,v in batch_metrics.items(): + metric_logger.update(**{k: v.item()}) + lr = optimizer.param_groups[0]["lr"] + metric_logger.update(lr=lr) + + #if args.dsitributed: loss_value_reduce = misc.all_reduce_mean(loss_value) + time_to_log = ((data_iter_step + 1) % (args.tboard_log_step * accum_iter) == 0 or data_iter_step == len_data_loader-1) + loss_value_reduce = misc.all_reduce_mean(loss_value) + if log_writer is not None and time_to_log: + epoch_1000x = int((data_iter_step / len_data_loader + epoch) * 1000) + # We use epoch_1000x as the x-axis in tensorboard. This calibrates different curves when batch size changes. 
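A quick numeric illustration of the `epoch_1000x` convention described in the comment above (the values are made-up); the `add_scalar` calls that follow use this quantity as their global step:

```python
# Hypothetical numbers: a point logged 40% of the way through epoch 2 maps to
# the same tensorboard x-coordinate regardless of the per-epoch iteration count.
epoch = 2
for data_iter_step, len_data_loader in [(400, 1000), (200, 500)]:
    epoch_1000x = int((data_iter_step / len_data_loader + epoch) * 1000)
    print(epoch_1000x)  # prints 2400 both times
```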
+ log_writer.add_scalar('train/loss', loss_value_reduce, epoch_1000x) + log_writer.add_scalar('lr', lr, epoch_1000x) + for k,v in batch_metrics.items(): + log_writer.add_scalar('train/'+k, v.item(), epoch_1000x) + + # gather the stats from all processes + #if args.distributed: metric_logger.synchronize_between_processes() + print("Averaged stats:", metric_logger) + return {k: meter.global_avg for k, meter in metric_logger.meters.items()} + + +@torch.no_grad() +def validate_one_epoch(model: torch.nn.Module, + criterion: torch.nn.Module, + metrics: torch.nn.Module, + data_loaders: list[Iterable], + device: torch.device, + epoch: int, + log_writer=None, + args=None): + + model.eval() + metric_loggers = [] + header = 'Epoch: [{}]'.format(epoch) + print_freq = 20 + + conf_mode = args.tile_conf_mode + crop = args.crop + + if log_writer is not None: + print('log_dir: {}'.format(log_writer.log_dir)) + + results = {} + dnames = [] + image1, image2, gt, prediction = None, None, None, None + for didx, data_loader in enumerate(data_loaders): + dname = str(data_loader.dataset) + dnames.append(dname) + metric_loggers.append(misc.MetricLogger(delimiter=" ")) + for data_iter_step, (image1, image2, gt, pairname) in enumerate(metric_loggers[didx].log_every(data_loader, print_freq, header)): + image1 = image1.to(device, non_blocking=True) + image2 = image2.to(device, non_blocking=True) + gt = gt.to(device, non_blocking=True) + if dname.startswith('Spring'): + assert gt.size(2)==image1.size(2)*2 and gt.size(3)==image1.size(3)*2 + gt = (gt[:,:,0::2,0::2] + gt[:,:,0::2,1::2] + gt[:,:,1::2,0::2] + gt[:,:,1::2,1::2] ) / 4.0 # we approximate the gt based on the 2x upsampled ones + + with torch.inference_mode(): + prediction, tiled_loss, c = tiled_pred(model, criterion, image1, image2, gt, conf_mode=conf_mode, overlap=args.val_overlap, crop=crop, with_conf=criterion.with_conf) + batch_metrics = metrics(prediction.detach(), gt) + loss = criterion(prediction.detach(), gt) if not criterion.with_conf else criterion(prediction.detach(), gt, c) + loss_value = loss.item() + metric_loggers[didx].update(loss_tiled=tiled_loss.item()) + metric_loggers[didx].update(**{f'loss': loss_value}) + for k,v in batch_metrics.items(): + metric_loggers[didx].update(**{dname+'_' + k: v.item()}) + + results = {k: meter.global_avg for ml in metric_loggers for k, meter in ml.meters.items()} + if len(dnames)>1: + for k in batch_metrics.keys(): + results['AVG_'+k] = sum(results[dname+'_'+k] for dname in dnames) / len(dnames) + + if log_writer is not None : + epoch_1000x = int((1 + epoch) * 1000) + for k,v in results.items(): + log_writer.add_scalar('val/'+k, v, epoch_1000x) + + print("Averaged stats:", results) + return results + +import torch.nn.functional as F +def _resize_img(img, new_size): + return F.interpolate(img, size=new_size, mode='bicubic', align_corners=False) +def _resize_stereo_or_flow(data, new_size): + assert data.ndim==4 + assert data.size(1) in [1,2] + scale_x = new_size[1]/float(data.size(3)) + out = F.interpolate(data, size=new_size, mode='bicubic', align_corners=False) + out[:,0,:,:] *= scale_x + if out.size(1)==2: + scale_y = new_size[0]/float(data.size(2)) + out[:,1,:,:] *= scale_y + print(scale_x, new_size, data.shape) + return out + + +@torch.no_grad() +def tiled_pred(model, criterion, img1, img2, gt, + overlap=0.5, bad_crop_thr=0.05, + downscale=False, crop=512, ret='loss', + conf_mode='conf_expsigmoid_10_5', with_conf=False, + return_time=False): + + # for each image, we are going to run inference on many 
overlapping patches
+    # then, all predictions will be weighted-averaged
+    if gt is not None:
+        B, C, H, W = gt.shape
+    else:
+        B, _, H, W = img1.shape
+        C = model.head.num_channels-int(with_conf)
+    win_height, win_width = crop[0], crop[1]
+
+    # upscale to be larger than the crop
+    do_change_scale = H<win_height or W<win_width
+    # [...]
+
+def _overlapping_windows(total, window, overlap):
+    assert total >= window and 0 <= overlap < 1, (total, window, overlap)
+    num_windows = 1 + int(np.ceil( (total - window) / ((1-overlap) * window) ))
+    offsets = np.linspace(0, total-window, num_windows).round().astype(int)
+    yield from (slice(x, x+window) for x in offsets)
+
+def _crop(img, sy, sx):
+    B, THREE, H, W = img.shape
+    if 0 <= sy.start and sy.stop <= H and 0 <= sx.start and sx.stop <= W:
+        return img[:,:,sy,sx]
+    l, r = max(0,-sx.start), max(0,sx.stop-W)
+    t, b = max(0,-sy.start), max(0,sy.stop-H)
+    img = torch.nn.functional.pad(img, (l,r,t,b), mode='constant')
+    return img[:, :, slice(sy.start+t,sy.stop+t), slice(sx.start+l,sx.stop+l)]
\ No newline at end of file
diff --git a/dynamic_predictor/croco/stereoflow/test.py b/dynamic_predictor/croco/stereoflow/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..0248e56664c769752595af251e1eadcfa3a479d9
--- /dev/null
+++ b/dynamic_predictor/croco/stereoflow/test.py
@@ -0,0 +1,216 @@
+# Copyright (C) 2022-present Naver Corporation. All rights reserved.
+# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
+
+# --------------------------------------------------------
+# Main test function
+# --------------------------------------------------------
+
+import os
+import argparse
+import pickle
+from PIL import Image
+import numpy as np
+from tqdm import tqdm
+
+import torch
+from torch.utils.data import DataLoader
+
+import utils.misc as misc
+from models.croco_downstream import CroCoDownstreamBinocular
+from models.head_downstream import PixelwiseTaskWithDPT
+
+from stereoflow.criterion import *
+from stereoflow.datasets_stereo import get_test_datasets_stereo
+from stereoflow.datasets_flow import get_test_datasets_flow
+from stereoflow.engine import tiled_pred
+
+from stereoflow.datasets_stereo import vis_disparity
+from stereoflow.datasets_flow import flowToColor
+
+def get_args_parser():
+    parser = argparse.ArgumentParser('Test CroCo models on stereo/flow', add_help=False)
+    # important argument
+    parser.add_argument('--model', required=True, type=str, help='Path to the model to evaluate')
+    parser.add_argument('--dataset', required=True, type=str, help="test dataset (there can be multiple datasets separated by a +)")
+    # tiling
+    parser.add_argument('--tile_conf_mode', type=str, default='', help='Weights for the tiling aggregation based on confidence (empty means use the formula from the loaded checkpoint)')
+    parser.add_argument('--tile_overlap', type=float, default=0.7, help='overlap between tiles')
+    # save (it will automatically go to _/_)
+    parser.add_argument('--save', type=str, nargs='+', default=[],
+                        help='what to save: \
+                              metrics (pickle file), \
+                              pred (raw prediction saved as torch tensor), \
+                              visu (visualization in png of each prediction), \
+                              err10 (visualization in png of the error clamped at 10 for each prediction), \
+                              submission (submission file)')
+    # other (no impact)
+    parser.add_argument('--num_workers', default=4, type=int)
+    return parser
+
+
+def _load_model_and_criterion(model_path, do_load_metrics, device):
+    print('loading model from', model_path)
+    assert os.path.isfile(model_path)
+    ckpt = torch.load(model_path, 'cpu')
+
+    ckpt_args = ckpt['args']
+    task = ckpt_args.task
+    tile_conf_mode = ckpt_args.tile_conf_mode
+    num_channels = {'stereo': 1, 'flow': 2}[task]
+    with_conf = eval(ckpt_args.criterion).with_conf
+    if with_conf: num_channels += 1
+    print('head: PixelwiseTaskWithDPT()')
+    head = PixelwiseTaskWithDPT()
+    head.num_channels = num_channels
+    print('croco_args:', ckpt_args.croco_args)
+    model = CroCoDownstreamBinocular(head, **ckpt_args.croco_args)
+    msg = model.load_state_dict(ckpt['model'], strict=True)
+    model.eval()
+    model = model.to(device)
+
+    if do_load_metrics:
+        if task=='stereo':
+            metrics = StereoDatasetMetrics().to(device)
+        else:
+            metrics = FlowDatasetMetrics().to(device)
+    else:
+        metrics = None
+
+    return model, metrics, ckpt_args.crop, with_conf, task, tile_conf_mode
+
+
+def _save_batch(pred, gt, pairnames, dataset, task, save, outdir, time, submission_dir=None):
+
+    for i in range(len(pairnames)):
+
+        pairname = eval(pairnames[i]) if pairnames[i].startswith('(') else pairnames[i] # unbatch pairname
+        fname = os.path.join(outdir, dataset.pairname_to_str(pairname))
+        os.makedirs(os.path.dirname(fname), exist_ok=True)
+
+        predi = pred[i,...]
+        if gt is not None: gti = gt[i,...]
+
+        if 'pred' in save:
+            torch.save(predi.squeeze(0).cpu(), fname+'_pred.pth')
+
+        if 'visu' in save:
+            if task=='stereo':
+                disparity = predi.permute((1,2,0)).squeeze(2).cpu().numpy()
+                m, M = None, None
+                if gt is not None:
+                    mask = torch.isfinite(gti)
+                    m = gti[mask].min()
+                    M = gti[mask].max()
+                img_disparity = vis_disparity(disparity, m=m, M=M)
+                Image.fromarray(img_disparity).save(fname+'_pred.png')
+            else:
+                # normalize flowToColor according to the maxnorm of gt (or prediction if not available)
+                flowNorm = torch.sqrt(torch.sum( (gti if gt is not None else predi)**2, dim=0)).max().item()
+                imgflow = flowToColor(predi.permute((1,2,0)).cpu().numpy(), maxflow=flowNorm)
+                Image.fromarray(imgflow).save(fname+'_pred.png')
+
+        if 'err10' in save:
+            assert gt is not None
+            L2err = torch.sqrt(torch.sum( (gti-predi)**2, dim=0))
+            valid = torch.isfinite(gti[0,:,:])
+            L2err[~valid] = 0.0
+            L2err = torch.clamp(L2err, max=10.0)
+            red = (L2err*255.0/10.0).to(dtype=torch.uint8)[:,:,None]
+            zer = torch.zeros_like(red)
+            imgerr = torch.cat( (red,zer,zer), dim=2).cpu().numpy()
+            Image.fromarray(imgerr).save(fname+'_err10.png')
+
+        if 'submission' in save:
+            assert submission_dir is not None
+            predi_np = predi.permute(1,2,0).squeeze(2).cpu().numpy() # transform into HxWx2 for flow or HxW for stereo
+            dataset.submission_save_pairname(pairname, predi_np, submission_dir, time)
+
+def main(args):
+
+    # load the pretrained model and metrics
+    device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
+    model, metrics, cropsize, with_conf, task, tile_conf_mode = _load_model_and_criterion(args.model, 'metrics' in args.save, device)
+    if args.tile_conf_mode=='': args.tile_conf_mode = tile_conf_mode
+
+    # load the datasets
+    datasets = (get_test_datasets_stereo if task=='stereo' else get_test_datasets_flow)(args.dataset)
+    dataloaders = [DataLoader(dataset, batch_size=1, shuffle=False, num_workers=args.num_workers, pin_memory=True, drop_last=False) for dataset in datasets]
+
+    # run
+    for i,dataloader in enumerate(dataloaders):
+        dataset = datasets[i]
+        dstr = args.dataset.split('+')[i]
+
+        outdir = args.model+'_'+misc.filename(dstr)
+        if 'metrics' in args.save and len(args.save)==1:
+            fname = os.path.join(outdir, f'conf_{args.tile_conf_mode}_overlap_{args.tile_overlap}.pkl')
+            if os.path.isfile(fname) and len(args.save)==1:
+                print(' metrics already computed in '+fname)
+                with
open(fname, 'rb') as fid: + results = pickle.load(fid) + for k,v in results.items(): + print('{:s}: {:.3f}'.format(k, v)) + continue + + if 'submission' in args.save: + dirname = f'submission_conf_{args.tile_conf_mode}_overlap_{args.tile_overlap}' + submission_dir = os.path.join(outdir, dirname) + else: + submission_dir = None + + print('') + print('saving {:s} in {:s}'.format('+'.join(args.save), outdir)) + print(repr(dataset)) + + if metrics is not None: + metrics.reset() + + for data_iter_step, (image1, image2, gt, pairnames) in enumerate(tqdm(dataloader)): + + do_flip = (task=='stereo' and dstr.startswith('Spring') and any("right" in p for p in pairnames)) # we flip the images and will flip the prediction after as we assume img1 is on the left + + image1 = image1.to(device, non_blocking=True) + image2 = image2.to(device, non_blocking=True) + gt = gt.to(device, non_blocking=True) if gt.numel()>0 else None # special case for test time + if do_flip: + assert all("right" in p for p in pairnames) + image1 = image1.flip(dims=[3]) # this is already the right frame, let's flip it + image2 = image2.flip(dims=[3]) + gt = gt # that is ok + + with torch.inference_mode(): + pred, _, _, time = tiled_pred(model, None, image1, image2, None if dataset.name=='Spring' else gt, conf_mode=args.tile_conf_mode, overlap=args.tile_overlap, crop=cropsize, with_conf=with_conf, return_time=True) + + if do_flip: + pred = pred.flip(dims=[3]) + + if metrics is not None: + metrics.add_batch(pred, gt) + + if any(k in args.save for k in ['pred','visu','err10','submission']): + _save_batch(pred, gt, pairnames, dataset, task, args.save, outdir, time, submission_dir=submission_dir) + + + # print + if metrics is not None: + results = metrics.get_results() + for k,v in results.items(): + print('{:s}: {:.3f}'.format(k, v)) + + # save if needed + if 'metrics' in args.save: + os.makedirs(os.path.dirname(fname), exist_ok=True) + with open(fname, 'wb') as fid: + pickle.dump(results, fid) + print('metrics saved in', fname) + + # finalize submission if needed + if 'submission' in args.save: + dataset.finalize_submission(submission_dir) + + + +if __name__ == '__main__': + args = get_args_parser() + args = args.parse_args() + main(args) \ No newline at end of file diff --git a/dynamic_predictor/croco/stereoflow/train.py b/dynamic_predictor/croco/stereoflow/train.py new file mode 100644 index 0000000000000000000000000000000000000000..91f2414ffbe5ecd547d31c0e2455478d402719d6 --- /dev/null +++ b/dynamic_predictor/croco/stereoflow/train.py @@ -0,0 +1,253 @@ +# Copyright (C) 2022-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). 
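A brief aside before the training script: the `--tile_overlap` flag in `test.py` above controls how densely `tiled_pred` covers each image, and the window offsets come from `_overlapping_windows` in `engine.py`. A self-contained sketch of that offset computation, with made-up sizes (a 1024-px dimension, 512-px windows, 0.7 overlap):

```python
import numpy as np

# Mirrors the offset computation of _overlapping_windows in engine.py;
# the sizes below are hypothetical example values.
def overlapping_offsets(total, window, overlap):
    num_windows = 1 + int(np.ceil((total - window) / ((1 - overlap) * window)))
    return np.linspace(0, total - window, num_windows).round().astype(int)

print(overlapping_offsets(1024, 512, 0.7))  # [  0 128 256 384 512]
```

Higher overlap produces more windows, and therefore smoother weighted averaging of the per-tile predictions, at a roughly linear cost in inference passes.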
+ +# -------------------------------------------------------- +# Main training function +# -------------------------------------------------------- + +import argparse +import datetime +import json +import numpy as np +import os +import sys +import time + +import torch +import torch.distributed as dist +import torch.backends.cudnn as cudnn +from torch.utils.tensorboard import SummaryWriter +import torchvision.transforms as transforms +import torchvision.datasets as datasets +from torch.utils.data import DataLoader + +import utils +import utils.misc as misc +from utils.misc import NativeScalerWithGradNormCount as NativeScaler +from models.croco_downstream import CroCoDownstreamBinocular, croco_args_from_ckpt +from models.pos_embed import interpolate_pos_embed +from models.head_downstream import PixelwiseTaskWithDPT + +from stereoflow.datasets_stereo import get_train_dataset_stereo, get_test_datasets_stereo +from stereoflow.datasets_flow import get_train_dataset_flow, get_test_datasets_flow +from stereoflow.engine import train_one_epoch, validate_one_epoch +from stereoflow.criterion import * + + +def get_args_parser(): + # prepare subparsers + parser = argparse.ArgumentParser('Finetuning CroCo models on stereo or flow', add_help=False) + subparsers = parser.add_subparsers(title="Task (stereo or flow)", dest="task", required=True) + parser_stereo = subparsers.add_parser('stereo', help='Training stereo model') + parser_flow = subparsers.add_parser('flow', help='Training flow model') + def add_arg(name_or_flags, default=None, default_stereo=None, default_flow=None, **kwargs): + if default is not None: assert default_stereo is None and default_flow is None, "setting default makes default_stereo and default_flow disabled" + parser_stereo.add_argument(name_or_flags, default=default if default is not None else default_stereo, **kwargs) + parser_flow.add_argument(name_or_flags, default=default if default is not None else default_flow, **kwargs) + # output dir + add_arg('--output_dir', required=True, type=str, help='path where to save, if empty, automatically created') + # model + add_arg('--crop', type=int, nargs = '+', default_stereo=[352, 704], default_flow=[320, 384], help = "size of the random image crops used during training.") + add_arg('--pretrained', required=True, type=str, help="Load pretrained model (required as croco arguments come from there)") + # criterion + add_arg('--criterion', default_stereo='LaplacianLossBounded2()', default_flow='LaplacianLossBounded()', type=str, help='string to evaluate to get criterion') + add_arg('--bestmetric', default_stereo='avgerr', default_flow='EPE', type=str) + # dataset + add_arg('--dataset', type=str, required=True, help="training set") + # training + add_arg('--seed', default=0, type=int, help='seed') + add_arg('--batch_size', default_stereo=6, default_flow=8, type=int, help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus') + add_arg('--epochs', default=32, type=int, help='number of training epochs') + add_arg('--img_per_epoch', type=int, default=None, help='Fix the number of images seen in an epoch (None means use all training pairs)') + add_arg('--accum_iter', default=1, type=int, help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)') + add_arg('--weight_decay', type=float, default=0.05, help='weight decay (default: 0.05)') + add_arg('--lr', type=float, default_stereo=3e-5, default_flow=2e-5, metavar='LR', help='learning rate (absolute lr)') + add_arg('--min_lr', 
type=float, default=0., metavar='LR', help='lower lr bound for cyclic schedulers that hit 0') + add_arg('--warmup_epochs', type=int, default=1, metavar='N', help='epochs to warmup LR') + add_arg('--optimizer', default='AdamW(param_groups, lr=args.lr, betas=(0.9, 0.95))', type=str, + help="Optimizer from torch.optim [ default: AdamW(param_groups, lr=args.lr, betas=(0.9, 0.95)) ]") + add_arg('--amp', default=0, type=int, choices=[0,1], help='enable automatic mixed precision training') + # validation + add_arg('--val_dataset', type=str, default='', help="Validation sets, multiple separated by + (empty string means that no validation is performed)") + add_arg('--tile_conf_mode', type=str, default_stereo='conf_expsigmoid_15_3', default_flow='conf_expsigmoid_10_5', help='Weights for tile aggregation') + add_arg('--val_overlap', default=0.7, type=float, help='Overlap value for the tiling') + # others + add_arg('--num_workers', default=8, type=int) + add_arg('--eval_every', type=int, default=1, help='Val loss evaluation frequency') + add_arg('--save_every', type=int, default=1, help='Save checkpoint frequency') + add_arg('--start_from', type=str, default=None, help='Start training using weights from an other model (eg for finetuning)') + add_arg('--tboard_log_step', type=int, default=100, help='Log to tboard every so many steps') + add_arg('--dist_url', default='env://', help='url used to set up distributed training') + + return parser + + +def main(args): + misc.init_distributed_mode(args) + global_rank = misc.get_rank() + num_tasks = misc.get_world_size() + + assert os.path.isfile(args.pretrained) + print("output_dir: "+args.output_dir) + os.makedirs(args.output_dir, exist_ok=True) + + # fix the seed for reproducibility + seed = args.seed + misc.get_rank() + torch.manual_seed(seed) + np.random.seed(seed) + cudnn.benchmark = True + + # Metrics / criterion + device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') + metrics = (StereoMetrics if args.task=='stereo' else FlowMetrics)().to(device) + criterion = eval(args.criterion).to(device) + print('Criterion: ', args.criterion) + + # Prepare model + assert os.path.isfile(args.pretrained) + ckpt = torch.load(args.pretrained, 'cpu') + croco_args = croco_args_from_ckpt(ckpt) + croco_args['img_size'] = (args.crop[0], args.crop[1]) + print('Croco args: '+str(croco_args)) + args.croco_args = croco_args # saved for test time + # prepare head + num_channels = {'stereo': 1, 'flow': 2}[args.task] + if criterion.with_conf: num_channels += 1 + print(f'Building head PixelwiseTaskWithDPT() with {num_channels} channel(s)') + head = PixelwiseTaskWithDPT() + head.num_channels = num_channels + # build model and load pretrained weights + model = CroCoDownstreamBinocular(head, **croco_args) + interpolate_pos_embed(model, ckpt['model']) + msg = model.load_state_dict(ckpt['model'], strict=False) + print(msg) + + total_params = sum(p.numel() for p in model.parameters()) + total_params_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad) + print(f"Total params: {total_params}") + print(f"Total params trainable: {total_params_trainable}") + model_without_ddp = model.to(device) + + eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size() + print("lr: %.2e" % args.lr) + print("accumulate grad iterations: %d" % args.accum_iter) + print("effective batch size: %d" % eff_batch_size) + + if args.distributed: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], static_graph=True) + 
model_without_ddp = model.module
+
+    # following timm: set wd as 0 for bias and norm layers
+    param_groups = misc.get_parameter_groups(model_without_ddp, args.weight_decay)
+    optimizer = eval(f"torch.optim.{args.optimizer}")
+    print(optimizer)
+    loss_scaler = NativeScaler()
+
+    # automatic restart
+    last_ckpt_fname = os.path.join(args.output_dir, f'checkpoint-last.pth')
+    args.resume = last_ckpt_fname if os.path.isfile(last_ckpt_fname) else None
+
+    if not args.resume and args.start_from:
+        print(f"Starting from another model's weights: {args.start_from}")
+        best_so_far = None
+        args.start_epoch = 0
+        ckpt = torch.load(args.start_from, 'cpu')
+        msg = model_without_ddp.load_state_dict(ckpt['model'], strict=False)
+        print(msg)
+    else:
+        # load_model returns (best_so_far, best_pose_ate_sofar); only the first is used here
+        best_so_far, _ = misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)
+
+    if best_so_far is None: best_so_far = np.inf
+
+    # tensorboard
+    log_writer = None
+    if global_rank == 0 and args.output_dir is not None:
+        log_writer = SummaryWriter(log_dir=args.output_dir, purge_step=args.start_epoch*1000)
+
+    # dataset and loader
+    print('Building Train Data loader for dataset: ', args.dataset)
+    train_dataset = (get_train_dataset_stereo if args.task=='stereo' else get_train_dataset_flow)(args.dataset, crop_size=args.crop)
+    def _print_repr_dataset(d):
+        if isinstance(d, torch.utils.data.dataset.ConcatDataset):
+            for dd in d.datasets:
+                _print_repr_dataset(dd)
+        else:
+            print(repr(d))
+    _print_repr_dataset(train_dataset)
+    print('  total length:', len(train_dataset))
+    if args.distributed:
+        sampler_train = torch.utils.data.DistributedSampler(
+            train_dataset, num_replicas=num_tasks, rank=global_rank, shuffle=True
+        )
+    else:
+        sampler_train = torch.utils.data.RandomSampler(train_dataset)
+    data_loader_train = torch.utils.data.DataLoader(
+        train_dataset, sampler=sampler_train,
+        batch_size=args.batch_size,
+        num_workers=args.num_workers,
+        pin_memory=True,
+        drop_last=True,
+    )
+    if args.val_dataset=='':
+        data_loaders_val = None
+    else:
+        print('Building Val Data loader for datasets: ', args.val_dataset)
+        val_datasets = (get_test_datasets_stereo if args.task=='stereo' else get_test_datasets_flow)(args.val_dataset)
+        for val_dataset in val_datasets: print(repr(val_dataset))
+        data_loaders_val = [DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=args.num_workers, pin_memory=True, drop_last=False) for val_dataset in val_datasets]
+        bestmetric = ("AVG_" if len(data_loaders_val)>1 else str(data_loaders_val[0].dataset)+'_')+args.bestmetric
+
+    print(f"Start training for {args.epochs} epochs")
+    start_time = time.time()
+    # Training Loop
+    for epoch in range(args.start_epoch, args.epochs):
+
+        if args.distributed: data_loader_train.sampler.set_epoch(epoch)
+
+        # Train
+        epoch_start = time.time()
+        train_stats = train_one_epoch(model, criterion, metrics, data_loader_train, optimizer, device, epoch, loss_scaler, log_writer=log_writer, args=args)
+        epoch_time = time.time() - epoch_start
+
+        if args.distributed: dist.barrier()
+
+        # Validation (current naive implementation runs the validation on every gpu ... not smart ...)
+ if data_loaders_val is not None and args.eval_every > 0 and (epoch+1) % args.eval_every == 0: + val_epoch_start = time.time() + val_stats = validate_one_epoch(model, criterion, metrics, data_loaders_val, device, epoch, log_writer=log_writer, args=args) + val_epoch_time = time.time() - val_epoch_start + + val_best = val_stats[bestmetric] + + # Save best of all + if val_best <= best_so_far: + best_so_far = val_best + misc.save_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, epoch=epoch, best_so_far=best_so_far, fname='best') + + log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, + 'epoch': epoch, + **{f'val_{k}': v for k, v in val_stats.items()}} + else: + log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, + 'epoch': epoch,} + + if args.distributed: dist.barrier() + + # Save stuff + if args.output_dir and ((epoch+1) % args.save_every == 0 or epoch + 1 == args.epochs): + misc.save_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, epoch=epoch, best_so_far=best_so_far, fname='last') + + if args.output_dir: + if log_writer is not None: + log_writer.flush() + with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f: + f.write(json.dumps(log_stats) + "\n") + + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('Training time {}'.format(total_time_str)) + +if __name__ == '__main__': + args = get_args_parser() + args = args.parse_args() + main(args) \ No newline at end of file diff --git a/dynamic_predictor/croco/utils/misc.py b/dynamic_predictor/croco/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..a7460c7424bf71cd56ceb917410d5f4f1166226c --- /dev/null +++ b/dynamic_predictor/croco/utils/misc.py @@ -0,0 +1,470 @@ +# Copyright (C) 2022-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). +# +# -------------------------------------------------------- +# utilitary functions for CroCo +# -------------------------------------------------------- +# References: +# MAE: https://github.com/facebookresearch/mae +# DeiT: https://github.com/facebookresearch/deit +# BEiT: https://github.com/microsoft/unilm/tree/master/beit +# -------------------------------------------------------- + +import builtins +import datetime +import os +import time +import math +import json +from collections import defaultdict, deque +from pathlib import Path +import numpy as np + +import torch +import torch.distributed as dist +from torch import inf + +class SmoothedValue(object): + """Track a series of values and provide access to smoothed values over a + window or the global series average. + """ + + def __init__(self, window_size=20, fmt=None): + if fmt is None: + fmt = "{median:.4f} ({global_avg:.4f})" + self.deque = deque(maxlen=window_size) + self.total = 0.0 + self.count = 0 + self.fmt = fmt + + def update(self, value, n=1): + self.deque.append(value) + self.count += n + self.total += value * n + + def synchronize_between_processes(self): + """ + Warning: does not synchronize the deque! 
+ """ + if not is_dist_avail_and_initialized(): + return + t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') + dist.barrier() + dist.all_reduce(t) + t = t.tolist() + self.count = int(t[0]) + self.total = t[1] + + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format( + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value) + + +class MetricLogger(object): + def __init__(self, delimiter="\t"): + self.meters = defaultdict(SmoothedValue) + self.delimiter = delimiter + + def update(self, **kwargs): + for k, v in kwargs.items(): + if v is None: + continue + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.meters[k].update(v) + + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) + + def __str__(self): + loss_str = [] + for name, meter in self.meters.items(): + loss_str.append( + "{}: {}".format(name, str(meter)) + ) + return self.delimiter.join(loss_str) + + def synchronize_between_processes(self): + for meter in self.meters.values(): + meter.synchronize_between_processes() + + def add_meter(self, name, meter): + self.meters[name] = meter + + def log_every(self, iterable, print_freq, header=None, max_iter=None): + i = 0 + if not header: + header = '' + start_time = time.time() + end = time.time() + iter_time = SmoothedValue(fmt='{avg:.4f}') + data_time = SmoothedValue(fmt='{avg:.4f}') + len_iterable = min(len(iterable), max_iter) if max_iter else len(iterable) + space_fmt = ':' + str(len(str(len_iterable))) + 'd' + log_msg = [ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}' + ] + if torch.cuda.is_available(): + log_msg.append('max mem: {memory:.0f}') + log_msg = self.delimiter.join(log_msg) + MB = 1024.0 * 1024.0 + for it,obj in enumerate(iterable): + data_time.update(time.time() - end) + yield obj + iter_time.update(time.time() - end) + if i % print_freq == 0 or i == len_iterable - 1: + eta_seconds = iter_time.global_avg * (len_iterable - i) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + if torch.cuda.is_available(): + print(log_msg.format( + i, len_iterable, eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time), + memory=torch.cuda.max_memory_allocated() / MB)) + else: + print(log_msg.format( + i, len_iterable, eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time))) + i += 1 + end = time.time() + if max_iter and it >= max_iter: + break + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('{} Total time: {} ({:.4f} s / it)'.format( + header, total_time_str, total_time / len_iterable)) + + +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + builtin_print = builtins.print + + def print(*args, **kwargs): + force = kwargs.pop('force', False) + force = force or 
(get_world_size() > 8) + if is_master or force: + now = datetime.datetime.now().time() + builtin_print('[{}] '.format(now), end='') # print with time stamp + builtin_print(*args, **kwargs) + + builtins.print = print + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_world_size(): + if not is_dist_avail_and_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + + +def save_on_master(*args, **kwargs): + if is_main_process(): + torch.save(*args, **kwargs) + + +def init_distributed_mode(args): + nodist = args.nodist if hasattr(args,'nodist') else False + if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ and not nodist: + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ['WORLD_SIZE']) + args.gpu = int(os.environ['LOCAL_RANK']) + else: + print('Not using distributed mode') + setup_for_distributed(is_master=True) # hack + args.distributed = False + return + + args.distributed = True + + torch.cuda.set_device(args.gpu) + args.dist_backend = 'nccl' + print('| distributed init (rank {}): {}, gpu {}'.format( + args.rank, args.dist_url, args.gpu), flush=True) + torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, + world_size=args.world_size, rank=args.rank, timeout=datetime.timedelta(seconds=3600)) + torch.distributed.barrier() + setup_for_distributed(args.rank == 0) + + +class NativeScalerWithGradNormCount: + state_dict_key = "amp_scaler" + + def __init__(self, enabled=True): + self._scaler = torch.cuda.amp.GradScaler(enabled=enabled) + + def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True): + self._scaler.scale(loss).backward(create_graph=create_graph) + if update_grad: + if clip_grad is not None: + assert parameters is not None + self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place + norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad) + else: + self._scaler.unscale_(optimizer) + norm = get_grad_norm_(parameters) + self._scaler.step(optimizer) + self._scaler.update() + else: + norm = None + return norm + + def state_dict(self): + return self._scaler.state_dict() + + def load_state_dict(self, state_dict): + self._scaler.load_state_dict(state_dict) + + +def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor: + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + parameters = [p for p in parameters if p.grad is not None] + norm_type = float(norm_type) + if len(parameters) == 0: + return torch.tensor(0.) 
+ device = parameters[0].grad.device + if norm_type == inf: + total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters) + else: + total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type) + return total_norm + + + + +def save_model(args, epoch, model_without_ddp, optimizer, loss_scaler, fname=None, best_so_far=None, best_pose_ate_sofar=None): + output_dir = Path(args.output_dir) + if fname is None: fname = str(epoch) + checkpoint_path = output_dir / ('checkpoint-%s.pth' % fname) + to_save = { + 'model': model_without_ddp.state_dict(), + 'optimizer': optimizer.state_dict(), + 'scaler': loss_scaler.state_dict(), + 'args': args, + 'epoch': epoch, + } + if best_so_far is not None: to_save['best_so_far'] = best_so_far + if best_pose_ate_sofar is not None: to_save['best_pose_ate_sofar'] = best_pose_ate_sofar + print(f'>> Saving model to {checkpoint_path} ...') + save_on_master(to_save, checkpoint_path) + + +def load_model(args, model_without_ddp, optimizer, loss_scaler): + args.start_epoch = 0 + best_so_far = None + best_pose_ate_sofar = None + if args.resume is not None: + if args.resume.startswith('https'): + checkpoint = torch.hub.load_state_dict_from_url( + args.resume, map_location='cpu', check_hash=True) + else: + checkpoint = torch.load(args.resume, map_location='cpu') + print("Resume checkpoint %s" % args.resume) + model_without_ddp.load_state_dict(checkpoint['model'], strict=False) + args.start_epoch = checkpoint['epoch'] + 1 + optimizer.load_state_dict(checkpoint['optimizer']) + if 'scaler' in checkpoint: + loss_scaler.load_state_dict(checkpoint['scaler']) + if 'best_so_far' in checkpoint: + best_so_far = checkpoint['best_so_far'] + print(" & best_so_far={:g}".format(best_so_far)) + else: + print("") + if 'best_pose_ate_sofar' in checkpoint: + best_pose_ate_sofar = checkpoint['best_pose_ate_sofar'] + print(" & best_pose_ate_sofar={:g}".format(best_pose_ate_sofar)) + else: + best_pose_ate_sofar = None + print("With optim & sched! start_epoch={:d}".format(args.start_epoch), end='') + return best_so_far, best_pose_ate_sofar + +def all_reduce_mean(x): + world_size = get_world_size() + if world_size > 1: + x_reduce = torch.tensor(x).cuda() + dist.all_reduce(x_reduce) + x_reduce /= world_size + return x_reduce.item() + else: + return x + +def _replace(text, src, tgt, rm=''): + """ Advanced string replacement. + Given a text: + - replace all elements in src by the corresponding element in tgt + - remove all elements in rm + """ + if len(tgt) == 1: + tgt = tgt * len(src) + assert len(src) == len(tgt), f"'{src}' and '{tgt}' should have the same len" + for s,t in zip(src, tgt): + text = text.replace(s,t) + for c in rm: + text = text.replace(c,'') + return text + +def filename( obj ): + """ transform a python obj or cmd into a proper filename. 
+     - \1 gets replaced by slash '/'
+     - \2 gets replaced by comma ','
+    """
+    if not isinstance(obj, str):
+        obj = repr(obj)
+    obj = str(obj).replace('()','')
+    obj = _replace(obj, '_,(*/\1\2','-__x%/,', rm=' )\'"')
+    assert all(len(s) < 256 for s in obj.split(os.sep)), 'filename too long (>256 characters):\n'+obj
+    return obj
+
+def _get_num_layer_for_vit(var_name, enc_depth, dec_depth):
+    if var_name in ("cls_token", "mask_token", "pos_embed", "global_tokens"):
+        return 0
+    elif var_name.startswith("patch_embed"):
+        return 0
+    elif var_name.startswith("enc_blocks"):
+        layer_id = int(var_name.split('.')[1])
+        return layer_id + 1
+    elif var_name.startswith('decoder_embed') or var_name.startswith('enc_norm'): # part of the last block
+        return enc_depth
+    elif var_name.startswith('dec_blocks'):
+        layer_id = int(var_name.split('.')[1])
+        return enc_depth + layer_id + 1
+    elif var_name.startswith('dec_norm'): # part of the last block
+        return enc_depth + dec_depth
+    elif any(var_name.startswith(k) for k in ['head','prediction_head']):
+        return enc_depth + dec_depth + 1
+    else:
+        raise NotImplementedError(var_name)
+
+def get_parameter_groups(model, weight_decay, layer_decay=1.0, skip_list=(), no_lr_scale_list=[]):
+    parameter_group_names = {}
+    parameter_group_vars = {}
+    enc_depth, dec_depth = None, None
+    # prepare layer decay values
+    assert layer_decay==1.0 or 0.<layer_decay<1.
+    # [...]

[...]

+# DAVIS 2017 Semi-supervised and Unsupervised evaluation package
+
+This package is used to evaluate semi-supervised and unsupervised video multi-object segmentation models for the DAVIS 2017 dataset.
+
+This tool is also used to evaluate the submissions in the Codalab site for the Semi-supervised DAVIS Challenge and the Unsupervised DAVIS Challenge
+
+### Installation
+```bash
+# Download the code
+git clone https://github.com/davisvideochallenge/davis2017-evaluation.git && cd davis2017-evaluation
+# Install it - Python 3.6 or higher required
+python setup.py install
+```
+If you don't want to specify the DAVIS path every time, you can modify the default value in the variable `default_davis_path` in `evaluation_method.py` (the following examples assume that you have set it).
+Otherwise, you can specify the path in every call using the flag `--davis_path /path/to/DAVIS` when calling `evaluation_method.py`.
+
+Once the evaluation has finished, two different CSV files will be generated inside the folder with the results:
+- `global_results-SUBSET.csv` contains the overall results for a certain `SUBSET`.
+- `per-sequence_results-SUBSET.csv` contains the per-sequence results for a certain `SUBSET`.
+
+If a folder that contains the previous files is evaluated again, the results will be read from the CSV files instead of recomputing them.
+
+## Evaluate DAVIS 2017 Semi-supervised
+In order to evaluate your semi-supervised method in DAVIS 2017, execute the following command substituting `results/semi-supervised/osvos` by the folder path that contains your results:
+```bash
+python evaluation_method.py --task semi-supervised --results_path results/semi-supervised/osvos
+```
+The semi-supervised results have been generated using [OSVOS](https://github.com/kmaninis/OSVOS-caffe).
+
+## Evaluate DAVIS 2017 Unsupervised
+In order to evaluate your unsupervised method in DAVIS 2017, execute the following command substituting `results/unsupervised/rvos` by the folder path that contains your results:
+```bash
+python evaluation_method.py --task unsupervised --results_path results/unsupervised/rvos
+```
+The unsupervised example results have been generated using [RVOS](https://github.com/imatge-upc/rvos).
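The CLI above is a thin wrapper; the same evaluation can be scripted directly against the `davis2017` package, as in this minimal sketch based on `evaluation_method.py` (the two paths are placeholders):

```python
# Minimal programmatic sketch of what evaluation_method.py does internally;
# '/path/to/DAVIS' and the results folder are placeholders.
import numpy as np
from davis2017.evaluation import DAVISEvaluation

dataset_eval = DAVISEvaluation(davis_root='/path/to/DAVIS', task='unsupervised', gt_set='val')
metrics_res = dataset_eval.evaluate('results/unsupervised/rvos')
J, F = metrics_res['J'], metrics_res['F']
print('J&F-Mean: %.3f' % ((np.mean(J["M"]) + np.mean(F["M"])) / 2.))
```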
+
+## Evaluation running in Codalab
+
+If you would like to know which evaluation script runs on the Codalab servers, check the `evaluation_codalab.py` script.
+
+This package runs in the following docker image: [scaelles/codalab:anaconda3-2018.12](https://cloud.docker.com/u/scaelles/repository/docker/scaelles/codalab)
+
+## Citation
+
+Please cite both papers in your publications if DAVIS or this code helps your research.
+
+```latex
+@article{Caelles_arXiv_2019,
+  author = {Sergi Caelles and Jordi Pont-Tuset and Federico Perazzi and Alberto Montes and Kevis-Kokitsi Maninis and Luc {Van Gool}},
+  title = {The 2019 DAVIS Challenge on VOS: Unsupervised Multi-Object Segmentation},
+  journal = {arXiv},
+  year = {2019}
+}
+```
+
+```latex
+@article{Pont-Tuset_arXiv_2017,
+  author = {Jordi Pont-Tuset and Federico Perazzi and Sergi Caelles and Pablo Arbel\'aez and Alexander Sorkine-Hornung and Luc {Van Gool}},
+  title = {The 2017 DAVIS Challenge on Video Object Segmentation},
+  journal = {arXiv:1704.00675},
+  year = {2017}
+}
+```
+
diff --git a/dynamic_predictor/davis/davis2017-evaluation/evaluation_codalab.py b/dynamic_predictor/davis/davis2017-evaluation/evaluation_codalab.py
new file mode 100644
index 0000000000000000000000000000000000000000..694bf9b2a1dc0f395ab05cfc04457cf07e8cbb89
--- /dev/null
+++ b/dynamic_predictor/davis/davis2017-evaluation/evaluation_codalab.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+import sys
+import os.path
+from time import time
+
+import numpy as np
+import pandas
+from davis2017.evaluation import DAVISEvaluation
+
+task = 'semi-supervised'
+gt_set = 'test-dev'
+
+time_start = time()
+# as per the metadata file, input and output directories are the arguments
+if len(sys.argv) < 3:
+    input_dir = "input_dir"
+    output_dir = "output_dir"
+    debug = True
+else:
+    [_, input_dir, output_dir] = sys.argv
+    debug = False
+
+# unzipped submission data is always in the 'res' subdirectory
+# https://github.com/codalab/codalab-competitions/wiki/User_Building-a-Scoring-Program-for-a-Competition#directory-structure-for-submissions
+submission_path = os.path.join(input_dir, 'res')
+if not os.path.exists(submission_path):
+    sys.exit('Could not find submission file {0}'.format(submission_path))
+
+# unzipped reference data is always in the 'ref' subdirectory
+# https://github.com/codalab/codalab-competitions/wiki/User_Building-a-Scoring-Program-for-a-Competition#directory-structure-for-submissions
+gt_path = os.path.join(input_dir, 'ref')
+if not os.path.exists(gt_path):
+    sys.exit('Could not find GT file {0}'.format(gt_path))
+
+
+# Create dataset
+dataset_eval = DAVISEvaluation(davis_root=gt_path, gt_set=gt_set, task=task, codalab=True)
+
+# Check directory structure
+res_subfolders = os.listdir(submission_path)
+if len(res_subfolders) == 1:
+    sys.stdout.write(
+        "Incorrect folder structure, the folders of the sequences have to be placed directly inside the "
+        "zip.\nInside every folder of the sequences there must be an indexed PNG file for every frame.\n"
+        "The indexes have to match with the initial frame.\n")
+    sys.exit()
+
+# Check that all sequences are there
+missing = False
+for seq in dataset_eval.dataset.get_sequences():
+    if seq not in res_subfolders:
+        sys.stdout.write(seq + " sequence is missing.\n")
+        missing = True
+if missing:
+    sys.stdout.write(
+        "Verify also the folder structure, the folders of the sequences have to be placed directly inside "
+        "the zip.\nInside every folder of the sequences there must be an indexed PNG file for every
frame.\n" + "The indexes have to match with the initial frame.\n") + sys.exit() + +metrics_res = dataset_eval.evaluate(submission_path, debug=debug) +J, F = metrics_res['J'], metrics_res['F'] + +# Generate output to the stdout +seq_names = list(J['M_per_object'].keys()) +if gt_set == "val" or gt_set == "train" or gt_set == "test-dev": + sys.stdout.write("----------------Global results in CSV---------------\n") + g_measures = ['J&F-Mean', 'J-Mean', 'J-Recall', 'J-Decay', 'F-Mean', 'F-Recall', 'F-Decay'] + final_mean = (np.mean(J["M"]) + np.mean(F["M"])) / 2. + g_res = np.array([final_mean, np.mean(J["M"]), np.mean(J["R"]), np.mean(J["D"]), np.mean(F["M"]), np.mean(F["R"]), + np.mean(F["D"])]) + table_g = pandas.DataFrame(data=np.reshape(g_res, [1, len(g_res)]), columns=g_measures) + table_g.to_csv(sys.stdout, index=False, float_format="%0.3f") + + sys.stdout.write("\n\n------------Per sequence results in CSV-------------\n") + seq_measures = ['Sequence', 'J-Mean', 'F-Mean'] + J_per_object = [J['M_per_object'][x] for x in seq_names] + F_per_object = [F['M_per_object'][x] for x in seq_names] + table_seq = pandas.DataFrame(data=list(zip(seq_names, J_per_object, F_per_object)), columns=seq_measures) + table_seq.to_csv(sys.stdout, index=False, float_format="%0.3f") + +# Write scores to a file named "scores.txt" +with open(os.path.join(output_dir, 'scores.txt'), 'w') as output_file: + final_mean = (np.mean(J["M"]) + np.mean(F["M"])) / 2. + output_file.write("GlobalMean: %f\n" % final_mean) + output_file.write("JMean: %f\n" % np.mean(J["M"])) + output_file.write("JRecall: %f\n" % np.mean(J["R"])) + output_file.write("JDecay: %f\n" % np.mean(J["D"])) + output_file.write("FMean: %f\n" % np.mean(F["M"])) + output_file.write("FRecall: %f\n" % np.mean(F["R"])) + output_file.write("FDecay: %f\n" % np.mean(F["D"])) +total_time = time() - time_start +sys.stdout.write('\nTotal time:' + str(total_time)) diff --git a/dynamic_predictor/davis/davis2017-evaluation/evaluation_method.py b/dynamic_predictor/davis/davis2017-evaluation/evaluation_method.py new file mode 100644 index 0000000000000000000000000000000000000000..04f67d17c0c9dc5f19de93787c113ab9f4031c65 --- /dev/null +++ b/dynamic_predictor/davis/davis2017-evaluation/evaluation_method.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python +import os +import sys +from time import time +import argparse + +import numpy as np +import pandas as pd +from davis2017.evaluation import DAVISEvaluation + +default_davis_path = '/path/to/the/folder/DAVIS' + +time_start = time() +parser = argparse.ArgumentParser() +parser.add_argument('--davis_path', type=str, help='Path to the DAVIS folder containing the JPEGImages, Annotations, ' + 'ImageSets, Annotations_unsupervised folders', + required=False, default=default_davis_path) +parser.add_argument('--set', type=str, help='Subset to evaluate the results', default='val') +parser.add_argument('--task', type=str, help='Task to evaluate the results', default='unsupervised', + choices=['semi-supervised', 'unsupervised']) +parser.add_argument('--results_path', type=str, help='Path to the folder containing the sequences folders', + required=True) +args, _ = parser.parse_known_args() +csv_name_global = f'global_results-{args.set}.csv' +csv_name_per_sequence = f'per-sequence_results-{args.set}.csv' + +# Check if the method has been evaluated before, if so read the results, otherwise compute the results +csv_name_global_path = os.path.join(args.results_path, csv_name_global) +csv_name_per_sequence_path = os.path.join(args.results_path, 
csv_name_per_sequence) +if os.path.exists(csv_name_global_path) and os.path.exists(csv_name_per_sequence_path): + print('Using precomputed results...') + table_g = pd.read_csv(csv_name_global_path) + table_seq = pd.read_csv(csv_name_per_sequence_path) +else: + print(f'Evaluating sequences for the {args.task} task...') + # Create dataset and evaluate + dataset_eval = DAVISEvaluation(davis_root=args.davis_path, task=args.task, gt_set=args.set) + metrics_res = dataset_eval.evaluate(args.results_path) + J, F = metrics_res['J'], metrics_res['F'] + + # Generate dataframe for the general results + g_measures = ['J&F-Mean', 'J-Mean', 'J-Recall', 'J-Decay', 'F-Mean', 'F-Recall', 'F-Decay'] + final_mean = (np.mean(J["M"]) + np.mean(F["M"])) / 2. + g_res = np.array([final_mean, np.mean(J["M"]), np.mean(J["R"]), np.mean(J["D"]), np.mean(F["M"]), np.mean(F["R"]), + np.mean(F["D"])]) + g_res = np.reshape(g_res, [1, len(g_res)]) + table_g = pd.DataFrame(data=g_res, columns=g_measures) + with open(csv_name_global_path, 'w') as f: + table_g.to_csv(f, index=False, float_format="%.3f") + print(f'Global results saved in {csv_name_global_path}') + + # Generate a dataframe for the per sequence results + seq_names = list(J['M_per_object'].keys()) + seq_measures = ['Sequence', 'J-Mean', 'F-Mean'] + J_per_object = [J['M_per_object'][x] for x in seq_names] + F_per_object = [F['M_per_object'][x] for x in seq_names] + table_seq = pd.DataFrame(data=list(zip(seq_names, J_per_object, F_per_object)), columns=seq_measures) + with open(csv_name_per_sequence_path, 'w') as f: + table_seq.to_csv(f, index=False, float_format="%.3f") + print(f'Per-sequence results saved in {csv_name_per_sequence_path}') + +# Print the results +sys.stdout.write(f"--------------------------- Global results for {args.set} ---------------------------\n") +print(table_g.to_string(index=False)) +sys.stdout.write(f"\n---------- Per sequence results for {args.set} ----------\n") +print(table_seq.to_string(index=False)) +total_time = time() - time_start +sys.stdout.write('\nTotal time:' + str(total_time)) diff --git a/dynamic_predictor/davis/davis2017-evaluation/pytest/test_evaluation.py b/dynamic_predictor/davis/davis2017-evaluation/pytest/test_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..b3b2d5cc42f3932efbd4202a5da06a03d8c118d1 --- /dev/null +++ b/dynamic_predictor/davis/davis2017-evaluation/pytest/test_evaluation.py @@ -0,0 +1,165 @@ +import os +import sys +import numpy as np +import pandas +from time import time +from collections import defaultdict + +from davis2017.evaluation import DAVISEvaluation +from davis2017 import utils +from davis2017.metrics import db_eval_boundary, db_eval_iou + + +davis_root = 'input_dir/ref' +methods_root = 'examples' + + +def test_task(task, gt_set, res_path, J_target=None, F_target=None, metric=('J', 'F')): + dataset_eval = DAVISEvaluation(davis_root=davis_root, gt_set=gt_set, task=task, codalab=True) + metrics_res = dataset_eval.evaluate(res_path, debug=False, metric=metric) + + num_seq = len(list(dataset_eval.dataset.get_sequences())) + J = metrics_res['J'] if 'J' in metric else {'M': np.zeros(num_seq), 'R': np.zeros(num_seq), 'D': np.zeros(num_seq)} + F = metrics_res['F'] if 'F' in metric else {'M': np.zeros(num_seq), 'R': np.zeros(num_seq), 'D': np.zeros(num_seq)} + + if gt_set == "val" or gt_set == "train" or gt_set == "test-dev": + sys.stdout.write("----------------Global results in CSV---------------\n") + g_measures = ['J&F-Mean', 'J-Mean', 'J-Recall', 'J-Decay', 'F-Mean', 
'F-Recall', 'F-Decay'] + final_mean = (np.mean(J["M"]) + np.mean(F["M"])) / 2. if 'J' in metric and 'F' in metric else 0 + g_res = np.array([final_mean, np.mean(J["M"]), np.mean(J["R"]), np.mean(J["D"]), np.mean(F["M"]), np.mean(F["R"]), np.mean(F["D"])]) + table_g = pandas.DataFrame(data=np.reshape(g_res, [1, len(g_res)]), columns=g_measures) + table_g.to_csv(sys.stdout, index=False, float_format="%0.3f") + if J_target is not None: + assert check_results_similarity(J, J_target), f'J {print_error(J, J_target)}' + if F_target is not None: + assert check_results_similarity(F, F_target), f'F {print_error(F, F_target)}' + return J, F + + +def check_results_similarity(target, result): + return np.isclose(np.mean(target['M']) - result[0], 0, atol=0.001) & \ + np.isclose(np.mean(target['R']) - result[1], 0, atol=0.001) & \ + np.isclose(np.mean(target['D']) - result[2], 0, atol=0.001) + + +def print_error(target, result): + return f'M:{np.mean(target["M"])} = {result[0]}\t' + \ + f'R:{np.mean(target["R"])} = {result[1]}\t' + \ + f'D:{np.mean(target["D"])} = {result[2]}' + + +def test_semisupervised_premvos(): + method_path = os.path.join(methods_root, 'premvos') + print('Evaluating PREMVOS val') + J_val = [0.739, 0.831, 0.162] + F_val = [0.818, 0.889, 0.195] + test_task('semi-supervised', 'val', method_path, J_val, F_val) + print('Evaluating PREMVOS test-dev') + J_test_dev = [0.675, 0.768, 0.217] + F_test_dev = [0.758, 0.843, 0.206] + test_task('semi-supervised', 'test-dev', method_path, J_test_dev, F_test_dev) + print('\n') + + +def test_semisupervised_onavos(): + method_path = os.path.join(methods_root, 'onavos') + print('Evaluating OnAVOS val') + J_val = [0.616, 0.674, 0.279] + F_val = [0.691, 0.754, 0.266] + test_task('semi-supervised', 'val', method_path, J_val, F_val) + print('Evaluating OnAVOS test-dev') + J_test_dev = [0.499, 0.543, 0.230] + F_test_dev = [0.557, 0.603, 0.234] + test_task('semi-supervised', 'test-dev', method_path, J_test_dev, F_test_dev) + print('\n') + + +def test_semisupervised_osvos(): + method_path = os.path.join(methods_root, 'osvos') + print('Evaluating OSVOS val') + J_val = [0.566, 0.638, 0.261] + F_val = [0.639, 0.738, 0.270] + test_task('semi-supervised', 'val', method_path, J_val, F_val) + print('Evaluating OSVOS test-dev') + J_test_dev = [0.470, 0.521, 0.192] + F_test_dev = [0.548, 0.597, 0.198] + test_task('semi-supervised', 'test-dev', method_path, J_test_dev, F_test_dev) + print('\n') + + +def test_unsupervised_flip_gt(): + print('Evaluating Unsupervised Permute GT') + method_path = os.path.join(methods_root, 'swap_gt') + if not os.path.isdir(method_path): + utils.generate_random_permutation_gt_obj_proposals(davis_root, 'val', method_path) + # utils.generate_random_permutation_gt_obj_proposals('test-dev', method_path) + J_val = [1, 1, 0] + F_val= [1, 1, 0] + test_task('unsupervised', 'val', method_path, J_val, F_val) + # test_task('unsupervised', 'test-dev', method_path, J_val, F_val) + + +def test_unsupervised_rvos(): + print('Evaluating RVOS') + method_path = os.path.join(methods_root, 'rvos') + test_task('unsupervised', 'val', method_path) + # test_task('unsupervised', 'test-dev', method_path) + + +def test_unsupervsied_multiple_proposals(num_proposals=20, metric=('J', 'F')): + print('Evaluating Multiple Proposals') + method_path = os.path.join(methods_root, f'generated_proposals_{num_proposals}') + utils.generate_obj_proposals(davis_root, 'val', num_proposals, method_path) + # utils.generate_obj_proposals('test-dev', num_proposals, method_path) + 
test_task('unsupervised', 'val', method_path, metric=metric) + # test_task('unsupervised', 'test-dev', method_path, metric=metric) + + +def test_void_masks(): + gt = np.zeros((2, 200, 200)) + mask = np.zeros((2, 200, 200)) + void = np.zeros((2, 200, 200)) + + gt[:, 100:150, 100:150] = 1 + void[:, 50:100, 100:150] = 1 + mask[:, 50:150, 100:150] = 1 + + assert np.mean(db_eval_iou(gt, mask, void)) == 1 + assert np.mean(db_eval_boundary(gt, mask, void)) == 1 + + +def benchmark_number_proposals(): + number_proposals = [10, 15, 20, 30] + timing_results = defaultdict(dict) + for n in number_proposals: + time_start = time() + test_unsupervsied_multiple_proposals(n, 'J') + timing_results['J'][n] = time() - time_start + + for n in number_proposals: + time_start = time() + test_unsupervsied_multiple_proposals(n) + timing_results['J_F'][n] = time() - time_start + + print(f'Using J {timing_results["J"]}') + print(f'Using J&F {timing_results["J_F"]}') + + # Using J {10: 156.45335865020752, 15: 217.91797709465027, 20: 282.0747673511505, 30: 427.6770250797272} + # Using J & F {10: 574.3529748916626, 15: 849.7542386054993, 20: 1123.4619634151459, 30: 1663.6704666614532} + # Codalab + # Using J & F {10: 971.196366071701, 15: 1473.9757001399994, 20: 1918.787559747696, 30: 3007.116141319275} + + +if __name__ == '__main__': + # Test void masks + test_void_masks() + + # Test semi-supervised methods + test_semisupervised_premvos() + test_semisupervised_onavos() + test_semisupervised_osvos() + + # Test unsupervised methods + test_unsupervised_flip_gt() + # test_unsupervised_rvos() + test_unsupervsied_multiple_proposals() diff --git a/dynamic_predictor/davis/davis2017-evaluation/setup.cfg b/dynamic_predictor/davis/davis2017-evaluation/setup.cfg new file mode 100644 index 0000000000000000000000000000000000000000..b70416f8a70b87842f0b43982381d1c9b688a0f1 --- /dev/null +++ b/dynamic_predictor/davis/davis2017-evaluation/setup.cfg @@ -0,0 +1,22 @@ +[metadata] +name = davis2017 +version = attr: davis2017.__version__ +description = Evaluation Framework for DAVIS 2017 Semi-supervised and Unsupervised used in the DAVIS Challenges +long_description = file: README.md +long_description_content_type = text/markdown +keywords = segmentation +license = GPL v3 +author = Sergi Caelles +author-email = scaelles@vision.ee.ethz.ch +home-page = https://github.com/davisvideochallenge/davis2017-evaluation +classifiers = + Development Status :: 4 - Beta + Intended Audience :: Developers + Intended Audience :: Education + Intended Audience :: Science/Research + License :: OSI Approved :: GNU General Public License v3 (GPLv3) + Programming Language :: Python :: 3.6 + Programming Language :: Python :: 3.7 + Topic :: Scientific/Engineering :: Human Machine Interfaces + Topic :: Software Development :: Libraries + Topic :: Software Development :: Libraries :: Python Modules diff --git a/dynamic_predictor/davis/davis2017-evaluation/setup.py b/dynamic_predictor/davis/davis2017-evaluation/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..20372515078189d930d22d0fbfc9a74b30658582 --- /dev/null +++ b/dynamic_predictor/davis/davis2017-evaluation/setup.py @@ -0,0 +1,20 @@ +from setuptools import setup +import sys + +if sys.version_info < (3, 6): + sys.exit('Sorry, only Python >= 3.6 is supported') + +setup( + python_requires='>=3.6, <4', + install_requires=[ + 'Pillow>=4.1.1', + 'networkx>=2.0', + 'numpy>=1.12.1', + 'opencv-python>=4.0.0.21', + 'pandas>=0.21.1', + 'pathlib2;python_version<"3.5"', + 'scikit-image>=0.13.1', + 
'scikit-learn>=0.18', + 'scipy>=1.0.0', + 'tqdm>=4.28.1' + ],packages=['davis2017']) diff --git a/dynamic_predictor/davis/davis2017/__init__.py b/dynamic_predictor/davis/davis2017/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2fb263ea94c650a6ac55ef374f2cea1b8c96a1a5 --- /dev/null +++ b/dynamic_predictor/davis/davis2017/__init__.py @@ -0,0 +1,3 @@ +from __future__ import absolute_import + +__version__ = '0.1.0' diff --git a/dynamic_predictor/davis/davis2017/davis.py b/dynamic_predictor/davis/davis2017/davis.py new file mode 100644 index 0000000000000000000000000000000000000000..4da7fb3fbe9b4890fc2dec8aaff2eb3bbe338d19 --- /dev/null +++ b/dynamic_predictor/davis/davis2017/davis.py @@ -0,0 +1,29 @@ +import os +from glob import glob +from collections import defaultdict +import numpy as np +from PIL import Image + + +class MaskDataset(object): + def __init__(self, root, sequences, is_label=True): + self.is_label = is_label + self.sequences = {} + for seq in sequences: + print(root, seq) + if is_label: + masks = np.sort(glob(os.path.join(root, seq, '*.png'))).tolist() + else: + masks = sorted(glob(os.path.join(root, seq, 'dynamic_mask_*.png')), key=lambda x: int(os.path.basename(x).split('_')[-1].split('.')[0])) + self.sequences[seq] = masks + def read_masks(self, seq): + masks = [] + for msk in self.sequences[seq]: + if self.is_label: + img = np.array(Image.open(msk)) + img[img>0] = 255 + img = Image.fromarray(img) + masks.append(img) + else: + masks.append(Image.open(msk)) + return masks diff --git a/dynamic_predictor/davis/davis2017/evaluation.py b/dynamic_predictor/davis/davis2017/evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..403a32ebb08c7cbf70939d2e64da08e8405d5b26 --- /dev/null +++ b/dynamic_predictor/davis/davis2017/evaluation.py @@ -0,0 +1,130 @@ +import sys +from tqdm import tqdm +import warnings +warnings.filterwarnings("ignore", category=RuntimeWarning) + +import numpy as np +from davis2017.davis import MaskDataset +from davis2017.metrics import db_eval_boundary, db_eval_iou +from davis2017 import utils +from davis2017.results import Results +from scipy.optimize import linear_sum_assignment +from skimage.transform import resize +import cv2 +import PIL + +def _resize_pil_image(img, long_edge_size, nearest=False): + S = max(img.size) + if S > long_edge_size: + interp = PIL.Image.LANCZOS if not nearest else PIL.Image.NEAREST + elif S <= long_edge_size: + interp = PIL.Image.BICUBIC + new_size = tuple(int(round(x*long_edge_size/S)) for x in img.size) + return img.resize(new_size, interp) + +def crop_img(img, size, square_ok=False, nearest=True, crop=True): + W1, H1 = img.size + if size == 224: + # resize short side to 224 (then crop) + img = _resize_pil_image(img, round(size * max(W1/H1, H1/W1)), nearest=nearest) + else: + # resize long side to 512 + img = _resize_pil_image(img, size, nearest=nearest) + W, H = img.size + cx, cy = W//2, H//2 + if size == 224: + half = min(cx, cy) + img = img.crop((cx-half, cy-half, cx+half, cy+half)) + else: + halfw, halfh = ((2*cx)//16)*8, ((2*cy)//16)*8 + if not (square_ok) and W == H: + halfh = 3*halfw/4 + if crop: + img = img.crop((cx-halfw, cy-halfh, cx+halfw, cy+halfh)) + else: # resize + img = img.resize((2*halfw, 2*halfh), PIL.Image.NEAREST) + return img + + +class MaskEvaluation(object): + def __init__(self, root, sequences): + self.dataset = MaskDataset(root=root, sequences=sequences) + self.sequences = sequences + + + @staticmethod + def _evaluate(all_gt_masks, all_res_masks, 
all_void_masks, metric): + for i in range(len(all_gt_masks)): + all_gt_masks[i]= (np.array(crop_img(all_gt_masks[i], 512, square_ok=True)) > 0.5) * 255 + + for i in range(len(all_res_masks)): + all_res_masks[i]= np.array(all_res_masks[i]) + + for i in range(len(all_res_masks)): + if i % 10 == 0: + concatenated_mask = np.concatenate((all_gt_masks[i], all_res_masks[i]), axis=1).astype(np.uint8) + import matplotlib.pyplot as plt + plt.imshow(concatenated_mask, cmap='gray') + plt.title(f'Mask {i}') + plt.show() + + all_gt_masks = np.stack(all_gt_masks, axis=0) + all_res_masks = np.stack(all_res_masks, axis=0) + + + if all_res_masks.shape[0] > all_gt_masks.shape[0]: + all_res_masks = all_res_masks[:all_gt_masks.shape[0], ...] + elif all_res_masks.shape[0] < all_gt_masks.shape[0]: + zero_padding = np.zeros((all_gt_masks.shape[0] - all_res_masks.shape[0], *all_res_masks.shape[1:])) + all_res_masks = np.concatenate([all_res_masks, zero_padding], axis=0) + # Resize all_res_masks to match all_gt_masks using interpolation + + + # all_res_masks = resized_res_masks + + j_metrics_res, f_metrics_res = np.zeros(all_gt_masks.shape[:2]), np.zeros(all_gt_masks.shape[:2]) + + for ii in range(all_gt_masks.shape[0]): + if 'J' in metric: + j_metrics_res[ii, :] = db_eval_iou(all_gt_masks[ii, ...], all_res_masks[ii, ...], all_void_masks) + if 'F' in metric: + f_metrics_res[ii, :] = db_eval_boundary(all_gt_masks[ii, ...], all_res_masks[ii, ...], all_void_masks) + return j_metrics_res, f_metrics_res + + def evaluate(self, res_path, metric=('J', 'F')): + metric = metric if isinstance(metric, tuple) or isinstance(metric, list) else [metric] + if 'T' in metric: + raise ValueError('Temporal metric not supported!') + if 'J' not in metric and 'F' not in metric: + raise ValueError('Metric possible values are J for IoU or F for Boundary') + + # Containers + metrics_res = {} + if 'J' in metric: + metrics_res['J'] = {"M": [], "R": [], "D": [], "M_per_object": {}} + if 'F' in metric: + metrics_res['F'] = {"M": [], "R": [], "D": [], "M_per_object": {}} + + results = MaskDataset(root=res_path, sequences=self.sequences, is_label=False) + + # Sweep all sequences + for seq in tqdm(self.sequences): + all_gt_masks = self.dataset.read_masks(seq) + all_res_masks = results.read_masks(seq) + j_metrics_res, f_metrics_res = self._evaluate(all_gt_masks, all_res_masks, None, metric) + for ii in range(len(all_gt_masks)): + seq_name = f'{seq}_{ii+1}' + if 'J' in metric: + [JM, JR, JD] = utils.db_statistics(j_metrics_res[ii]) + metrics_res['J']["M"].append(JM) + metrics_res['J']["R"].append(JR) + metrics_res['J']["D"].append(JD) + metrics_res['J']["M_per_object"][seq_name] = JM + if 'F' in metric: + [FM, FR, FD] = utils.db_statistics(f_metrics_res[ii]) + metrics_res['F']["M"].append(FM) + metrics_res['F']["R"].append(FR) + metrics_res['F']["D"].append(FD) + metrics_res['F']["M_per_object"][seq_name] = FM + + return metrics_res diff --git a/dynamic_predictor/davis/davis2017/metrics.py b/dynamic_predictor/davis/davis2017/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..2bb2724177cc21de5470233b29bb0360d848a823 --- /dev/null +++ b/dynamic_predictor/davis/davis2017/metrics.py @@ -0,0 +1,197 @@ +import math +import numpy as np +import cv2 + + +def db_eval_iou(annotation, segmentation, void_pixels=None): + """ Compute region similarity as the Jaccard Index. + Arguments: + annotation (ndarray): binary annotation map. + segmentation (ndarray): binary segmentation map. 
+ void_pixels (ndarray): optional mask with void pixels + + Return: + jaccard (float): region similarity + """ + assert annotation.shape == segmentation.shape, \ + f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.' + annotation = annotation.astype(bool) + segmentation = segmentation.astype(bool) + + if void_pixels is not None: + assert annotation.shape == void_pixels.shape, \ + f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.' + void_pixels = void_pixels.astype(bool) + else: + void_pixels = np.zeros_like(segmentation) + + # Intersection between all sets + inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1)) + union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1)) + + j = inters / union + if j.ndim == 0: + j = 1 if np.isclose(union, 0) else j + else: + j[np.isclose(union, 0)] = 1 + return j + + +def db_eval_boundary(annotation, segmentation, void_pixels=None, bound_th=0.008): + assert annotation.shape == segmentation.shape + if void_pixels is not None: + assert annotation.shape == void_pixels.shape + if annotation.ndim == 3: + n_frames = annotation.shape[0] + f_res = np.zeros(n_frames) + for frame_id in range(n_frames): + void_pixels_frame = None if void_pixels is None else void_pixels[frame_id, :, :, ] + f_res[frame_id] = f_measure(segmentation[frame_id, :, :, ], annotation[frame_id, :, :], void_pixels_frame, bound_th=bound_th) + elif annotation.ndim == 2: + f_res = f_measure(segmentation, annotation, void_pixels, bound_th=bound_th) + else: + raise ValueError(f'db_eval_boundary does not support tensors with {annotation.ndim} dimensions') + return f_res + + +def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008): + """ + Compute mean,recall and decay from per-frame evaluation. + Calculates precision/recall for boundaries between foreground_mask and + gt_mask using morphological operators to speed it up. + + Arguments: + foreground_mask (ndarray): binary segmentation image. + gt_mask (ndarray): binary annotated image. 
+        void_pixels (ndarray): optional mask with void pixels
+
+    Returns:
+        F (float): boundaries F-measure
+    """
+    assert np.atleast_3d(foreground_mask).shape[2] == 1
+    if void_pixels is not None:
+        void_pixels = void_pixels.astype(bool)
+    else:
+        void_pixels = np.zeros_like(foreground_mask).astype(bool)
+
+    bound_pix = bound_th if bound_th >= 1 else \
+        np.ceil(bound_th * np.linalg.norm(foreground_mask.shape))
+
+    # Get the pixel boundaries of both masks
+    fg_boundary = _seg2bmap(foreground_mask * np.logical_not(void_pixels))
+    gt_boundary = _seg2bmap(gt_mask * np.logical_not(void_pixels))
+
+    from skimage.morphology import disk
+
+    # fg_dil = binary_dilation(fg_boundary, disk(bound_pix))
+    fg_dil = cv2.dilate(fg_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
+    # gt_dil = binary_dilation(gt_boundary, disk(bound_pix))
+    gt_dil = cv2.dilate(gt_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8))
+
+    # Get the intersection
+    gt_match = gt_boundary * fg_dil
+    fg_match = fg_boundary * gt_dil
+
+    # Area of the intersection
+    n_fg = np.sum(fg_boundary)
+    n_gt = np.sum(gt_boundary)
+
+    # Compute precision and recall
+    if n_fg == 0 and n_gt > 0:
+        precision = 1
+        recall = 0
+    elif n_fg > 0 and n_gt == 0:
+        precision = 0
+        recall = 1
+    elif n_fg == 0 and n_gt == 0:
+        precision = 1
+        recall = 1
+    else:
+        precision = np.sum(fg_match) / float(n_fg)
+        recall = np.sum(gt_match) / float(n_gt)
+
+    # Compute F measure
+    if precision + recall == 0:
+        F = 0
+    else:
+        F = 2 * precision * recall / (precision + recall)
+
+    return F
+
+
+def _seg2bmap(seg, width=None, height=None):
+    """
+    From a segmentation, compute a binary boundary map with 1 pixel wide
+    boundaries. The boundary pixels are offset by 1/2 pixel towards the
+    origin from the actual segment boundary.
+    Arguments:
+        seg    : Segments labeled from 1..k.
+        width  : Width of desired bmap  <= seg.shape[1]
+        height : Height of desired bmap <= seg.shape[0]
+    Returns:
+        bmap (ndarray): Binary boundary map.
+    David Martin
+    January 2003
+    """
+
+    seg = seg.astype(bool)
+    seg[seg > 0] = 1
+
+    assert np.atleast_3d(seg).shape[2] == 1
+
+    width = seg.shape[1] if width is None else width
+    height = seg.shape[0] if height is None else height
+
+    h, w = seg.shape[:2]
+
+    ar1 = float(width) / float(height)
+    ar2 = float(w) / float(h)
+
+    assert not (
+        width > w or height > h or abs(ar1 - ar2) > 0.01
+    ), "Can't convert %dx%d seg to %dx%d bmap." % (w, h, width, height)
+
+    e = np.zeros_like(seg)
+    s = np.zeros_like(seg)
+    se = np.zeros_like(seg)
+
+    e[:, :-1] = seg[:, 1:]
+    s[:-1, :] = seg[1:, :]
+    se[:-1, :-1] = seg[1:, 1:]
+
+    b = seg ^ e | seg ^ s | seg ^ se
+    b[-1, :] = seg[-1, :] ^ e[-1, :]
+    b[:, -1] = seg[:, -1] ^ s[:, -1]
+    b[-1, -1] = 0
+
+    if w == width and h == height:
+        bmap = b
+    else:
+        bmap = np.zeros((height, width))
+        for x in range(w):
+            for y in range(h):
+                if b[y, x]:
+                    # nearest-neighbour remap of each boundary pixel into the
+                    # target resolution (0-indexed, unlike the Matlab original)
+                    j = math.floor(y * height / h)
+                    i = math.floor(x * width / w)
+                    bmap[j, i] = 1
+
+    return bmap
+
+
+if __name__ == '__main__':
+    # NOTE: this self-test expects the upstream davis2017 package; the
+    # vendored davis.py in this repo only provides MaskDataset, not DAVIS.
+    from davis2017.davis import DAVIS
+    from davis2017.results import Results
+
+    dataset = DAVIS(root='input_dir/ref', subset='val', sequences='aerobatics')
+    results = Results(root_dir='examples/osvos')
+    # Test timing F measure
+    for seq in dataset.get_sequences():
+        all_gt_masks, _, all_masks_id = dataset.get_all_masks(seq, True)
+        all_gt_masks, all_masks_id = all_gt_masks[:, 1:-1, :, :], all_masks_id[1:-1]
+        all_res_masks = results.read_masks(seq, all_masks_id)
+        f_metrics_res = np.zeros(all_gt_masks.shape[:2])
+        for ii in range(all_gt_masks.shape[0]):
+            f_metrics_res[ii, :] = db_eval_boundary(all_gt_masks[ii, ...], all_res_masks[ii, ...])
+
+    # Profile with: python -m cProfile -o f_measure.prof metrics.py
+    # snakeviz f_measure.prof
diff --git a/dynamic_predictor/davis/davis2017/results.py b/dynamic_predictor/davis/davis2017/results.py new file mode 100644 index 0000000000000000000000000000000000000000..95ce1a751711a4a905e738c3784d9adf850b7e6c --- /dev/null +++ b/dynamic_predictor/davis/davis2017/results.py @@ -0,0 +1,33 @@
+import os
+import numpy as np
+from PIL import Image
+import sys
+
+
+class Results(object):
+    def __init__(self, root_dir):
+        self.root_dir = root_dir
+
+    def _read_mask(self, sequence, frame_id):
+        # mask_path = os.path.join(self.root_dir, sequence, f'{frame_id}.png')
+
+        mask_path = os.path.join(self.root_dir, sequence, f'{int(frame_id[-4:]) - 1:05d}.png')
+        obj = Image.open(mask_path)
+        obj_mode = obj.mode
+        if obj_mode == 'LA':
+            obj = np.array(obj)[:, :, 0]
+        else:
+            obj = np.array(obj)
+
+        return obj
+
+
+    def read_masks(self, sequence, masks_id):
+        mask_0 = self._read_mask(sequence, masks_id[0])
+        masks = np.zeros((len(masks_id), *mask_0.shape))
+        for ii, m in enumerate(masks_id):
+            masks[ii, ...] = self._read_mask(sequence, m)
+        num_objects = 1
+        tmp = np.ones((num_objects, *masks.shape)) * 255
+        masks = (tmp == masks[None, ...]) > 0
+        return masks
diff --git a/dynamic_predictor/davis/davis2017/utils.py b/dynamic_predictor/davis/davis2017/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3943d3adc85bedf67c1e4ffbefb861382753371c --- /dev/null +++ b/dynamic_predictor/davis/davis2017/utils.py @@ -0,0 +1,173 @@
+import os
+import errno
+import numpy as np
+from PIL import Image
+import warnings
+
+
+def _pascal_color_map(N=256, normalized=False):
+    """
+    Python implementation of the color map function for the PASCAL VOC data set.
+    Official Matlab version can be found in the PASCAL VOC devkit
+    http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit
+    """
+
+    def bitget(byteval, idx):
+        return (byteval & (1 << idx)) != 0
+
+    dtype = 'float32' if normalized else 'uint8'
+    cmap = np.zeros((N, 3), dtype=dtype)
+    for i in range(N):
+        r = g = b = 0
+        c = i
+        for j in range(8):
+            r = r | (bitget(c, 0) << 7 - j)
+            g = g | (bitget(c, 1) << 7 - j)
+            b = b | (bitget(c, 2) << 7 - j)
+            c = c >> 3
+
+        cmap[i] = np.array([r, g, b])
+
+    cmap = cmap / 255 if normalized else cmap
+    return cmap
+
+
+def overlay_semantic_mask(im, ann, alpha=0.5, colors=None, contour_thickness=None):
+    # plain int replaces the removed NumPy alias np.int, with identical behaviour
+    im, ann = np.asarray(im, dtype=np.uint8), np.asarray(ann, dtype=int)
+    if im.shape[:-1] != ann.shape:
+        raise ValueError('First two dimensions of `im` and `ann` must match')
+    if im.shape[-1] != 3:
+        raise ValueError('im must have three channels in the third dimension')
+
+    colors = colors or _pascal_color_map()
+    colors = np.asarray(colors, dtype=np.uint8)
+
+    mask = colors[ann]
+    fg = im * alpha + (1 - alpha) * mask
+
+    img = im.copy()
+    img[ann > 0] = fg[ann > 0]
+
+    if contour_thickness:  # pragma: no cover
+        import cv2
+        for obj_id in np.unique(ann[ann > 0]):
+            contours = cv2.findContours((ann == obj_id).astype(
+                np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
+            cv2.drawContours(img, contours[0], -1, colors[obj_id].tolist(),
+                             contour_thickness)
+    return img
+
+
+def generate_obj_proposals(davis_root, subset, num_proposals, save_path):
+    # NOTE: DAVIS is the upstream davis2017 dataset class; the vendored
+    # davis.py in this repo only provides MaskDataset, so this helper (and the
+    # one below) requires the original davis2017 package.
+    dataset = DAVIS(davis_root, subset=subset, codalab=True)
+    for seq in dataset.get_sequences():
+        save_dir = os.path.join(save_path, seq)
+        if os.path.exists(save_dir):
+            continue
+        all_gt_masks, all_masks_id = dataset.get_all_masks(seq, True)
+        img_size = all_gt_masks.shape[2:]
+        num_rows = int(np.ceil(np.sqrt(num_proposals)))
+        proposals = np.zeros((num_proposals, len(all_masks_id), *img_size))
+        height_slices = np.floor(np.arange(0, img_size[0] + 1, img_size[0]/num_rows)).astype(np.uint).tolist()
+        width_slices = np.floor(np.arange(0, img_size[1] + 1, img_size[1]/num_rows)).astype(np.uint).tolist()
+        ii = 0
+        prev_h, prev_w = 0, 0
+        for h in height_slices[1:]:
+            for w in width_slices[1:]:
+                proposals[ii, :, prev_h:h, prev_w:w] = 1
+                prev_w = w
+                ii += 1
+                if ii == num_proposals:
+                    break
+            prev_h, prev_w = h, 0
+            if ii == num_proposals:
+                break
+
+        os.makedirs(save_dir, exist_ok=True)
+        for i, mask_id in enumerate(all_masks_id):
+            mask = np.sum(proposals[:, i, ...] * np.arange(1, proposals.shape[0] + 1)[:, None, None], axis=0)
+            save_mask(mask, os.path.join(save_dir, f'{mask_id}.png'))
+
+
+def generate_random_permutation_gt_obj_proposals(davis_root, subset, save_path):
+    dataset = DAVIS(davis_root, subset=subset, codalab=True)
+    for seq in dataset.get_sequences():
+        gt_masks, all_masks_id = dataset.get_all_masks(seq, True)
+        obj_swap = np.random.permutation(np.arange(gt_masks.shape[0]))
+        gt_masks = gt_masks[obj_swap, ...]
+        save_dir = os.path.join(save_path, seq)
+        os.makedirs(save_dir, exist_ok=True)
+        for i, mask_id in enumerate(all_masks_id):
+            mask = np.sum(gt_masks[:, i, ...]
* np.arange(1, gt_masks.shape[0] + 1)[:, None, None], axis=0) + save_mask(mask, os.path.join(save_dir, f'{mask_id}.png')) + + +def color_map(N=256, normalized=False): + def bitget(byteval, idx): + return ((byteval & (1 << idx)) != 0) + + dtype = 'float32' if normalized else 'uint8' + cmap = np.zeros((N, 3), dtype=dtype) + for i in range(N): + r = g = b = 0 + c = i + for j in range(8): + r = r | (bitget(c, 0) << 7-j) + g = g | (bitget(c, 1) << 7-j) + b = b | (bitget(c, 2) << 7-j) + c = c >> 3 + + cmap[i] = np.array([r, g, b]) + + cmap = cmap/255 if normalized else cmap + return cmap + + +def save_mask(mask, img_path): + if np.max(mask) > 255: + raise ValueError('Maximum id pixel value is 255') + mask_img = Image.fromarray(mask.astype(np.uint8)) + mask_img.putpalette(color_map().flatten().tolist()) + mask_img.save(img_path) + + +def db_statistics(per_frame_values): + """ Compute mean,recall and decay from per-frame evaluation. + Arguments: + per_frame_values (ndarray): per-frame evaluation + + Returns: + M,O,D (float,float,float): + return evaluation statistics: mean,recall,decay. + """ + + # strip off nan values + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=RuntimeWarning) + M = np.nanmean(per_frame_values) + O = np.nanmean(per_frame_values > 0.5) + + N_bins = 4 + ids = np.round(np.linspace(1, len(per_frame_values), N_bins + 1) + 1e-10) - 1 + ids = ids.astype(np.uint8) + + D_bins = [per_frame_values[ids[i]:ids[i + 1] + 1] for i in range(0, 4)] + + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=RuntimeWarning) + D = np.nanmean(D_bins[0]) - np.nanmean(D_bins[3]) + + return M, O, D + + +def list_files(dir, extension=".png"): + return [os.path.splitext(file_)[0] for file_ in os.listdir(dir) if file_.endswith(extension)] + + +def force_symlink(file1, file2): + try: + os.symlink(file1, file2) + except OSError as e: + if e.errno == errno.EEXIST: + os.remove(file2) + os.symlink(file1, file2) diff --git a/dynamic_predictor/davis/motion_mask.py b/dynamic_predictor/davis/motion_mask.py new file mode 100644 index 0000000000000000000000000000000000000000..c70d2be9370525bae59f1a0f552accfdcb3fc91f --- /dev/null +++ b/dynamic_predictor/davis/motion_mask.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python +import os +import sys +from time import time +import argparse + +import numpy as np +import pandas as pd +from davis2017.evaluation import MaskEvaluation +''' +python motion_mask.py --label_path /home/remote/data/sintel/training/dynamic_label_perfect --results_path /home/remote/project/DyGS/InstantSplat/baselines/3dgs/sintel + +python motion_mask.py --label_path /home/remote/data/sintel/training/dynamic_label_perfect --results_path /home/remote/project/DyGS/InstantSplat/data/sintel_pose_dec8_baseline + +''' +''' + +python /home/kai/monster/monst3r/main/evaluation/sintel/motion_mask.py --label_path /home/kai/monster/monst3r/main/data/sintel/training/dynamic_label_perfect --results_path /home/kai/monster/github/monst3r/results/sintel_pose + + J&F-Mean J-Mean J-Recall J-Decay F-Mean F-Recall F-Decay + 0.414534 0.371022 0.337043 0.0 0.458046 0.437202 0.0 + + python /home/kai/monster/monst3r/main/evaluation/sintel/motion_mask.py --label_path /home/kai/monster/monst3r/main/data/sintel/training/dynamic_label_perfect --results_path /home/kai/monster/monst3r/monst3r_assets/results/EqMSeg_fix_monst3r/pred_mask_avg_pred1 + J&F-Mean J-Mean J-Recall J-Decay F-Mean F-Recall F-Decay + 0.558321 0.593482 0.689984 0.0 0.52316 0.529412 0.0 + +python 
/home/kai/monster/monst3r/main/evaluation/sintel/motion_mask.py --label_path /home/kai/monster/monst3r/main/data/sintel/training/dynamic_label_perfect --results_path /home/kai/monster/monst3r/monst3r_assets/results/EqMSeg_fix_encoder/50 + J&F-Mean J-Mean J-Recall J-Decay F-Mean F-Recall F-Decay + 0.570488 0.594178 0.629571 0.0 0.546798 0.54213 0.0 + + + + +python evaluation/sintel/motion_mask.py --label_path /home/kai/monster/monst3r/data/sintel/training/dynamic_label_perfect --results_path results/sintel_pose_123456789_from_monst3r_lrx0.2/dynamic_mask_nn + + J&F-Mean J-Mean J-Recall J-Decay F-Mean F-Recall F-Decay + 0.352903 0.378218 0.443561 0.0 0.327589 0.340223 0.0 + +python evaluation/sintel/motion_mask.py --label_path /home/kai/monster/monst3r/data/sintel/training/dynamic_label_perfect --results_path results/sintel_pose_123456789_from_monst3r_lrx0.2/dynamic_mask_raft + +--------------------------- Global results --------------------------- + J&F-Mean J-Mean J-Recall J-Decay F-Mean F-Recall F-Decay + 0.362136 0.309305 0.252782 0.0 0.414966 0.424483 0.0 + +''' + + +seq_list = ["alley_2", "ambush_4", "ambush_5", "ambush_6", "cave_2", "cave_4", "market_2", + "market_5", "market_6", "shaman_3", "sleeping_1", "sleeping_2", "temple_2", "temple_3"] + +time_start = time() +parser = argparse.ArgumentParser() +parser.add_argument('--label_path', type=str, help='Subset to evaluate the results', default='all') +parser.add_argument('--results_path', type=str, help='Subset to evaluate the results', default='all') +args, _ = parser.parse_known_args() + + +csv_name_global = f'global_results.csv' +csv_name_per_sequence = f'per-sequence_results.csv' + +# Check if the method has been evaluated before, if so read the results, otherwise compute the results +csv_name_global_path = os.path.join(args.results_path, csv_name_global) +csv_name_per_sequence_path = os.path.join(args.results_path, csv_name_per_sequence) + +print(f'Evaluating sequences...') +# Create dataset and evaluate +dataset_eval = MaskEvaluation(root=args.label_path, sequences=seq_list) +metrics_res = dataset_eval.evaluate(args.results_path) +J, F = metrics_res['J'], metrics_res['F'] + +# Generate dataframe for the general results +g_measures = ['J&F-Mean', 'J-Mean', 'J-Recall', 'J-Decay', 'F-Mean', 'F-Recall', 'F-Decay'] +final_mean = (np.mean(J["M"]) + np.mean(F["M"])) / 2. 
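+# Column semantics (cf. davis2017.utils.db_statistics): *-Mean is the average
+# per-frame score, *-Recall the fraction of frames scoring above 0.5, and
+# *-Decay the drop from the first to the last quarter of each sequence.
+# A tiny sketch on hypothetical per-frame values:
+#   v = np.array([0.8, 0.7, 0.6, 0.4])
+#   v.mean(), (v > 0.5).mean()  # -> 0.625, 0.75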
+g_res = np.array([final_mean, np.mean(J["M"]), np.mean(J["R"]), np.mean(J["D"]), np.mean(F["M"]), np.mean(F["R"]), + np.mean(F["D"])]) +g_res = np.reshape(g_res, [1, len(g_res)]) +table_g = pd.DataFrame(data=g_res, columns=g_measures) +with open(csv_name_global_path, 'w') as f: + table_g.to_csv(f, index=False, float_format="%.3f") +print(f'Global results saved in {csv_name_global_path}') + +# Generate a dataframe for the per sequence results +seq_names = list(J['M_per_object'].keys()) +seq_measures = ['Sequence', 'J-Mean', 'F-Mean'] +J_per_object = [J['M_per_object'][x] for x in seq_names] +F_per_object = [F['M_per_object'][x] for x in seq_names] +table_seq = pd.DataFrame(data=list(zip(seq_names, J_per_object, F_per_object)), columns=seq_measures) +with open(csv_name_per_sequence_path, 'w') as f: + table_seq.to_csv(f, index=False, float_format="%.3f") +print(f'Per-sequence results saved in {csv_name_per_sequence_path}') + +# Print the results +sys.stdout.write(f"--------------------------- Global results ---------------------------\n") +print(table_g.to_string(index=False)) +# sys.stdout.write(f"\n---------- Per sequence results ----------\n") +# print(table_seq.to_string(index=False)) +total_time = time() - time_start +sys.stdout.write('\nTotal time:' + str(total_time)) diff --git a/dynamic_predictor/davis/motion_mask_davis.py b/dynamic_predictor/davis/motion_mask_davis.py new file mode 100644 index 0000000000000000000000000000000000000000..e0f733ca8fbcf7ab6424cac547067eb100eed3de --- /dev/null +++ b/dynamic_predictor/davis/motion_mask_davis.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python +import os +import sys +from time import time +import argparse + +import numpy as np +import pandas as pd +from davis2017.evaluation import MaskEvaluation +''' +python motion_mask_davis.py --label_path /home/remote/main/data/davis/DAVIS/Annotations/480p --results_path /home/remote/project/DyGS/InstantSplat/data/davis + +python motion_mask_davis.py --label_path /home/remote/main/data/davis/DAVIS/Annotations/480p --results_path /home/remote/project/DyGS/InstantSplat/baselines/3dgs/davis + +''' + + + + +seq_list = ['soapbox', 'camel', 'motocross-jump', 'dog', 'car-shadow', 'blackswan', 'horsejump-high', 'parkour'] + +time_start = time() +parser = argparse.ArgumentParser() +parser.add_argument('--label_path', type=str, help='Subset to evaluate the results', default='all') +parser.add_argument('--results_path', type=str, help='Subset to evaluate the results', default='all') +args, _ = parser.parse_known_args() + + +csv_name_global = f'global_results.csv' +csv_name_per_sequence = f'per-sequence_results.csv' + +# Check if the method has been evaluated before, if so read the results, otherwise compute the results +csv_name_global_path = os.path.join(args.results_path, csv_name_global) +csv_name_per_sequence_path = os.path.join(args.results_path, csv_name_per_sequence) + +print(f'Evaluating sequences...') +# Create dataset and evaluate +dataset_eval = MaskEvaluation(root=args.label_path, sequences=seq_list) +metrics_res = dataset_eval.evaluate(args.results_path) +J, F = metrics_res['J'], metrics_res['F'] + +# Generate dataframe for the general results +g_measures = ['J&F-Mean', 'J-Mean', 'J-Recall', 'J-Decay', 'F-Mean', 'F-Recall', 'F-Decay'] +final_mean = (np.mean(J["M"]) + np.mean(F["M"])) / 2. 
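+# Hedged sanity check (relies on the '<sequence>_<object id>' keys produced by
+# MaskEvaluation.evaluate): every requested sequence should have contributed
+# at least one per-object score before aggregation.
+missing = [s for s in seq_list if not any(k.startswith(s) for k in J['M_per_object'])]
+assert not missing, f'no scores for sequences: {missing}'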
+g_res = np.array([final_mean, np.mean(J["M"]), np.mean(J["R"]), np.mean(J["D"]), np.mean(F["M"]), np.mean(F["R"]), + np.mean(F["D"])]) +g_res = np.reshape(g_res, [1, len(g_res)]) +table_g = pd.DataFrame(data=g_res, columns=g_measures) +with open(csv_name_global_path, 'w') as f: + table_g.to_csv(f, index=False, float_format="%.3f") +print(f'Global results saved in {csv_name_global_path}') + +# Generate a dataframe for the per sequence results +seq_names = list(J['M_per_object'].keys()) +seq_measures = ['Sequence', 'J-Mean', 'F-Mean'] +J_per_object = [J['M_per_object'][x] for x in seq_names] +F_per_object = [F['M_per_object'][x] for x in seq_names] +table_seq = pd.DataFrame(data=list(zip(seq_names, J_per_object, F_per_object)), columns=seq_measures) +with open(csv_name_per_sequence_path, 'w') as f: + table_seq.to_csv(f, index=False, float_format="%.3f") +print(f'Per-sequence results saved in {csv_name_per_sequence_path}') + +# Print the results +sys.stdout.write(f"--------------------------- Global results ---------------------------\n") +print(table_g.to_string(index=False)) +# sys.stdout.write(f"\n---------- Per sequence results ----------\n") +# print(table_seq.to_string(index=False)) +total_time = time() - time_start +sys.stdout.write('\nTotal time:' + str(total_time)) diff --git a/dynamic_predictor/download_ckpt.sh b/dynamic_predictor/download_ckpt.sh new file mode 100644 index 0000000000000000000000000000000000000000..b724d2bf0be42256976714384af7250d4fe2f533 --- /dev/null +++ b/dynamic_predictor/download_ckpt.sh @@ -0,0 +1,11 @@ +# mkdir -p ./checkpoints/ +# gdown --fuzzy https://drive.google.com/file/d/1Z1jO_JmfZj0z3bgMvCwqfUhyZ1bIbc9E/view?usp=sharing -O ../checkpoints/ +# THE original dust3r ckpt +# wget https://download.europe.naverlabs.com/ComputerVision/DUSt3R/DUSt3R_ViTLarge_BaseDecoder_512_dpt.pth -P ../checkpoints/ + +# sea-raft ckpt +cd ./third_party/RAFT +wget https://www.dropbox.com/s/4j4z58wuv8o0mfz/models.zip +unzip models.zip +rm models.zip +gdown --fuzzy https://drive.google.com/file/d/1a0C5FTdhjM4rKrfXiGhec7eq2YM141lu/view?usp=drive_link -O models/ diff --git a/dynamic_predictor/dust3r/__init__.py b/dynamic_predictor/dust3r/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a32692113d830ddc4af4e6ed608f222fbe062e6e --- /dev/null +++ b/dynamic_predictor/dust3r/__init__.py @@ -0,0 +1,2 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). diff --git a/dynamic_predictor/dust3r/cloud_opt/__init__.py b/dynamic_predictor/dust3r/cloud_opt/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..72ba20831e09a5e833a1521d5d199b766a1776a8 --- /dev/null +++ b/dynamic_predictor/dust3r/cloud_opt/__init__.py @@ -0,0 +1,30 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). 
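+#
+# Typical call (a sketch; `dust3r_output` is the inference dict holding the
+# 'view1', 'view2', 'pred1' and 'pred2' entries unpacked below):
+#   scene = global_aligner(dust3r_output, device='cuda',
+#                          mode=GlobalAlignerMode.PointCloudOptimizer)
+#   scene.compute_global_alignment(init='mst', niter=300)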
+# +# -------------------------------------------------------- +# global alignment optimization wrapper function +# -------------------------------------------------------- +from enum import Enum + +from .optimizer import PointCloudOptimizer +from .modular_optimizer import ModularPointCloudOptimizer + + +class GlobalAlignerMode(Enum): + PointCloudOptimizer = "PointCloudOptimizer" + ModularPointCloudOptimizer = "ModularPointCloudOptimizer" + PairViewer = "PairViewer" + + +def global_aligner(dust3r_output, device, mode=GlobalAlignerMode.PointCloudOptimizer, **optim_kw): + # extract all inputs + view1, view2, pred1, pred2 = [dust3r_output[k] for k in 'view1 view2 pred1 pred2'.split()] + # build the optimizer + if mode == GlobalAlignerMode.PointCloudOptimizer: + net = PointCloudOptimizer(view1, view2, pred1, pred2, **optim_kw).to(device) + elif mode == GlobalAlignerMode.ModularPointCloudOptimizer: + net = ModularPointCloudOptimizer(view1, view2, pred1, pred2, **optim_kw).to(device) + else: + raise NotImplementedError(f'Unknown mode {mode}') + + return net diff --git a/dynamic_predictor/dust3r/cloud_opt/base_opt.py b/dynamic_predictor/dust3r/cloud_opt/base_opt.py new file mode 100644 index 0000000000000000000000000000000000000000..cc16264f3d25735c089a2f5af7445cf74f798c85 --- /dev/null +++ b/dynamic_predictor/dust3r/cloud_opt/base_opt.py @@ -0,0 +1,620 @@ +# -------------------------------------------------------- +# Base class for the global alignement procedure +# -------------------------------------------------------- +from copy import deepcopy +import cv2 + +import numpy as np +import torch +import torch.nn as nn +import roma +from copy import deepcopy +import tqdm + +from dust3r.utils.geometry import inv, geotrf +from dust3r.utils.device import to_numpy +from dust3r.utils.image import rgb +from dust3r.viz import SceneViz, segment_sky, auto_cam_size +from dust3r.optim_factory import adjust_learning_rate_by_lr + +from dust3r.cloud_opt.commons import (edge_str, ALL_DISTS, NoGradParamDict, get_imshapes, signed_expm1, signed_log1p, + cosine_schedule, linear_schedule, cycled_linear_schedule, get_conf_trf) +import dust3r.cloud_opt.init_im_poses as init_fun +from scipy.spatial.transform import Rotation +from dust3r.utils.vo_eval import save_trajectory_tum_format +import os +import matplotlib.pyplot as plt +from PIL import Image + +def c2w_to_tumpose(c2w): + """ + Convert a camera-to-world matrix to a tuple of translation and rotation + + input: c2w: 4x4 matrix + output: tuple of translation and rotation (x y z qw qx qy qz) + """ + # convert input to numpy + c2w = to_numpy(c2w) + xyz = c2w[:3, -1] + rot = Rotation.from_matrix(c2w[:3, :3]) + qx, qy, qz, qw = rot.as_quat() + tum_pose = np.concatenate([xyz, [qw, qx, qy, qz]]) + return tum_pose + +class BasePCOptimizer (nn.Module): + """ Optimize a global scene, given a list of pairwise observations. 
+ Graph node: images + Graph edges: observations = (pred1, pred2) + """ + + def __init__(self, *args, **kwargs): + if len(args) == 1 and len(kwargs) == 0: + other = deepcopy(args[0]) + attrs = '''edges is_symmetrized dist n_imgs pred_i pred_j imshapes + min_conf_thr conf_thr conf_i conf_j im_conf + base_scale norm_pw_scale POSE_DIM pw_poses + pw_adaptors pw_adaptors has_im_poses rand_pose imgs verbose'''.split() + self.__dict__.update({k: other[k] for k in attrs}) + else: + self._init_from_views(*args, **kwargs) + + def _init_from_views(self, view1, view2, pred1, pred2, + dist='l1', + conf='log', + min_conf_thr=3, + thr_for_init_conf=False, + base_scale=0.5, + use_gt_mask = False, + use_pred_mask = False, + pred_motion_mask_thre = 0.5, + allow_pw_adaptors=False, + pw_break=20, + rand_pose=torch.randn, + empty_cache=False, + verbose=True): + super().__init__() + if not isinstance(view1['idx'], list): + view1['idx'] = view1['idx'].tolist() + if not isinstance(view2['idx'], list): + view2['idx'] = view2['idx'].tolist() + self.edges = [(int(i), int(j)) for i, j in zip(view1['idx'], view2['idx'])] + self.is_symmetrized = set(self.edges) == {(j, i) for i, j in self.edges} + self.dist = ALL_DISTS[dist] + self.verbose = verbose + self.empty_cache = empty_cache + self.n_imgs = self._check_edges() + + self.use_self_mask = not (use_gt_mask or use_pred_mask) + self.pred_motion_mask_thre = pred_motion_mask_thre + + self.i_count = {} + self.j_count = {} + for i, j in zip(view1['idx'], view2['idx']): + self.i_count[i] = self.i_count.get(i, 0) + 1 + self.j_count[j] = self.j_count.get(j, 0) + 1 + + # input data + pred1_pts = pred1['pts3d'] + pred2_pts = pred2['pts3d_in_other_view'] + self.pred_i = NoGradParamDict({ij: pred1_pts[n] for n, ij in enumerate(self.str_edges)}) + self.pred_j = NoGradParamDict({ij: pred2_pts[n] for n, ij in enumerate(self.str_edges)}) + self.imshapes = get_imshapes(self.edges, pred1_pts, pred2_pts) + + # work in log-scale with conf + pred1_conf = pred1['conf'] # (Number of image_pairs, H, W) + pred2_conf = pred2['conf'] # (Number of image_pairs, H, W) + self.min_conf_thr = min_conf_thr + self.thr_for_init_conf = thr_for_init_conf + self.conf_trf = get_conf_trf(conf) + + self.conf_i = NoGradParamDict({ij: pred1_conf[n] for n, ij in enumerate(self.str_edges)}) + self.conf_j = NoGradParamDict({ij: pred2_conf[n] for n, ij in enumerate(self.str_edges)}) + self.im_conf = self._compute_img_conf(pred1_conf, pred2_conf) + for i in range(len(self.im_conf)): + # self.im_conf[i].requires_grad = True + self.im_conf[i].requires_grad = False + + self.init_conf_maps = [c.clone() for c in self.im_conf] + + if use_pred_mask and not use_gt_mask: + aggreagte_mode = 'avg' + # print("Aggreagating predicted dynamic masks, mode: ", aggreagte_mode) + # compute image dynamic masks + pred_1_mmask = pred1['dynamic_mask'] + self.mmask_n = {nm: pred_1_mmask[i] for i, nm in enumerate(self.str_edges)} + self.im_dyna = self._compute_img_mmask(pred_1_mmask, reduce = aggreagte_mode) + self.im_dyna_avg = self._compute_img_mmask(pred_1_mmask, reduce = 'avg') + self.im_dyna_max = self._compute_img_mmask(pred_1_mmask, reduce = 'max') + + + + # pairwise pose parameters + self.base_scale = base_scale + self.norm_pw_scale = True + self.pw_break = pw_break + self.POSE_DIM = 7 + self.pw_poses = nn.Parameter(rand_pose((self.n_edges, 1+self.POSE_DIM))) # pairwise poses + self.pw_adaptors = nn.Parameter(torch.zeros((self.n_edges, 2))) # slight xy/z adaptation + self.pw_adaptors.requires_grad_(allow_pw_adaptors) + 
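# Each pairwise pose packs 1 + POSE_DIM = 8 numbers per edge: a unit
+        # quaternion (4), a signed-log translation (3) and a log scale (last
+        # slot); _get_poses() and get_pw_scale() below decode them back into
+        # scaled cam-to-world transforms. +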
self.has_im_poses = False + self.rand_pose = rand_pose + + # possibly store images, camera_pose, instance for show_pointcloud + self.imgs = None + if 'img' in view1 and 'img' in view2: + imgs = [torch.zeros((3,)+hw) for hw in self.imshapes] + for v in range(len(self.edges)): + idx = view1['idx'][v] + imgs[idx] = view1['img'][v] + idx = view2['idx'][v] + imgs[idx] = view2['img'][v] + self.imgs = rgb(imgs) + + dynamic_masks = [torch.zeros(hw) for hw in self.imshapes] + for v in range(len(self.edges)): + idx = view1['idx'][v] + dynamic_masks[idx] = self.im_dyna[idx] > self.pred_motion_mask_thre + idx = view2['idx'][v] + dynamic_masks[idx] = self.im_dyna[idx] > self.pred_motion_mask_thre + self.dynamic_masks = dynamic_masks + + # if use_gt_mask: + # if 'dynamic_mask' in view1 and 'dynamic_mask' in view2: + # dynamic_masks = [torch.zeros(hw) for hw in self.imshapes] + # for v in range(len(self.edges)): + # idx = view1['idx'][v] + # dynamic_masks[idx] = view1['dynamic_mask'][v] + # idx = view2['idx'][v] + # dynamic_masks[idx] = view2['dynamic_mask'][v] + # self.dynamic_masks = dynamic_masks + + + self.camera_poses = None + if 'camera_pose' in view1 and 'camera_pose' in view2: + camera_poses = [torch.zeros((4, 4)) for _ in range(self.n_imgs)] + for v in range(len(self.edges)): + idx = view1['idx'][v] + camera_poses[idx] = view1['camera_pose'][v] + idx = view2['idx'][v] + camera_poses[idx] = view2['camera_pose'][v] + self.camera_poses = camera_poses + + self.img_pathes = None + if 'instance' in view1 and 'instance' in view2: + img_pathes = ['' for _ in range(self.n_imgs)] + for v in range(len(self.edges)): + idx = view1['idx'][v] + img_pathes[idx] = view1['instance'][v] + idx = view2['idx'][v] + img_pathes[idx] = view2['instance'][v] + self.img_pathes = img_pathes + + @property + def n_edges(self): + return len(self.edges) + + @property + def str_edges(self): + return [edge_str(i, j) for i, j in self.edges] + + @property + def imsizes(self): + return [(w, h) for h, w in self.imshapes] + + @property + def device(self): + return next(iter(self.parameters())).device + + def state_dict(self, trainable=True): + all_params = super().state_dict() + return {k: v for k, v in all_params.items() if k.startswith(('_', 'pred_i.', 'pred_j.', 'conf_i.', 'conf_j.')) != trainable} + + def load_state_dict(self, data): + return super().load_state_dict(self.state_dict(trainable=False) | data) + + def _check_edges(self): + indices = sorted({i for edge in self.edges for i in edge}) + assert indices == list(range(len(indices))), 'bad pair indices: missing values ' + return len(indices) + + @torch.no_grad() + def _compute_img_mmask(self, pred1_mmask, reduce = 'avg'): + im_dyna = [torch.zeros(hw, device=self.device) for hw in self.imshapes] + if reduce == 'avg': + for e, (i, j) in enumerate(self.edges): + im_dyna[i] = torch.add(im_dyna[i], pred1_mmask[e]/self.i_count[i]) + # im_dyna[j] = torch.add(im_dyna[j], pred2_mmask[e]/self.j_count[j]) + if reduce == 'max': + for e, (i, j) in enumerate(self.edges): + im_dyna[i] = torch.maximum(im_dyna[i], pred1_mmask[e]) + # im_dyna[j] = torch.maximum(im_dyna[j], pred2_mmask[e]) + return im_dyna + + @torch.no_grad() + def _compute_img_conf(self, pred1_conf, pred2_conf): + im_conf = nn.ParameterList([torch.zeros(hw, device=self.device) for hw in self.imshapes]) + for e, (i, j) in enumerate(self.edges): + im_conf[i] = torch.maximum(im_conf[i], pred1_conf[e]) + im_conf[j] = torch.maximum(im_conf[j], pred2_conf[e]) + return im_conf + + def get_adaptors(self): + adapt = self.pw_adaptors + 
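# pw_adaptors stores per-edge (scale_xy, scale_z) corrections; duplicating
+        # the first column gives a per-axis (x, y, z) log-scale before the exp. +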
adapt = torch.cat((adapt[:, 0:1], adapt), dim=-1) # (scale_xy, scale_xy, scale_z) + if self.norm_pw_scale: # normalize so that the product == 1 + adapt = adapt - adapt.mean(dim=1, keepdim=True) + return (adapt / self.pw_break).exp() + + def _get_poses(self, poses): + # normalize rotation + Q = poses[:, :4] + T = signed_expm1(poses[:, 4:7]) + RT = roma.RigidUnitQuat(Q, T).normalize().to_homogeneous() + return RT + + def _set_pose(self, poses, idx, R, T=None, scale=None, force=False): + # all poses == cam-to-world + pose = poses[idx] + if not (pose.requires_grad or force): + return pose + + if R.shape == (4, 4): + assert T is None + T = R[:3, 3] + R = R[:3, :3] + + if R is not None: + pose.data[0:4] = roma.rotmat_to_unitquat(R) + if T is not None: + pose.data[4:7] = signed_log1p(T / (scale or 1)) # translation is function of scale + + if scale is not None: + assert poses.shape[-1] in (8, 13) + pose.data[-1] = np.log(float(scale)) + return pose + + def get_pw_norm_scale_factor(self): + if self.norm_pw_scale: + # normalize scales so that things cannot go south + # we want that exp(scale) ~= self.base_scale + return (np.log(self.base_scale) - self.pw_poses[:, -1].mean()).exp() + else: + return 1 # don't norm scale for known poses + + def get_pw_scale(self): + scale = self.pw_poses[:, -1].exp() # (n_edges,) + scale = scale * self.get_pw_norm_scale_factor() + return scale + + def get_pw_poses(self): # cam to world + RT = self._get_poses(self.pw_poses) + scaled_RT = RT.clone() + scaled_RT[:, :3] *= self.get_pw_scale().view(-1, 1, 1) # scale the rotation AND translation + return scaled_RT + + def get_masks(self): + if self.thr_for_init_conf: + return [(conf > self.min_conf_thr) for conf in self.init_conf_maps] + else: + return [(conf > self.min_conf_thr) for conf in self.im_conf] + + def depth_to_pts3d(self): + raise NotImplementedError() + + def get_pts3d(self, raw=False, **kwargs): + res = self.depth_to_pts3d(**kwargs) + if not raw: + res = [dm[:h*w].view(h, w, 3) for dm, (h, w) in zip(res, self.imshapes)] + return res + + def _set_focal(self, idx, focal, force=False): + raise NotImplementedError() + + def get_focals(self): + raise NotImplementedError() + + def get_known_focal_mask(self): + raise NotImplementedError() + + def get_principal_points(self): + raise NotImplementedError() + + def get_conf(self, mode=None): + trf = self.conf_trf if mode is None else get_conf_trf(mode) + return [trf(c) for c in self.im_conf] + + def get_init_conf(self, mode=None): + trf = self.conf_trf if mode is None else get_conf_trf(mode) + return [trf(c) for c in self.init_conf_maps] + + def get_im_poses(self): + raise NotImplementedError() + + def _set_depthmap(self, idx, depth, force=False): + raise NotImplementedError() + + def get_depthmaps(self, raw=False): + raise NotImplementedError() + + def clean_pointcloud(self, **kw): + cams = inv(self.get_im_poses()) + K = self.get_intrinsics() + depthmaps = self.get_depthmaps() + all_pts3d = self.get_pts3d() + + new_im_confs = clean_pointcloud(self.im_conf, K, cams, depthmaps, all_pts3d, **kw) + + for i, new_conf in enumerate(new_im_confs): + self.im_conf[i].data[:] = new_conf + return self + + def get_tum_poses(self): + poses = self.get_im_poses() + tt = np.arange(len(poses)).astype(float) + tum_poses = [c2w_to_tumpose(p) for p in poses] + tum_poses = np.stack(tum_poses, 0) + return [tum_poses, tt] + + def save_tum_poses(self, path): + traj = self.get_tum_poses() + save_trajectory_tum_format(traj, path) + return traj[0] # return the poses + + def save_focals(self, 
path): + # convert focal to txt + focals = self.get_focals() + np.savetxt(path, focals.detach().cpu().numpy(), fmt='%.6f') + return focals + + def save_intrinsics(self, path): + K_raw = self.get_intrinsics() + K = K_raw.reshape(-1, 9) + np.savetxt(path, K.detach().cpu().numpy(), fmt='%.6f') + return K_raw + + def save_conf_maps(self, path): + conf = self.get_conf() + for i, c in enumerate(conf): + np.save(f'{path}/conf_{i}.npy', c.detach().cpu().numpy()) + return conf + + def save_dyna_maps(self, path): + dyna_avg = self.im_dyna_avg + for i, c in enumerate(dyna_avg): + np.save(f'{path}/dyna_avg_{i}.npy', c.detach().cpu().numpy()) + + dyna_max = self.im_dyna_max + for i, c in enumerate(dyna_max): + np.save(f'{path}/dyna_max_{i}.npy', c.detach().cpu().numpy()) + + + def save_init_conf_maps(self, path): + conf = self.get_init_conf() + for i, c in enumerate(conf): + np.save(f'{path}/init_conf_{i}.npy', c.detach().cpu().numpy()) + return conf + + def save_rgb_imgs(self, path): + imgs = self.imgs + for i, img in enumerate(imgs): + # convert from rgb to bgr + img = img[..., ::-1] + cv2.imwrite(f'{path}/frame_{i:04d}.png', img*255) + return imgs + + def save_dynamic_masks(self, path): + dynamic_masks = self.dynamic_masks if getattr(self, 'sam2_dynamic_masks', None) is None else self.sam2_dynamic_masks + for i, dynamic_mask in enumerate(dynamic_masks): + cv2.imwrite(f'{path}/dynamic_mask_{i}.png', (dynamic_mask * 255).detach().cpu().numpy().astype(np.uint8)) + return dynamic_masks + + def save_depth_maps(self, path): + depth_maps = self.get_depthmaps() + images = [] + + for i, depth_map in enumerate(depth_maps): + # Apply color map to depth map + depth_map_colored = cv2.applyColorMap((depth_map * 255).detach().cpu().numpy().astype(np.uint8), cv2.COLORMAP_JET) + img_path = f'{path}/frame_{(i):04d}.png' + cv2.imwrite(img_path, depth_map_colored) + images.append(Image.open(img_path)) + np.save(f'{path}/frame_{(i):04d}.npy', depth_map.detach().cpu().numpy()) + + images[0].save(f'{path}/_depth_maps.gif', save_all=True, append_images=images[1:], duration=100, loop=0) + + return depth_maps + + def forward(self, ret_details=False): + pw_poses = self.get_pw_poses() # cam-to-world + pw_adapt = self.get_adaptors() + proj_pts3d = self.get_pts3d() + # pre-compute pixel weights + weight_i = {i_j: self.conf_trf(c) for i_j, c in self.conf_i.items()} + weight_j = {i_j: self.conf_trf(c) for i_j, c in self.conf_j.items()} + + loss = 0 + if ret_details: + details = -torch.ones((self.n_imgs, self.n_imgs)) + + for e, (i, j) in enumerate(self.edges): + i_j = edge_str(i, j) + # distance in image i and j + aligned_pred_i = geotrf(pw_poses[e], pw_adapt[e] * self.pred_i[i_j]) + aligned_pred_j = geotrf(pw_poses[e], pw_adapt[e] * self.pred_j[i_j]) + li = self.dist(proj_pts3d[i], aligned_pred_i, weight=weight_i[i_j]).mean() + lj = self.dist(proj_pts3d[j], aligned_pred_j, weight=weight_j[i_j]).mean() + loss = loss + li + lj + + if ret_details: + details[i, j] = li + lj + loss /= self.n_edges # average over all pairs + + if ret_details: + return loss, details + return loss + + @torch.cuda.amp.autocast(enabled=False) + def compute_global_alignment(self, init=None, save_score_path=None, save_score_only=False, niter_PnP=10, **kw): + if init is None: + pass + elif init == 'msp' or init == 'mst': + init_fun.init_minimum_spanning_tree(self, save_score_path=save_score_path, save_score_only=save_score_only, niter_PnP=niter_PnP) + if save_score_only: # if only want the score map + return None + elif init == 'known_poses': + 
self.preset_pose(known_poses=self.camera_poses, requires_grad=True) + init_fun.init_from_known_poses(self, min_conf_thr=self.min_conf_thr, + niter_PnP=niter_PnP) + else: + raise ValueError(f'bad value for {init=}') + + return global_alignment_loop(self, **kw) + + @torch.no_grad() + def mask_sky(self): + res = deepcopy(self) + for i in range(self.n_imgs): + sky = segment_sky(self.imgs[i]) + res.im_conf[i][sky] = 0 + return res + + def show(self, show_pw_cams=False, show_pw_pts3d=False, cam_size=None, **kw): + viz = SceneViz() + if self.imgs is None: + colors = np.random.randint(0, 256, size=(self.n_imgs, 3)) + colors = list(map(tuple, colors.tolist())) + for n in range(self.n_imgs): + viz.add_pointcloud(self.get_pts3d()[n], colors[n], self.get_masks()[n]) + else: + viz.add_pointcloud(self.get_pts3d(), self.imgs, self.get_masks()) + colors = np.random.randint(256, size=(self.n_imgs, 3)) + + # camera poses + im_poses = to_numpy(self.get_im_poses()) + if cam_size is None: + cam_size = auto_cam_size(im_poses) + viz.add_cameras(im_poses, self.get_focals(), colors=colors, + images=self.imgs, imsizes=self.imsizes, cam_size=cam_size) + if show_pw_cams: + pw_poses = self.get_pw_poses() + viz.add_cameras(pw_poses, color=(192, 0, 192), cam_size=cam_size) + + if show_pw_pts3d: + pts = [geotrf(pw_poses[e], self.pred_i[edge_str(i, j)]) for e, (i, j) in enumerate(self.edges)] + viz.add_pointcloud(pts, (128, 0, 128)) + + viz.show(**kw) + return viz + + +def global_alignment_loop(net, lr=0.01, niter=300, schedule='cosine', lr_min=1e-3, temporal_smoothing_weight=0, depth_map_save_dir=None): + params = [p for p in net.parameters() if p.requires_grad] + if not params: + return net + + verbose = net.verbose + if verbose: + print('Global alignement - optimizing for:') + print([name for name, value in net.named_parameters() if value.requires_grad]) + + lr_base = lr + optimizer = torch.optim.Adam(params, lr=lr, betas=(0.9, 0.9)) + + loss = float('inf') + if verbose: + with tqdm.tqdm(total=niter) as bar: + while bar.n < bar.total: + if bar.n % 500 == 0 and depth_map_save_dir is not None: + if not os.path.exists(depth_map_save_dir): + os.makedirs(depth_map_save_dir) + # visualize the depthmaps + depth_maps = net.get_depthmaps() + for i, depth_map in enumerate(depth_maps): + depth_map_save_path = os.path.join(depth_map_save_dir, f'depthmaps_{i}_iter_{bar.n}.png') + plt.imsave(depth_map_save_path, depth_map.detach().cpu().numpy(), cmap='jet') + print(f"Saved depthmaps at iteration {bar.n} to {depth_map_save_dir}") + loss, lr = global_alignment_iter(net, bar.n, niter, lr_base, lr_min, optimizer, schedule, + temporal_smoothing_weight=temporal_smoothing_weight) + bar.set_postfix_str(f'{lr=:g} loss={loss:g}') + bar.update() + else: + for n in range(niter): + loss, _ = global_alignment_iter(net, n, niter, lr_base, lr_min, optimizer, schedule, + temporal_smoothing_weight=temporal_smoothing_weight) + return loss + + +def global_alignment_iter(net, cur_iter, niter, lr_base, lr_min, optimizer, schedule, temporal_smoothing_weight=0): + t = cur_iter / niter + if schedule == 'cosine': + lr = cosine_schedule(t, lr_base, lr_min) + elif schedule == 'linear': + lr = linear_schedule(t, lr_base, lr_min) + elif schedule.startswith('cycle'): + try: + num_cycles = int(schedule[5:]) + except ValueError: + num_cycles = 2 + lr = cycled_linear_schedule(t, lr_base, lr_min, num_cycles=num_cycles) + else: + raise ValueError(f'bad lr {schedule=}') + + adjust_learning_rate_by_lr(optimizer, lr) + optimizer.zero_grad() + + if net.empty_cache: + 
torch.cuda.empty_cache() + + loss = net(epoch=cur_iter) + + if net.empty_cache: + torch.cuda.empty_cache() + + loss.backward() + + if net.empty_cache: + torch.cuda.empty_cache() + + optimizer.step() + + return float(loss), lr + + + +@torch.no_grad() +def clean_pointcloud( im_confs, K, cams, depthmaps, all_pts3d, + tol=0.001, bad_conf=0, dbg=()): + """ Method: + 1) express all 3d points in each camera coordinate frame + 2) if they're in front of a depthmap --> then lower their confidence + """ + assert len(im_confs) == len(cams) == len(K) == len(depthmaps) == len(all_pts3d) + assert 0 <= tol < 1 + res = [c.clone() for c in im_confs] + + # reshape appropriately + all_pts3d = [p.view(*c.shape,3) for p,c in zip(all_pts3d, im_confs)] + depthmaps = [d.view(*c.shape) for d,c in zip(depthmaps, im_confs)] + + for i, pts3d in enumerate(all_pts3d): + for j in range(len(all_pts3d)): + if i == j: continue + + # project 3dpts in other view + proj = geotrf(cams[j], pts3d) + proj_depth = proj[:,:,2] + u,v = geotrf(K[j], proj, norm=1, ncol=2).round().long().unbind(-1) + + # check which points are actually in the visible cone + H, W = im_confs[j].shape + msk_i = (proj_depth > 0) & (0 <= u) & (u < W) & (0 <= v) & (v < H) + msk_j = v[msk_i], u[msk_i] + + # find bad points = those in front but less confident + bad_points = (proj_depth[msk_i] < (1-tol) * depthmaps[j][msk_j]) & (res[i][msk_i] < res[j][msk_j]) + + bad_msk_i = msk_i.clone() + bad_msk_i[msk_i] = bad_points + res[i][bad_msk_i] = res[i][bad_msk_i].clip_(max=bad_conf) + + return res \ No newline at end of file diff --git a/dynamic_predictor/dust3r/cloud_opt/camera_estimator.py b/dynamic_predictor/dust3r/cloud_opt/camera_estimator.py new file mode 100644 index 0000000000000000000000000000000000000000..a08057084acee076b400f64f38a476d3b1117479 --- /dev/null +++ b/dynamic_predictor/dust3r/cloud_opt/camera_estimator.py @@ -0,0 +1,64 @@ +import numpy as np +import torch +import torch.nn as nn +import cv2 + +from dust3r.utils.geometry import inv, geotrf, depthmap_to_absolute_camera_coordinates +from dust3r.post_process import estimate_focal_knowing_depth + + +def camera_parameter_estimation(view_n, view_m, pred_n, pred_m, im_conf): + + + # for frame n and m. 
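+    # Flow of the helpers below: intrinsics K and the focal are estimated from
+    # frame n's own pointmap (Weiszfeld focal estimate), then the relative
+    # pose between n and m is recovered with PnP-RANSAC from frame m's points
+    # expressed in n's camera frame.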
+ # pair_n2m: n->m + conf = float(pred_n['conf'].mean() * pred_m['conf'].mean()) + K, focal = estimate_intrinsic(view_n['true_shape'], pred_n['pts3d'], pred_n['conf']) + + try: + rel_pose_n2m = estimate_extrinsic(view_m['true_shape'], pred_m['pts3d_in_other_view'], im_conf.cpu().numpy(), K) + R_mn, T_mn = rel_pose_n2m[:3, :3], rel_pose_n2m[:3, 3] + except Exception as e: + print(f"Error estimating extrinsic parameters: {e}") + rel_pose_n2m = torch.eye(4).cuda() + R_mn, T_mn = rel_pose_n2m[:3, :3], rel_pose_n2m[:3, 3] + + + # ptcloud is expressed in camera n + + + depth_n, depth_m = pred_n['pts3d'][..., 2], geotrf(inv(rel_pose_n2m), pred_m['pts3d_in_other_view'])[..., 2] + + return conf, K, focal, R_mn, T_mn, depth_n, depth_m + + +def estimate_intrinsic(img_shape, pts3d, conf): + H, W = img_shape + pts3d = pts3d.cpu() + pp = torch.tensor((W/2, H/2)) + focal = float(estimate_focal_knowing_depth(pts3d[None], pp, focal_mode='weiszfeld')) + K = np.float32([(focal, 0, pp[0]), (0, focal, pp[1]), (0, 0, 1)]) + + return K, focal + +def estimate_extrinsic(img_shape, pts3d, conf, K): + min_conf_thr = 3 + + H, W = img_shape + H, W = H.item(), W.item() + # estimate the pose of pts1 in image 2 + pts3d = pts3d.cpu().numpy() + pixels = np.mgrid[:W, :H].T.astype(np.float32) + msk = (conf > min_conf_thr) + + res = cv2.solvePnPRansac(pts3d[msk], pixels[msk], K, None, + iterationsCount=100, reprojectionError=5, flags=cv2.SOLVEPNP_SQPNP) + success, R, T, inliers = res + assert success + + R = cv2.Rodrigues(R)[0] # world to cam + pose = inv(np.r_[np.c_[R, T], [(0, 0, 0, 1)]]) # cam to world + + return torch.from_numpy(pose.astype(np.float32)).cuda() + + diff --git a/dynamic_predictor/dust3r/cloud_opt/commons.py b/dynamic_predictor/dust3r/cloud_opt/commons.py new file mode 100644 index 0000000000000000000000000000000000000000..8ed808dfd1591d7849e1bc3768dfb8c9561a7d8a --- /dev/null +++ b/dynamic_predictor/dust3r/cloud_opt/commons.py @@ -0,0 +1,103 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). 
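+#
+# The schedules defined below map a progress value t in [0, 1] to a learning
+# rate, e.g.
+#   cosine_schedule(0.0, 1e-2, 1e-3)  # -> 1e-2 (start)
+#   cosine_schedule(1.0, 1e-2, 1e-3)  # -> 1e-3 (end)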
+# +# -------------------------------------------------------- +# utility functions for global alignment +# -------------------------------------------------------- +import torch +import torch.nn as nn +import numpy as np +from scipy.stats import zscore + +def edge_str(i, j): + return f'{i}_{j}' + + +def i_j_ij(ij): + # inputs are (i, j) + return edge_str(*ij), ij + + +def edge_conf(conf_i, conf_j, edge): + + score = float(conf_i[edge].mean() * conf_j[edge].mean()) + + return score + + +def compute_edge_scores(edges, conf_i, conf_j): + score_dict = {(i, j): edge_conf(conf_i, conf_j, e) for e, (i, j) in edges} + + return score_dict + +def NoGradParamDict(x): + assert isinstance(x, dict) + return nn.ParameterDict(x).requires_grad_(False) + + +def get_imshapes(edges, pred_i, pred_j): + n_imgs = max(max(e) for e in edges) + 1 + imshapes = [None] * n_imgs + for e, (i, j) in enumerate(edges): + shape_i = tuple(pred_i[e].shape[0:2]) + shape_j = tuple(pred_j[e].shape[0:2]) + if imshapes[i]: + assert imshapes[i] == shape_i, f'incorrect shape for image {i}' + if imshapes[j]: + assert imshapes[j] == shape_j, f'incorrect shape for image {j}' + imshapes[i] = shape_i + imshapes[j] = shape_j + return imshapes + + +def get_conf_trf(mode): + if mode == 'log': + def conf_trf(x): return x.log() + elif mode == 'sqrt': + def conf_trf(x): return x.sqrt() + elif mode == 'm1': + def conf_trf(x): return x-1 + elif mode in ('id', 'none'): + def conf_trf(x): return x + else: + raise ValueError(f'bad mode for {mode=}') + return conf_trf + + +def l2_dist(a, b, weight): + return ((a - b).square().sum(dim=-1) * weight) + + +def l1_dist(a, b, weight): + return ((a - b).norm(dim=-1) * weight) + + +ALL_DISTS = dict(l1=l1_dist, l2=l2_dist) + + +def signed_log1p(x): + sign = torch.sign(x) + return sign * torch.log1p(torch.abs(x)) + + +def signed_expm1(x): + sign = torch.sign(x) + return sign * torch.expm1(torch.abs(x)) + + +def cosine_schedule(t, lr_start, lr_end): + assert 0 <= t <= 1 + return lr_end + (lr_start - lr_end) * (1+np.cos(t * np.pi))/2 + + +def linear_schedule(t, lr_start, lr_end): + assert 0 <= t <= 1 + return lr_start + (lr_end - lr_start) * t + +def cycled_linear_schedule(t, lr_start, lr_end, num_cycles=2): + assert 0 <= t <= 1 + cycle_t = t * num_cycles + cycle_t = cycle_t - int(cycle_t) + if t == 1: + cycle_t = 1 + return linear_schedule(cycle_t, lr_start, lr_end) \ No newline at end of file diff --git a/dynamic_predictor/dust3r/cloud_opt/init_im_poses.py b/dynamic_predictor/dust3r/cloud_opt/init_im_poses.py new file mode 100644 index 0000000000000000000000000000000000000000..c9d3e8cb1943b0a8a1bd03493e8c154d7f7dc36d --- /dev/null +++ b/dynamic_predictor/dust3r/cloud_opt/init_im_poses.py @@ -0,0 +1,364 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). 
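+#
+# Note: this module provides two initialization strategies for the global
+# optimizer: init_from_known_poses (when ground-truth poses were preset) and
+# init_minimum_spanning_tree (pose-free, driven by pairwise confidence scores).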
+# +# -------------------------------------------------------- +# Initialization functions for global alignment +# -------------------------------------------------------- +from functools import cache + +import numpy as np +import scipy.sparse as sp +import torch +import cv2 +import roma +from tqdm import tqdm + +from dust3r.utils.geometry import geotrf, inv, get_med_dist_between_poses +from dust3r.post_process import estimate_focal_knowing_depth +from dust3r.viz import to_numpy + +from dust3r.cloud_opt.commons import edge_str, i_j_ij, compute_edge_scores +import matplotlib.pyplot as plt +import seaborn as sns + +def draw_edge_scores_map(edge_scores, save_path, n_imgs=None): + # Determine the size of the heatmap + if n_imgs is None: + n_imgs = max(max(edge) for edge in edge_scores) + 1 + + # Create a matrix to hold the scores + heatmap_matrix = np.full((n_imgs, n_imgs), np.nan) + + # Populate the matrix with the edge scores + for (i, j), score in edge_scores.items(): + heatmap_matrix[i, j] = score + + # Plotting the heatmap + plt.figure(figsize=(int(5.5*np.log(n_imgs)-2), int((5.5*np.log(n_imgs)-2) * 3 / 4))) + sns.heatmap(heatmap_matrix, annot=True, fmt=".1f", cmap="viridis", cbar=True, annot_kws={"fontsize": int(-4.2*np.log(n_imgs)+22.4)}) + plt.title("Heatmap of Edge Scores") + plt.xlabel("Node") + plt.ylabel("Node") + plt.savefig(save_path) + +@torch.no_grad() +def init_from_known_poses(self, niter_PnP=10, min_conf_thr=3): + device = self.device + + # indices of known poses + nkp, known_poses_msk, known_poses = get_known_poses(self) + # assert nkp == self.n_imgs, 'not all poses are known' + + # get all focals + nkf, _, im_focals = get_known_focals(self) + # assert nkf == self.n_imgs + im_pp = self.get_principal_points() + + best_depthmaps = {} + # init all pairwise poses + for e, (i, j) in enumerate(tqdm(self.edges, disable=not self.verbose)): + i_j = edge_str(i, j) + + # find relative pose for this pair + P1 = torch.eye(4, device=device) + msk = self.conf_i[i_j] > min(min_conf_thr, self.conf_i[i_j].min() - 0.1) + _, P2 = fast_pnp(self.pred_j[i_j], float(im_focals[i].mean()), + pp=im_pp[i], msk=msk, device=device, niter_PnP=niter_PnP) + + # align the two predicted camera with the two gt cameras + s, R, T = align_multiple_poses(torch.stack((P1, P2)), known_poses[[i, j]]) + # normally we have known_poses[i] ~= sRT_to_4x4(s,R,T,device) @ P1 + # and geotrf(sRT_to_4x4(1,R,T,device), s*P2[:3,3]) + self._set_pose(self.pw_poses, e, R, T, scale=s) + + # remember if this is a good depthmap + score = float(self.conf_i[i_j].mean()) + if score > best_depthmaps.get(i, (0,))[0]: + best_depthmaps[i] = score, i_j, s + + # init all image poses + for n in range(self.n_imgs): + # assert known_poses_msk[n] + if n in best_depthmaps: + _, i_j, scale = best_depthmaps[n] + depth = self.pred_i[i_j][:, :, 2] + self._set_depthmap(n, depth * scale) + + +@torch.no_grad() +def init_minimum_spanning_tree(self, save_score_path=None, save_score_only=False, **kw): + """ Init all camera poses (image-wise and pairwise poses) given + an initial set of pairwise estimations. 
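+    The strongest edge (largest product of mean confidences) anchors the world
+    frame; the remaining cameras are chained in along minimum-spanning-tree
+    edges via confidence-weighted rigid point registration.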
+ """ + device = self.device + if save_score_only: + eadge_and_scores = compute_edge_scores(map(i_j_ij, self.edges), self.conf_i, self.conf_j) + draw_edge_scores_map(eadge_and_scores, save_score_path) + return + pts3d, _, im_focals, im_poses = minimum_spanning_tree(self.imshapes, self.edges, + self.pred_i, self.pred_j, self.conf_i, self.conf_j, self.im_conf, self.min_conf_thr, + device, has_im_poses=self.has_im_poses, verbose=self.verbose, save_score_path=save_score_path, + **kw) + + return init_from_pts3d(self, pts3d, im_focals, im_poses) + + +def init_from_pts3d(self, pts3d, im_focals, im_poses): + # init poses + nkp, known_poses_msk, known_poses = get_known_poses(self) + if nkp == 1: + raise NotImplementedError("Would be simpler to just align everything afterwards on the single known pose") + elif nkp > 1: + # global rigid SE3 alignment + s, R, T = align_multiple_poses(im_poses[known_poses_msk], known_poses[known_poses_msk]) + trf = sRT_to_4x4(s, R, T, device=known_poses.device) + + # rotate everything + im_poses = trf @ im_poses + im_poses[:, :3, :3] /= s # undo scaling on the rotation part + for img_pts3d in pts3d: + img_pts3d[:] = geotrf(trf, img_pts3d) + else: pass # no known poses + + # set all pairwise poses + for e, (i, j) in enumerate(self.edges): + i_j = edge_str(i, j) + # compute transform that goes from cam to world + s, R, T = rigid_points_registration(self.pred_i[i_j], pts3d[i], conf=self.conf_i[i_j]) + self._set_pose(self.pw_poses, e, R, T, scale=s) + + # take into account the scale normalization + s_factor = self.get_pw_norm_scale_factor() + im_poses[:, :3, 3] *= s_factor # apply downscaling factor + for img_pts3d in pts3d: + img_pts3d *= s_factor + + # init all image poses + if self.has_im_poses: + for i in range(self.n_imgs): + cam2world = im_poses[i] + depth = geotrf(inv(cam2world), pts3d[i])[..., 2] + self._set_depthmap(i, depth) + self._set_pose(self.im_poses, i, cam2world) + if im_focals[i] is not None: + if not self.shared_focal: + self._set_focal(i, im_focals[i]) + if self.shared_focal: + self._set_focal(0, sum(im_focals) / self.n_imgs) + if self.n_imgs > 2: + self._set_init_depthmap() + + if self.verbose: + with torch.no_grad(): + print(' init loss =', float(self())) + + +def minimum_spanning_tree(imshapes, edges, pred_i, pred_j, conf_i, conf_j, im_conf, min_conf_thr, + device, has_im_poses=True, niter_PnP=10, verbose=True, save_score_path=None): + n_imgs = len(imshapes) + eadge_and_scores = compute_edge_scores(map(i_j_ij, edges), conf_i, conf_j) + sparse_graph = -dict_to_sparse_graph(eadge_and_scores) + msp = sp.csgraph.minimum_spanning_tree(sparse_graph).tocoo() + + # temp variable to store 3d points + pts3d = [None] * len(imshapes) + + todo = sorted(zip(-msp.data, msp.row, msp.col)) # sorted edges + im_poses = [None] * n_imgs + im_focals = [None] * n_imgs + + # init with strongest edge + score, i, j = todo.pop() + # if verbose: + # print(f' init edge ({i}*,{j}*) {score=}') + if save_score_path is not None: + draw_edge_scores_map(eadge_and_scores, save_score_path, n_imgs=n_imgs) + save_tree_path = save_score_path.replace(".png", "_tree.txt") + with open(save_tree_path, "w") as f: + f.write(f'init edge ({i}*,{j}*) {score=}\n') + i_j = edge_str(i, j) + pts3d[i] = pred_i[i_j].clone() # the first one is set to be world coordinate + pts3d[j] = pred_j[i_j].clone() + done = {i, j} + if has_im_poses: + im_poses[i] = torch.eye(4, device=device) + im_focals[i] = estimate_focal(pred_i[i_j]) + + # set initial pointcloud based on pairwise graph + msp_edges = [(i, j)] + 
+    while todo:
+        # each time, process the next-best edge
+        score, i, j = todo.pop()
+        i_j = edge_str(i, j)
+
+        if im_focals[i] is None:
+            # estimate the focal of image i from the current edge's prediction
+            im_focals[i] = estimate_focal(pred_i[i_j])
+
+        if i in done:  # the first frame is already set, align the second frame to it
+            # if verbose:
+            #     print(f' init edge ({i},{j}*) {score=}')
+            if save_score_path is not None:
+                with open(save_tree_path, "a") as f:
+                    f.write(f'init edge ({i},{j}*) {score=}\n')
+            assert j not in done
+            # align pred[i] with pts3d[i], and then set j accordingly
+            s, R, T = rigid_points_registration(pred_i[i_j], pts3d[i], conf=conf_i[i_j])
+            trf = sRT_to_4x4(s, R, T, device)
+            pts3d[j] = geotrf(trf, pred_j[i_j])
+            done.add(j)
+            msp_edges.append((i, j))
+
+            if has_im_poses and im_poses[i] is None:
+                im_poses[i] = sRT_to_4x4(1, R, T, device)
+
+        elif j in done:  # the second frame is already set, align the first frame to it
+            # if verbose:
+            #     print(f' init edge ({i}*,{j}) {score=}')
+            if save_score_path is not None:
+                with open(save_tree_path, "a") as f:
+                    f.write(f'init edge ({i}*,{j}) {score=}\n')
+            assert i not in done
+            s, R, T = rigid_points_registration(pred_j[i_j], pts3d[j], conf=conf_j[i_j])
+            trf = sRT_to_4x4(s, R, T, device)
+            pts3d[i] = geotrf(trf, pred_i[i_j])
+            done.add(i)
+            msp_edges.append((i, j))
+
+            if has_im_poses and im_poses[i] is None:
+                im_poses[i] = sRT_to_4x4(1, R, T, device)
+        else:
+            # neither end is placed yet: try this edge again later
+            todo.insert(0, (score, i, j))
+
+    if has_im_poses:
+        # complete all missing information
+        pair_scores = list(sparse_graph.values())  # scores are negated: lower is better
+        edges_from_best_to_worse = np.array(list(sparse_graph.keys()))[np.argsort(pair_scores)]
+        for i, j in edges_from_best_to_worse.tolist():
+            if im_focals[i] is None:
+                im_focals[i] = estimate_focal(pred_i[edge_str(i, j)])
+
+        for i in range(n_imgs):
+            if im_poses[i] is None:
+                msk = im_conf[i] > min_conf_thr
+                res = fast_pnp(pts3d[i], im_focals[i], msk=msk, device=device, niter_PnP=niter_PnP)
+                if res:
+                    im_focals[i], im_poses[i] = res
+            if im_poses[i] is None:
+                im_poses[i] = torch.eye(4, device=device)
+        im_poses = torch.stack(im_poses)
+    else:
+        im_poses = im_focals = None
+
+    return pts3d, msp_edges, im_focals, im_poses
+
+
+def dict_to_sparse_graph(dic):
+    n_imgs = max(max(e) for e in dic) + 1
+    res = sp.dok_array((n_imgs, n_imgs))
+    for edge, value in dic.items():
+        res[edge] = value
+    return res
+
+
+def rigid_points_registration(pts1, pts2, conf):
+    R, T, s = roma.rigid_points_registration(
+        pts1.reshape(-1, 3), pts2.reshape(-1, 3), weights=conf.ravel(), compute_scaling=True)
+    return s, R, T  # return un-scaled (R, T)
+
+
+def sRT_to_4x4(scale, R, T, device):
+    trf = torch.eye(4, device=device)
+    trf[:3, :3] = R * scale
+    trf[:3, 3] = T.ravel()  # doesn't need scaling
+    return trf
+
+
+def estimate_focal(pts3d_i, pp=None):
+    if pp is None:
+        H, W, THREE = pts3d_i.shape
+        assert THREE == 3
+        pp = torch.tensor((W/2, H/2), device=pts3d_i.device)
+    focal = estimate_focal_knowing_depth(pts3d_i.unsqueeze(0), pp.unsqueeze(0), focal_mode='weiszfeld').ravel()
+    return float(focal)
+
+
+@cache
+def pixel_grid(H, W):
+    return np.mgrid[:W, :H].T.astype(np.float32)
+
+
+def fast_pnp(pts3d, focal, msk, device, pp=None, niter_PnP=10):
+    # extract camera poses and focals with RANSAC-PnP
+    if msk.sum() < 4:
+        return None  # we need at least 4 points for PnP
+    pts3d, msk = map(to_numpy, (pts3d, msk))
+
+    H, W, THREE = pts3d.shape
+    assert THREE == 3
+    pixels = pixel_grid(H, W)
+
+    if focal is None:
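+        # When no focal prior is available, fast_pnp sweeps candidate focal
+        # lengths on a log scale around the image size and keeps the candidate
+        # with the most PnP inliers. For example, a 512-px image yields 21
+        # candidates spanning roughly 256..1536 px (mirrors the line below):
+        #   tentative_focals = np.geomspace(512/2, 512*3, 21)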
S = max(W, H) + tentative_focals = np.geomspace(S/2, S*3, 21) + else: + tentative_focals = [focal] + + if pp is None: + pp = (W/2, H/2) + else: + pp = to_numpy(pp) + + best = 0, + for focal in tentative_focals: + K = np.float32([(focal, 0, pp[0]), (0, focal, pp[1]), (0, 0, 1)]) + try: + success, R, T, inliers = cv2.solvePnPRansac(pts3d[msk], pixels[msk], K, None, + iterationsCount=niter_PnP, reprojectionError=5, flags=cv2.SOLVEPNP_SQPNP) + except cv2.error: + continue + + if not success: + continue + + score = len(inliers) + if success and score > best[0]: + best = score, R, T, focal + + if not best[0]: + return None + + _, R, T, best_focal = best + R = cv2.Rodrigues(R)[0] # world to cam + R, T = map(torch.from_numpy, (R, T)) + return best_focal, inv(sRT_to_4x4(1, R, T, device)) # cam to world + + +def get_known_poses(self): + if self.has_im_poses: + known_poses_msk = torch.tensor([not (p.requires_grad) for p in self.im_poses]) + known_poses = self.get_im_poses() + return known_poses_msk.sum(), known_poses_msk, known_poses + else: + return 0, None, None + + +def get_known_focals(self): + if self.has_im_poses: + known_focal_msk = self.get_known_focal_mask() + known_focals = self.get_focals() + return known_focal_msk.sum(), known_focal_msk, known_focals + else: + return 0, None, None + + +def align_multiple_poses(src_poses, target_poses): + N = len(src_poses) + assert src_poses.shape == target_poses.shape == (N, 4, 4) + + def center_and_z(poses): + eps = get_med_dist_between_poses(poses) / 100 + return torch.cat((poses[:, :3, 3], poses[:, :3, 3] + eps*poses[:, :3, 2])) + R, T, s = roma.rigid_points_registration(center_and_z(src_poses), center_and_z(target_poses), compute_scaling=True) + return s, R, T diff --git a/dynamic_predictor/dust3r/cloud_opt/modular_optimizer.py b/dynamic_predictor/dust3r/cloud_opt/modular_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..e43c28fcf2660512b95df85dc85d41f7cf669943 --- /dev/null +++ b/dynamic_predictor/dust3r/cloud_opt/modular_optimizer.py @@ -0,0 +1,147 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). +# +# -------------------------------------------------------- +# Slower implementation of the global alignment that allows to freeze partial poses/intrinsics +# -------------------------------------------------------- +import numpy as np +import torch +import torch.nn as nn + +from dust3r.cloud_opt.base_opt import BasePCOptimizer +from dust3r.utils.geometry import geotrf +from dust3r.utils.device import to_cpu, to_numpy +from dust3r.utils.geometry import depthmap_to_pts3d +# from dust3r.cloud_opt.optimizer import PointCloudOptimizer, tum_to_pose_matrix, ParameterStack, xy_grid + +class ModularPointCloudOptimizer (BasePCOptimizer): + """ Optimize a global scene, given a list of pairwise observations. 
+ Unlike PointCloudOptimizer, you can fix parts of the optimization process (partial poses/intrinsics) + Graph node: images + Graph edges: observations = (pred1, pred2) + """ + + def __init__(self, *args, optimize_pp=False, fx_and_fy=False, focal_brake=20, **kwargs): + super().__init__(*args, **kwargs) + self.has_im_poses = True # by definition of this class + self.focal_brake = focal_brake + + # adding thing to optimize + self.im_depthmaps = nn.ParameterList(torch.randn(H, W)/10-3 for H, W in self.imshapes) # log(depth) + self.im_poses = nn.ParameterList(self.rand_pose(self.POSE_DIM) for _ in range(self.n_imgs)) # camera poses + default_focals = [self.focal_brake * np.log(max(H, W)) for H, W in self.imshapes] + self.im_focals = nn.ParameterList(torch.FloatTensor([f, f] if fx_and_fy else [ + f]) for f in default_focals) # camera intrinsics + self.im_pp = nn.ParameterList(torch.zeros((2,)) for _ in range(self.n_imgs)) # camera intrinsics + self.im_pp.requires_grad_(optimize_pp) + + def preset_pose(self, known_poses, pose_msk=None): # cam-to-world + if isinstance(known_poses, torch.Tensor) and known_poses.ndim == 2: + known_poses = [known_poses] + if known_poses.shape[-1] == 7: # xyz wxyz + known_poses = [tum_to_pose_matrix(pose) for pose in known_poses] + for idx, pose in zip(self._get_msk_indices(pose_msk), known_poses): + if self.verbose: + print(f' (setting pose #{idx} = {pose[:3,3]})') + self._no_grad(self._set_pose(self.im_poses, idx, torch.tensor(pose), force=True)) + + # normalize scale if there's less than 1 known pose + n_known_poses = sum((p.requires_grad is False) for p in self.im_poses) + self.norm_pw_scale = (n_known_poses <= 1) + + def preset_intrinsics(self, known_intrinsics, msk=None): + if isinstance(known_intrinsics, torch.Tensor) and known_intrinsics.ndim == 2: + known_intrinsics = [known_intrinsics] + for K in known_intrinsics: + assert K.shape == (3, 3) + self.preset_focal([K.diagonal()[:2].mean() for K in known_intrinsics], msk) + self.preset_principal_point([K[:2, 2] for K in known_intrinsics], msk) + + def preset_focal(self, known_focals, msk=None): + for idx, focal in zip(self._get_msk_indices(msk), known_focals): + if self.verbose: + print(f' (setting focal #{idx} = {focal})') + self._no_grad(self._set_focal(idx, focal, force=True)) + + def preset_principal_point(self, known_pp, msk=None): + for idx, pp in zip(self._get_msk_indices(msk), known_pp): + if self.verbose: + print(f' (setting principal point #{idx} = {pp})') + self._no_grad(self._set_principal_point(idx, pp, force=True)) + + def _no_grad(self, tensor): + return tensor.requires_grad_(False) + + def _get_msk_indices(self, msk): + if msk is None: + return range(self.n_imgs) + elif isinstance(msk, int): + return [msk] + elif isinstance(msk, (tuple, list)): + return self._get_msk_indices(np.array(msk)) + elif msk.dtype in (bool, torch.bool, np.bool_): + assert len(msk) == self.n_imgs + return np.where(msk)[0] + elif np.issubdtype(msk.dtype, np.integer): + return msk + else: + raise ValueError(f'bad {msk=}') + + def _set_focal(self, idx, focal, force=False): + param = self.im_focals[idx] + if param.requires_grad or force: # can only init a parameter not already initialized + param.data[:] = self.focal_brake * np.log(focal) + return param + + def get_focals(self): + log_focals = torch.stack(list(self.im_focals), dim=0) + return (log_focals / self.focal_brake).exp() + + def _set_principal_point(self, idx, pp, force=False): + param = self.im_pp[idx] + H, W = self.imshapes[idx] + if param.requires_grad or force: # 
can only init a parameter not already initialized
+            param.data[:] = to_cpu(to_numpy(pp) - (W/2, H/2)) / 10
+        return param
+
+    def get_principal_points(self):
+        return torch.stack([pp.new((W/2, H/2))+10*pp for pp, (H, W) in zip(self.im_pp, self.imshapes)])
+
+    def get_intrinsics(self):
+        K = torch.zeros((self.n_imgs, 3, 3), device=self.device)
+        focals = self.get_focals().view(self.n_imgs, -1)
+        K[:, 0, 0] = focals[:, 0]
+        K[:, 1, 1] = focals[:, -1]
+        K[:, :2, 2] = self.get_principal_points()
+        K[:, 2, 2] = 1
+        return K
+
+    def get_im_poses(self):  # cam to world
+        cam2world = self._get_poses(torch.stack(list(self.im_poses)))
+        return cam2world
+
+    def _set_depthmap(self, idx, depth, force=False):
+        param = self.im_depthmaps[idx]
+        if param.requires_grad or force:  # can only init a parameter not already initialized
+            param.data[:] = depth.log().nan_to_num(neginf=0)
+        return param
+
+    def get_depthmaps(self):
+        return [d.exp() for d in self.im_depthmaps]
+
+    def depth_to_pts3d(self):
+        # Get depths and projection params if not provided
+        focals = self.get_focals()
+        pp = self.get_principal_points()
+        im_poses = self.get_im_poses()
+        depth = self.get_depthmaps()
+
+        # convert focal to (1,2,H,W) constant field
+        def focal_ex(i): return focals[i][..., None, None].expand(1, *focals[i].shape, *self.imshapes[i])
+        # get pointmaps in camera frame
+        rel_ptmaps = [depthmap_to_pts3d(depth[i][None], focal_ex(i), pp=pp[i:i+1])[0] for i in range(im_poses.shape[0])]
+        # project to world frame
+        return [geotrf(pose, ptmap) for pose, ptmap in zip(im_poses, rel_ptmaps)]
+
+    def get_pts3d(self):
+        return self.depth_to_pts3d()
diff --git a/dynamic_predictor/dust3r/cloud_opt/motion_mask_from_raft.py b/dynamic_predictor/dust3r/cloud_opt/motion_mask_from_raft.py
new file mode 100644
index 0000000000000000000000000000000000000000..059c36f99229f1423daac32d461387532572e532
--- /dev/null
+++ b/dynamic_predictor/dust3r/cloud_opt/motion_mask_from_raft.py
@@ -0,0 +1,95 @@
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from tqdm import tqdm
+import contextlib
+
+from dust3r.cloud_opt.base_opt import BasePCOptimizer, edge_str
+from dust3r.cloud_opt.pair_viewer import PairViewer
+from dust3r.cloud_opt.camera_estimator import camera_parameter_estimation
+from dust3r.utils.geometry import xy_grid, geotrf, depthmap_to_pts3d
+from dust3r.utils.device import to_cpu, to_numpy
+from dust3r.utils.goem_opt import DepthBasedWarping, OccMask, WarpImage, depth_regularization_si_weighted, tum_to_pose_matrix
+from third_party.raft import load_RAFT
+from dust3r.utils.image import rgb
+
+
+def get_flow(imgs1, imgs2):  # TODO: test with gt flow
+    # print('precomputing flow...')
+    device = 'cuda' if torch.cuda.is_available() else 'cpu'
+    get_valid_flow_mask = OccMask(th=3.0)
+    pair_imgs = [np.stack(imgs1), np.stack(imgs2)]  # one stack per side of each pair
+
+    flow_net = load_RAFT("third_party/RAFT/models/Tartan-C-T-TSKH-spring540x960-M.pth")
+    flow_net = flow_net.to(device)
+    flow_net.eval()
+
+    with torch.no_grad():
+        chunk_size = 12
+        flow_ij = []
+        flow_ji = []
+        num_pairs = len(pair_imgs[0])
+        for i in range(0, num_pairs, chunk_size):
+            end_idx = min(i + chunk_size, num_pairs)
+            imgs_ij = [torch.tensor(pair_imgs[0][i:end_idx]).float().to(device),
+                       torch.tensor(pair_imgs[1][i:end_idx]).float().to(device)]
+            flow_ij.append(flow_net(imgs_ij[0].permute(0, 3,
1, 2) * 255, + iters=20, test_mode=True)[1]) + + flow_ij = torch.cat(flow_ij, dim=0) + flow_ji = torch.cat(flow_ji, dim=0) + valid_mask_i = get_valid_flow_mask(flow_ij, flow_ji) + valid_mask_j = get_valid_flow_mask(flow_ji, flow_ij) + # print('flow precomputed') + # delete the flow net + if flow_net is not None: del flow_net + return flow_ij, flow_ji, valid_mask_i, valid_mask_j + +def get_motion_mask_from_pairs(batch_result, motion_mask_thre=0.35): + view1, view2, pred1, pred2 = batch_result['view1'], batch_result['view2'], batch_result['pred1'], batch_result['pred2'] + imgs1 = [rgb(view1['img'][i]) for i in range(view1['img'].shape[0])] + imgs2 = [rgb(view2['img'][i]) for i in range(view2['img'].shape[0])] + + flow_ij, flow_ji, valid_mask_i, valid_mask_j = get_flow(imgs1, imgs2) + + depth_wrapper = DepthBasedWarping() + print('precomputing self motion mask...') + dynamic_masks = [] + for pair_i in range(view1['img'].shape[0]): + + v1 = {} + v2 = {} + p1 = {} + p2 = {} + + for key in ['true_shape']: + v1[key] = view1[key][pair_i] + v2[key] = view2[key][pair_i] + + for key in pred1.keys(): + p1[key] = pred1[key][pair_i] + + for key in pred2.keys(): + p2[key] = pred2[key][pair_i] + + conf, K, focal, R2, T2, depth_1, depth_2 = camera_parameter_estimation(v1, v2, p1, p2, p2['conf']) + K = torch.tensor(K).cuda()[None] + T2 = T2[None, :, None] + depth_1 = depth_1[None,None] + R2 = R2[None] + ego_flow_1_2, _ = depth_wrapper(torch.eye(3).cuda()[None], torch.zeros_like(T2), R2, T2, 1 / (depth_1 + 1e-6), K, torch.linalg.inv(K)) + + err_map_i = torch.norm(ego_flow_1_2[:, :2, ...] - flow_ij[pair_i], dim=1) + # normalize the error map for each pair + err_map_i = (err_map_i - err_map_i.amin(dim=(1, 2), keepdim=True)) / (err_map_i.amax(dim=(1, 2), keepdim=True) - err_map_i.amin(dim=(1, 2), keepdim=True)) + + + dynamic_masks.append(err_map_i > motion_mask_thre) + + return dynamic_masks + \ No newline at end of file diff --git a/dynamic_predictor/dust3r/cloud_opt/optimizer.py b/dynamic_predictor/dust3r/cloud_opt/optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..05e490c1edabcb77dd5e040c79c01bd8a944a734 --- /dev/null +++ b/dynamic_predictor/dust3r/cloud_opt/optimizer.py @@ -0,0 +1,782 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from tqdm import tqdm +import contextlib + +from dust3r.cloud_opt.base_opt import BasePCOptimizer, edge_str +from dust3r.cloud_opt.pair_viewer import PairViewer +from dust3r.utils.geometry import xy_grid, geotrf, depthmap_to_pts3d +from dust3r.utils.device import to_cpu, to_numpy +from dust3r.utils.goem_opt import DepthBasedWarping, OccMask, WarpImage, depth_regularization_si_weighted, tum_to_pose_matrix +from third_party.raft import load_RAFT +# from sam2.build_sam import build_sam2_video_predictor +# sam2_checkpoint = "third_party/sam2/checkpoints/sam2.1_hiera_large.pt" +# model_cfg = "configs/sam2.1/sam2.1_hiera_l.yaml" + +def smooth_L1_loss_fn(estimate, gt, mask, beta=1.0, per_pixel_thre=50.): + loss_raw_shape = F.smooth_l1_loss(estimate*mask, gt*mask, beta=beta, reduction='none') + if per_pixel_thre > 0: + per_pixel_mask = (loss_raw_shape < per_pixel_thre) * mask + else: + per_pixel_mask = mask + return torch.sum(loss_raw_shape * per_pixel_mask) / torch.sum(per_pixel_mask) + +def mse_loss_fn(estimate, gt, mask): + v = torch.sum((estimate*mask-gt*mask)**2) / torch.sum(mask) + return v # , v.item() + +class PointCloudOptimizer(BasePCOptimizer): + """ Optimize a global scene, given a list of pairwise 
observations.
+    Graph node: images
+    Graph edges: observations = (pred1, pred2)
+    """
+
+    def __init__(self, *args, optimize_pp=False, focal_break=20, shared_focal=False, flow_loss_fn='smooth_l1', flow_loss_weight=0.0,
+                 depth_regularize_weight=0.0, num_total_iter=300, temporal_smoothing_weight=0, translation_weight=0.1, flow_loss_start_epoch=0.15, flow_loss_thre=50,
+                 sintel_ckpt=False, use_self_mask=False, pxl_thre=50, sam2_mask_refine=True, motion_mask_thre=0.35, batchify=True, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        self.has_im_poses = True  # by definition of this class
+        self.focal_break = focal_break
+        self.num_total_iter = num_total_iter
+        self.temporal_smoothing_weight = temporal_smoothing_weight
+        self.translation_weight = translation_weight
+        self.flow_loss_flag = False
+        self.flow_loss_start_epoch = flow_loss_start_epoch
+        self.flow_loss_thre = flow_loss_thre
+        self.optimize_pp = optimize_pp
+        self.pxl_thre = pxl_thre
+        self.motion_mask_thre = motion_mask_thre
+        self.batchify = batchify
+
+        # parameters to optimize
+        self.im_depthmaps = nn.ParameterList(torch.randn(H, W)/10-3 for H, W in self.imshapes)  # log(depth)
+        self.im_poses = nn.ParameterList(self.rand_pose(self.POSE_DIM) for _ in range(self.n_imgs))  # camera poses
+        self.shared_focal = shared_focal
+        if self.shared_focal:
+            self.im_focals = nn.ParameterList(torch.FloatTensor(
+                [self.focal_break*np.log(max(H, W))]) for H, W in self.imshapes[:1])  # camera intrinsics
+        else:
+            self.im_focals = nn.ParameterList(torch.FloatTensor(
+                [self.focal_break*np.log(max(H, W))]) for H, W in self.imshapes)  # camera intrinsics
+        self.im_pp = nn.ParameterList(torch.zeros((2,)) for _ in range(self.n_imgs))  # camera intrinsics
+        self.im_pp.requires_grad_(optimize_pp)
+
+        self.imshape = self.imshapes[0]
+        im_areas = [h*w for h, w in self.imshapes]
+        self.max_area = max(im_areas)
+
+        # stack the parameters for batched optimization
+        if self.batchify:
+            self.im_depthmaps = ParameterStack(self.im_depthmaps, is_param=True, fill=self.max_area)  # (num_imgs, H*W)
+            self.im_poses = ParameterStack(self.im_poses, is_param=True)
+            self.im_focals = ParameterStack(self.im_focals, is_param=True)
+            self.im_pp = ParameterStack(self.im_pp, is_param=True)
+            self.register_buffer('_pp', torch.tensor([(w/2, h/2) for h, w in self.imshapes]))
+            self.register_buffer('_grid', ParameterStack(
+                [xy_grid(W, H, device=self.device) for H, W in self.imshapes], fill=self.max_area))
+            # pre-compute pixel weights
+            self.register_buffer('_weight_i', ParameterStack(
+                [self.conf_trf(self.conf_i[i_j]) for i_j in self.str_edges], fill=self.max_area))
+            self.register_buffer('_weight_j', ParameterStack(
+                [self.conf_trf(self.conf_j[i_j]) for i_j in self.str_edges], fill=self.max_area))
+            # pre-compute the stacked pairwise predictions
+            self.register_buffer('_stacked_pred_i', ParameterStack(self.pred_i, self.str_edges, fill=self.max_area))
+            self.register_buffer('_stacked_pred_j', ParameterStack(self.pred_j, self.str_edges, fill=self.max_area))
+            self.register_buffer('_ei', torch.tensor([i for i, j in self.edges]))
+            self.register_buffer('_ej', torch.tensor([j for i, j in self.edges]))
+            self.total_area_i = sum([im_areas[i] for i, j in self.edges])
+            self.total_area_j = sum([im_areas[j] for i, j in self.edges])
+
+        self.depth_wrapper = DepthBasedWarping()
+        self.backward_warper = WarpImage()
+        self.depth_regularizer = depth_regularization_si_weighted
+        if flow_loss_fn == 'smooth_l1':
+            self.flow_loss_fn = smooth_L1_loss_fn
+        elif flow_loss_fn == 'mse':
+            self.flow_loss_fn = mse_loss_fn
+
+        self.flow_loss_weight =
flow_loss_weight + self.depth_regularize_weight = depth_regularize_weight + if self.flow_loss_weight > 0: + self.flow_ij, self.flow_ji, self.flow_valid_mask_i, self.flow_valid_mask_j = self.get_flow(sintel_ckpt) # (num_pairs, 2, H, W) + if use_self_mask: self.get_motion_mask_from_pairs(*args) + # turn off the gradient for the flow + self.flow_ij.requires_grad_(False) + self.flow_ji.requires_grad_(False) + self.flow_valid_mask_i.requires_grad_(False) + self.flow_valid_mask_j.requires_grad_(False) + if sam2_mask_refine: + with torch.no_grad(): + self.refine_motion_mask_w_sam2() + else: + self.sam2_dynamic_masks = None + + def get_flow(self, sintel_ckpt=False): #TODO: test with gt flow + print('precomputing flow...') + device = 'cuda' if torch.cuda.is_available() else 'cpu' + get_valid_flow_mask = OccMask(th=3.0) + pair_imgs = [np.stack(self.imgs)[self._ei], np.stack(self.imgs)[self._ej]] + + flow_net = load_RAFT() if sintel_ckpt else load_RAFT("third_party/RAFT/models/Tartan-C-T-TSKH-spring540x960-M.pth") + flow_net = flow_net.to(device) + flow_net.eval() + + with torch.no_grad(): + chunk_size = 12 + flow_ij = [] + flow_ji = [] + num_pairs = len(pair_imgs[0]) + for i in tqdm(range(0, num_pairs, chunk_size)): + end_idx = min(i + chunk_size, num_pairs) + imgs_ij = [torch.tensor(pair_imgs[0][i:end_idx]).float().to(device), + torch.tensor(pair_imgs[1][i:end_idx]).float().to(device)] + flow_ij.append(flow_net(imgs_ij[0].permute(0, 3, 1, 2) * 255, + imgs_ij[1].permute(0, 3, 1, 2) * 255, + iters=20, test_mode=True)[1]) + flow_ji.append(flow_net(imgs_ij[1].permute(0, 3, 1, 2) * 255, + imgs_ij[0].permute(0, 3, 1, 2) * 255, + iters=20, test_mode=True)[1]) + + flow_ij = torch.cat(flow_ij, dim=0) + flow_ji = torch.cat(flow_ji, dim=0) + valid_mask_i = get_valid_flow_mask(flow_ij, flow_ji) + valid_mask_j = get_valid_flow_mask(flow_ji, flow_ij) + print('flow precomputed') + # delete the flow net + if flow_net is not None: del flow_net + torch.cuda.empty_cache() + return flow_ij, flow_ji, valid_mask_i, valid_mask_j + + def get_motion_mask_from_pairs(self, view1, view2, pred1, pred2): + assert self.is_symmetrized, 'only support symmetric case' + symmetry_pairs_idx = [(i, i+len(self.edges)//2) for i in range(len(self.edges)//2)] + intrinsics_i = [] + intrinsics_j = [] + R_i = [] + R_j = [] + T_i = [] + T_j = [] + depth_maps_i = [] + depth_maps_j = [] + for i, j in tqdm(symmetry_pairs_idx): + new_view1 = {} + new_view2 = {} + for key in view1.keys(): + if isinstance(view1[key], list): + new_view1[key] = [view1[key][i], view1[key][j]] + new_view2[key] = [view2[key][i], view2[key][j]] + elif isinstance(view1[key], torch.Tensor): + new_view1[key] = torch.stack([view1[key][i], view1[key][j]]) + new_view2[key] = torch.stack([view2[key][i], view2[key][j]]) + new_view1['idx'] = [0, 1] + new_view2['idx'] = [1, 0] + new_pred1 = {} + new_pred2 = {} + for key in pred1.keys(): + if isinstance(pred1[key], list): + new_pred1[key] = [pred1[key][i], pred1[key][j]] + elif isinstance(pred1[key], torch.Tensor): + new_pred1[key] = torch.stack([pred1[key][i], pred1[key][j]]) + for key in pred2.keys(): + if isinstance(pred2[key], list): + new_pred2[key] = [pred2[key][i], pred2[key][j]] + elif isinstance(pred2[key], torch.Tensor): + new_pred2[key] = torch.stack([pred2[key][i], pred2[key][j]]) + pair_viewer = PairViewer(new_view1, new_view2, new_pred1, new_pred2, verbose=False) + intrinsics_i.append(pair_viewer.get_intrinsics()[0]) + intrinsics_j.append(pair_viewer.get_intrinsics()[1]) + R_i.append(pair_viewer.get_im_poses()[0][:3, 
:3]) + R_j.append(pair_viewer.get_im_poses()[1][:3, :3]) + T_i.append(pair_viewer.get_im_poses()[0][:3, 3:]) + T_j.append(pair_viewer.get_im_poses()[1][:3, 3:]) + depth_maps_i.append(pair_viewer.get_depthmaps()[0]) + depth_maps_j.append(pair_viewer.get_depthmaps()[1]) + + self.intrinsics_i = torch.stack(intrinsics_i).to(self.flow_ij.device) + self.intrinsics_j = torch.stack(intrinsics_j).to(self.flow_ij.device) + self.R_i = torch.stack(R_i).to(self.flow_ij.device) + self.R_j = torch.stack(R_j).to(self.flow_ij.device) + self.T_i = torch.stack(T_i).to(self.flow_ij.device) + self.T_j = torch.stack(T_j).to(self.flow_ij.device) + self.depth_maps_i = torch.stack(depth_maps_i).unsqueeze(1).to(self.flow_ij.device) + self.depth_maps_j = torch.stack(depth_maps_j).unsqueeze(1).to(self.flow_ij.device) + + ego_flow_1_2, _ = self.depth_wrapper(self.R_i, self.T_i, self.R_j, self.T_j, 1 / (self.depth_maps_i + 1e-6), self.intrinsics_j, torch.linalg.inv(self.intrinsics_i)) + ego_flow_2_1, _ = self.depth_wrapper(self.R_j, self.T_j, self.R_i, self.T_i, 1 / (self.depth_maps_j + 1e-6), self.intrinsics_i, torch.linalg.inv(self.intrinsics_j)) + + err_map_i = torch.norm(ego_flow_1_2[:, :2, ...] - self.flow_ij[:len(symmetry_pairs_idx)], dim=1) + err_map_j = torch.norm(ego_flow_2_1[:, :2, ...] - self.flow_ji[:len(symmetry_pairs_idx)], dim=1) + # normalize the error map for each pair + err_map_i = (err_map_i - err_map_i.amin(dim=(1, 2), keepdim=True)) / (err_map_i.amax(dim=(1, 2), keepdim=True) - err_map_i.amin(dim=(1, 2), keepdim=True)) + err_map_j = (err_map_j - err_map_j.amin(dim=(1, 2), keepdim=True)) / (err_map_j.amax(dim=(1, 2), keepdim=True) - err_map_j.amin(dim=(1, 2), keepdim=True)) + self.dynamic_masks = [[] for _ in range(self.n_imgs)] + + for i, j in symmetry_pairs_idx: + i_idx = self._ei[i] + j_idx = self._ej[i] + self.dynamic_masks[i_idx].append(err_map_i[i]) + self.dynamic_masks[j_idx].append(err_map_j[i]) + + for i in range(self.n_imgs): + self.dynamic_masks[i] = torch.stack(self.dynamic_masks[i]).mean(dim=0) > self.motion_mask_thre + + def refine_motion_mask_w_sam2(self): + device = 'cuda' if torch.cuda.is_available() else 'cpu' + + # Save previous TF32 settings + if device == 'cuda': + prev_allow_tf32 = torch.backends.cuda.matmul.allow_tf32 + prev_allow_cudnn_tf32 = torch.backends.cudnn.allow_tf32 + # Enable TF32 for Ampere GPUs + if torch.cuda.get_device_properties(0).major >= 8: + torch.backends.cuda.matmul.allow_tf32 = True + torch.backends.cudnn.allow_tf32 = True + + try: + autocast_dtype = torch.bfloat16 if device == 'cuda' else torch.float32 + with torch.autocast(device_type=device, dtype=autocast_dtype): + predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device=device) + frame_tensors = torch.from_numpy(np.array((self.imgs))).permute(0, 3, 1, 2).to(device) + inference_state = predictor.init_state(video_path=frame_tensors) + mask_list = [self.dynamic_masks[i] for i in range(self.n_imgs)] + + ann_obj_id = 1 + self.sam2_dynamic_masks = [[] for _ in range(self.n_imgs)] + + # Process even frames + predictor.reset_state(inference_state) + for idx, mask in enumerate(mask_list): + if idx % 2 == 1: + _, out_obj_ids, out_mask_logits = predictor.add_new_mask( + inference_state, + frame_idx=idx, + obj_id=ann_obj_id, + mask=mask, + ) + video_segments = {} + for out_frame_idx, out_obj_ids, out_mask_logits in predictor.propagate_in_video(inference_state, start_frame_idx=0): + video_segments[out_frame_idx] = { + out_obj_id: (out_mask_logits[i] > 0.0).cpu().numpy() + for i, out_obj_id in 
enumerate(out_obj_ids) + } + for out_frame_idx in range(self.n_imgs): + if out_frame_idx % 2 == 0: + self.sam2_dynamic_masks[out_frame_idx] = video_segments[out_frame_idx][ann_obj_id] + + # Process odd frames + predictor.reset_state(inference_state) + for idx, mask in enumerate(mask_list): + if idx % 2 == 0: + _, out_obj_ids, out_mask_logits = predictor.add_new_mask( + inference_state, + frame_idx=idx, + obj_id=ann_obj_id, + mask=mask, + ) + video_segments = {} + for out_frame_idx, out_obj_ids, out_mask_logits in predictor.propagate_in_video(inference_state, start_frame_idx=0): + video_segments[out_frame_idx] = { + out_obj_id: (out_mask_logits[i] > 0.0).cpu().numpy() + for i, out_obj_id in enumerate(out_obj_ids) + } + for out_frame_idx in range(self.n_imgs): + if out_frame_idx % 2 == 1: + self.sam2_dynamic_masks[out_frame_idx] = video_segments[out_frame_idx][ann_obj_id] + + # Update dynamic masks + for i in range(self.n_imgs): + self.sam2_dynamic_masks[i] = torch.from_numpy(self.sam2_dynamic_masks[i][0]).to(device) + self.dynamic_masks[i] = self.dynamic_masks[i].to(device) + self.dynamic_masks[i] = self.dynamic_masks[i] | self.sam2_dynamic_masks[i] + + # Clean up + del predictor + finally: + # Restore previous TF32 settings + if device == 'cuda': + torch.backends.cuda.matmul.allow_tf32 = prev_allow_tf32 + torch.backends.cudnn.allow_tf32 = prev_allow_cudnn_tf32 + + + def _check_all_imgs_are_selected(self, msk): + self.msk = torch.from_numpy(np.array(msk, dtype=bool)).to(self.device) + assert np.all(self._get_msk_indices(msk) == np.arange(self.n_imgs)), 'incomplete mask!' + pass + + def preset_pose(self, known_poses, pose_msk=None, requires_grad=False): # cam-to-world + self._check_all_imgs_are_selected(pose_msk) + + if isinstance(known_poses, torch.Tensor) and known_poses.ndim == 2: + known_poses = [known_poses] + if known_poses.shape[-1] == 7: # xyz wxyz + known_poses = [tum_to_pose_matrix(pose) for pose in known_poses] + for idx, pose in zip(self._get_msk_indices(pose_msk), known_poses): + if self.verbose: + print(f' (setting pose #{idx} = {pose[:3,3]})') + self._no_grad(self._set_pose(self.im_poses, idx, torch.tensor(pose))) + + # normalize scale if there's less than 1 known pose + n_known_poses = sum((p.requires_grad is False) for p in self.im_poses) + self.norm_pw_scale = (n_known_poses <= 1) + if len(known_poses) == self.n_imgs: + if requires_grad: + self.im_poses.requires_grad_(True) + else: + self.im_poses.requires_grad_(False) + self.norm_pw_scale = False + + def preset_intrinsics(self, known_intrinsics, msk=None): + if isinstance(known_intrinsics, torch.Tensor) and known_intrinsics.ndim == 2: + known_intrinsics = [known_intrinsics] + for K in known_intrinsics: + assert K.shape == (3, 3) + self.preset_focal([K.diagonal()[:2].mean() for K in known_intrinsics], msk) + if self.optimize_pp: + self.preset_principal_point([K[:2, 2] for K in known_intrinsics], msk) + + def preset_focal(self, known_focals, msk=None, requires_grad=False): + self._check_all_imgs_are_selected(msk) + + for idx, focal in zip(self._get_msk_indices(msk), known_focals): + if self.verbose: + print(f' (setting focal #{idx} = {focal})') + self._no_grad(self._set_focal(idx, focal)) + if len(known_focals) == self.n_imgs: + if requires_grad: + self.im_focals.requires_grad_(True) + else: + self.im_focals.requires_grad_(False) + + def preset_principal_point(self, known_pp, msk=None): + self._check_all_imgs_are_selected(msk) + + for idx, pp in zip(self._get_msk_indices(msk), known_pp): + if self.verbose: + print(f' 
(setting principal point #{idx} = {pp})') + self._no_grad(self._set_principal_point(idx, pp)) + + self.im_pp.requires_grad_(False) + + def _get_msk_indices(self, msk): + if msk is None: + return range(self.n_imgs) + elif isinstance(msk, int): + return [msk] + elif isinstance(msk, (tuple, list)): + return self._get_msk_indices(np.array(msk)) + elif msk.dtype in (bool, torch.bool, np.bool_): + assert len(msk) == self.n_imgs + return np.where(msk)[0] + elif np.issubdtype(msk.dtype, np.integer): + return msk + else: + raise ValueError(f'bad {msk=}') + + def _no_grad(self, tensor): + assert tensor.requires_grad, 'it must be True at this point, otherwise no modification occurs' + + def _set_focal(self, idx, focal, force=False): + param = self.im_focals[idx] + if param.requires_grad or force: # can only init a parameter not already initialized + param.data[:] = self.focal_break * np.log(focal) + return param + + def get_focals(self): + if self.shared_focal: + log_focals = torch.stack([self.im_focals[0]] * self.n_imgs, dim=0) + else: + log_focals = torch.stack(list(self.im_focals), dim=0) + return (log_focals / self.focal_break).exp() + + def get_known_focal_mask(self): + return torch.tensor([not (p.requires_grad) for p in self.im_focals]) + + def _set_principal_point(self, idx, pp, force=False): + param = self.im_pp[idx] + H, W = self.imshapes[idx] + if param.requires_grad or force: # can only init a parameter not already initialized + param.data[:] = to_cpu(to_numpy(pp) - (W/2, H/2)) / 10 + return param + + def get_principal_points_non_batch(self): + return torch.stack([pp.new((W/2, H/2))+10*pp for pp, (H, W) in zip(self.im_pp, self.imshapes)]) + + def get_principal_points_batch(self): + return self._pp + 10 * self.im_pp + + def get_principal_points(self): + if self.batchify: + return self.get_principal_points_batch() + else: + return self.get_principal_points_non_batch() + + def get_intrinsics(self): + K = torch.zeros((self.n_imgs, 3, 3), device=self.device) + focals = self.get_focals().flatten() + K[:, 0, 0] = K[:, 1, 1] = focals + K[:, :2, 2] = self.get_principal_points() + K[:, 2, 2] = 1 + return K + + def get_im_poses_batch(self): # cam to world + cam2world = self._get_poses(self.im_poses) + return cam2world + + def get_im_poses_non_batch(self): # cam to world + cam2world = self._get_poses(torch.stack(list(self.im_poses))) + return cam2world + + def get_im_poses(self): + if self.batchify: + return self.get_im_poses_batch() + else: + return self.get_im_poses_non_batch() + + def _set_depthmap_batch(self, idx, depth, force=False): + depth = _ravel_hw(depth, self.max_area) + + param = self.im_depthmaps[idx] + if param.requires_grad or force: # can only init a parameter not already initialized + param.data[:] = depth.log().nan_to_num(neginf=0) + return param + + def _set_depthmap_non_batch(self, idx, depth, force=False): + param = self.im_depthmaps[idx] + if param.requires_grad or force: # can only init a parameter not already initialized + param.data[:] = depth.log().nan_to_num(neginf=0) + return param + + def _set_depthmap(self, idx, depth, force=False): + if self.batchify: + return self._set_depthmap_batch(idx, depth, force) + else: + return self._set_depthmap_non_batch(idx, depth, force) + + def preset_depthmap(self, known_depthmaps, msk=None, requires_grad=False): + self._check_all_imgs_are_selected(msk) + + for idx, depth in zip(self._get_msk_indices(msk), known_depthmaps): + if self.verbose: + print(f' (setting depthmap #{idx})') + self._no_grad(self._set_depthmap(idx, depth)) + + if 
len(known_depthmaps) == self.n_imgs:
+            if requires_grad:
+                self.im_depthmaps.requires_grad_(True)
+            else:
+                self.im_depthmaps.requires_grad_(False)
+
+    def _set_init_depthmap(self):
+        depth_maps = self.get_depthmaps(raw=True)
+        self.init_depthmap = [dm.detach().clone() for dm in depth_maps]
+
+    def get_init_depthmaps(self, raw=False):
+        res = self.init_depthmap
+        if not raw:
+            res = [dm[:h*w].view(h, w) for dm, (h, w) in zip(res, self.imshapes)]
+        return res
+
+    def get_depthmaps_batch(self, raw=False):
+        res = self.im_depthmaps.exp()
+        if not raw:
+            res = [dm[:h*w].view(h, w) for dm, (h, w) in zip(res, self.imshapes)]
+        return res
+
+    def get_depthmaps_non_batch(self):
+        return [d.exp() for d in self.im_depthmaps]
+
+    def get_depthmaps(self, raw=False):
+        if self.batchify:
+            return self.get_depthmaps_batch(raw)
+        else:
+            return self.get_depthmaps_non_batch()
+
+    def depth_to_pts3d(self):
+        # Get depths and projection params if not provided
+        focals = self.get_focals()
+        pp = self.get_principal_points()
+        im_poses = self.get_im_poses()
+        depth = self.get_depthmaps(raw=True)
+
+        # get pointmaps in camera frame
+        rel_ptmaps = _fast_depthmap_to_pts3d(depth, self._grid, focals, pp=pp)
+        # project to world frame
+        return geotrf(im_poses, rel_ptmaps)
+
+    def depth_to_pts3d_partial(self):
+        # Get depths and projection params if not provided
+        focals = self.get_focals()
+        pp = self.get_principal_points()
+        im_poses = self.get_im_poses()
+        depth = self.get_depthmaps()
+
+        # convert focal to (1,2,H,W) constant field
+        def focal_ex(i): return focals[i][..., None, None].expand(1, *focals[i].shape, *self.imshapes[i])
+        # get pointmaps in camera frame
+        rel_ptmaps = [depthmap_to_pts3d(depth[i][None], focal_ex(i), pp=pp[i:i+1])[0] for i in range(im_poses.shape[0])]
+        # project to world frame
+        return [geotrf(pose, ptmap) for pose, ptmap in zip(im_poses, rel_ptmaps)]
+
+    def get_pts3d_batch(self, raw=False, **kwargs):
+        res = self.depth_to_pts3d()
+        if not raw:
+            res = [dm[:h*w].view(h, w, 3) for dm, (h, w) in zip(res, self.imshapes)]
+        return res
+
+    def get_pts3d(self, raw=False, **kwargs):
+        if self.batchify:
+            return self.get_pts3d_batch(raw, **kwargs)
+        else:
+            return self.depth_to_pts3d_partial()
+
+    def forward_batchify(self, epoch=9999):
+        pw_poses = self.get_pw_poses()  # cam-to-world
+
+        pw_adapt = self.get_adaptors().unsqueeze(1)
+        proj_pts3d = self.get_pts3d(raw=True)
+
+        # rotate pairwise prediction according to pw_poses
+        aligned_pred_i = geotrf(pw_poses, pw_adapt * self._stacked_pred_i)
+        aligned_pred_j = geotrf(pw_poses, pw_adapt * self._stacked_pred_j)
+
+        # compute the loss
+        li = self.dist(proj_pts3d[self._ei], aligned_pred_i, weight=self._weight_i).sum() / self.total_area_i
+        lj = self.dist(proj_pts3d[self._ej], aligned_pred_j, weight=self._weight_j).sum() / self.total_area_j
+
+        # camera temporal loss
+        if self.temporal_smoothing_weight > 0:
+            temporal_smoothing_loss = self.relative_pose_loss(self.get_im_poses()[:-1], self.get_im_poses()[1:]).sum()
+        else:
+            temporal_smoothing_loss = 0
+
+        if self.flow_loss_weight > 0 and epoch >= self.num_total_iter * self.flow_loss_start_epoch:  # enable the flow loss after a certain epoch
+            R_all, T_all = self.get_im_poses()[:, :3].split([3, 1], dim=-1)
+            R1, T1 = R_all[self._ei], T_all[self._ei]
+            R2, T2 = R_all[self._ej], T_all[self._ej]
+            K_all = self.get_intrinsics()
+            inv_K_all = torch.linalg.inv(K_all)
+            K_1, inv_K_1 = K_all[self._ei], inv_K_all[self._ei]
+            K_2, inv_K_2 = K_all[self._ej], inv_K_all[self._ej]
+            depth_all =
torch.stack(self.get_depthmaps(raw=False)).unsqueeze(1) + depth1, depth2 = depth_all[self._ei], depth_all[self._ej] + disp_1, disp_2 = 1 / (depth1 + 1e-6), 1 / (depth2 + 1e-6) + ego_flow_1_2, _ = self.depth_wrapper(R1, T1, R2, T2, disp_1, K_2, inv_K_1) + ego_flow_2_1, _ = self.depth_wrapper(R2, T2, R1, T1, disp_2, K_1, inv_K_2) + dynamic_masks_all = torch.stack(self.dynamic_masks).to(self.device).unsqueeze(1) + dynamic_mask1, dynamic_mask2 = dynamic_masks_all[self._ei], dynamic_masks_all[self._ej] + + flow_loss_i = self.flow_loss_fn(ego_flow_1_2[:, :2, ...], self.flow_ij, ~dynamic_mask1, per_pixel_thre=self.pxl_thre) + flow_loss_j = self.flow_loss_fn(ego_flow_2_1[:, :2, ...], self.flow_ji, ~dynamic_mask2, per_pixel_thre=self.pxl_thre) + flow_loss = flow_loss_i + flow_loss_j + print(f'flow loss: {flow_loss.item()}') + if flow_loss.item() > self.flow_loss_thre and self.flow_loss_thre > 0: + flow_loss = 0 + self.flow_loss_flag = True + else: + flow_loss = 0 + + if self.depth_regularize_weight > 0: + init_depthmaps = torch.stack(self.get_init_depthmaps(raw=False)).unsqueeze(1) + depthmaps = torch.stack(self.get_depthmaps(raw=False)).unsqueeze(1) + dynamic_masks_all = torch.stack(self.dynamic_masks).to(self.device).unsqueeze(1) + depth_prior_loss = self.depth_regularizer(depthmaps, init_depthmaps, dynamic_masks_all) + else: + depth_prior_loss = 0 + + loss = (li + lj) * 1 + self.temporal_smoothing_weight * temporal_smoothing_loss + \ + self.flow_loss_weight * flow_loss + self.depth_regularize_weight * depth_prior_loss + + return loss + + def forward_non_batchify(self, epoch=9999): + + # --(1) Perform the original pairwise 3D consistency loss (pairwise 3D consistency)-- + pw_poses = self.get_pw_poses() # pair-wise poses (or adaptive poses) + pw_adapt = self.get_adaptors() + proj_pts3d = self.get_pts3d() # 3D point clouds for each image + weight_i = {i_j: self.conf_trf(c) for i_j, c in self.conf_i.items()} + weight_j = {i_j: self.conf_trf(c) for i_j, c in self.conf_j.items()} + + loss = 0.0 + for e, (i, j) in enumerate(self.edges): + i_j = edge_str(i, j) + # Transform the pairwise predictions to the world coordinate system + aligned_pred_i = geotrf(pw_poses[e], pw_adapt[e] * self.pred_i[i_j]) + aligned_pred_j = geotrf(pw_poses[e], pw_adapt[e] * self.pred_j[i_j]) + # Compute the distance loss between the projected point clouds and the predictions + li = self.dist(proj_pts3d[i], aligned_pred_i, weight=weight_i[i_j]).mean() + lj = self.dist(proj_pts3d[j], aligned_pred_j, weight=weight_j[i_j]).mean() + loss += (li + lj) + + # Average the loss + loss /= self.n_edges + + # --(2) Add temporal smoothing constraint between adjacent frames (temporal smoothing)-- + temporal_smoothing_loss = 0.0 + if self.temporal_smoothing_weight > 0: + # Get the global poses (4x4) for all images + im_poses = self.get_im_poses() # shape: (n_imgs, 4, 4) + # Stack the relative poses between adjacent frames and use the existing relative_pose_loss function + rel_RT1, rel_RT2 = [], [] + for idx in range(self.n_imgs - 1): + rel_RT1.append(im_poses[idx]) + rel_RT2.append(im_poses[idx + 1]) + if len(rel_RT1) > 0: + rel_RT1 = torch.stack(rel_RT1, dim=0) # shape: (n_imgs-1, 4, 4) + rel_RT2 = torch.stack(rel_RT2, dim=0) + # Compute the pose difference between adjacent frames + temporal_smoothing_loss = self.relative_pose_loss(rel_RT1, rel_RT2).sum() + loss += self.temporal_smoothing_weight * temporal_smoothing_loss + + # --(3) Add flow constraint (flow_loss), similar to forward_batchify-- + flow_loss = 0.0 + if self.flow_loss_weight > 
0 and epoch >= self.num_total_iter * self.flow_loss_start_epoch: + # Iterate through each pair of images and compute the depth map and flow comparison + im_poses = self.get_im_poses() # (n_imgs, 4, 4) + K_all = self.get_intrinsics() # (n_imgs, 3, 3) + inv_K_all = torch.linalg.inv(K_all) + depthmaps = self.get_depthmaps(raw=False) # list of depth maps (H, W) + + for e, (i, j) in enumerate(self.edges): + # Get the rotation, translation, and intrinsics for the two frames + R1 = im_poses[i][:3, :3].unsqueeze(0) # shape: (1, 3, 3) + T1 = im_poses[i][:3, 3].unsqueeze(-1).unsqueeze(0) # (1, 3, 1) + R2 = im_poses[j][:3, :3].unsqueeze(0) + T2 = im_poses[j][:3, 3].unsqueeze(-1).unsqueeze(0) + K1 = K_all[i].unsqueeze(0) # (1, 3, 3) + K2 = K_all[j].unsqueeze(0) + inv_K1 = inv_K_all[i].unsqueeze(0) + inv_K2 = inv_K_all[j].unsqueeze(0) + + # Construct disparity: disp = 1/depth + depth1 = depthmaps[i].unsqueeze(0).unsqueeze(1) # (1, 1, H, W) + depth2 = depthmaps[j].unsqueeze(0).unsqueeze(1) + disp_1 = 1.0 / (depth1 + 1e-6) + disp_2 = 1.0 / (depth2 + 1e-6) + + # Compute "ego-motion flow" by projecting using DepthBasedWarping + # Note that DepthBasedWarping expects batch dimension, so add unsqueeze(0) + ego_flow_1_2, _ = self.depth_wrapper(R1, T1, R2, T2, disp_1, K2, inv_K1) + ego_flow_2_1, _ = self.depth_wrapper(R2, T2, R1, T1, disp_2, K1, inv_K2) + + # Get the corresponding dynamic region masks (if any) + dynamic_mask_i = self.dynamic_masks[i].to(self.device) # shape: (H, W) + dynamic_mask_j = self.dynamic_masks[j].to(self.device) + + # When computing flow loss, exclude or ignore dynamic regions + flow_loss_i = self.flow_loss_fn( + ego_flow_1_2[0, :2, ...], # shape: (2, H, W) + self.flow_ij[e], # shape: (2, H, W), i->j + ~dynamic_mask_i, # mask: True = keep, False = ignore + per_pixel_thre=self.pxl_thre + ) + flow_loss_j = self.flow_loss_fn( + ego_flow_2_1[0, :2, ...], + self.flow_ji[e], # j->i + ~dynamic_mask_j, + per_pixel_thre=self.pxl_thre + ) + flow_loss += (flow_loss_i + flow_loss_j) + + # Optional: handle cases where the flow loss is too large (e.g., early stop) + # divide by the number of edges + flow_loss /= self.n_edges + print(f'flow loss: {flow_loss.item()}') + if flow_loss.item() > self.flow_loss_thre and self.flow_loss_thre > 0: + flow_loss = 0.0 + + loss += self.flow_loss_weight * flow_loss + + # --(4) Add depth regularization (depth_prior_loss) to constrain the initial depth-- + if self.depth_regularize_weight > 0: + init_depthmaps = self.get_init_depthmaps(raw=False) # initial depth maps + current_depthmaps = self.get_depthmaps(raw=False) # current optimized depth maps + depth_prior_loss = 0.0 + for i in range(self.n_imgs): + # Apply constraints on static regions (ignore dynamic regions) + # Make sure the shape has the batch dimension (B,1,H,W) + depth_prior_loss += self.depth_regularizer( + current_depthmaps[i].unsqueeze(0).unsqueeze(1), + init_depthmaps[i].unsqueeze(0).unsqueeze(1), + self.dynamic_masks[i].unsqueeze(0).unsqueeze(1) + ) + loss += self.depth_regularize_weight * depth_prior_loss + + return loss + + def forward(self, epoch=9999): + if self.batchify: + return self.forward_batchify(epoch) + else: + return self.forward_non_batchify(epoch) + + def relative_pose_loss(self, RT1, RT2): + relative_RT = torch.matmul(torch.inverse(RT1), RT2) + rotation_diff = relative_RT[:, :3, :3] + translation_diff = relative_RT[:, :3, 3] + + # Frobenius norm for rotation difference + rotation_loss = torch.norm(rotation_diff - (torch.eye(3, device=RT1.device)), dim=(1, 2)) + + # L2 norm for 
translation difference
+        translation_loss = torch.norm(translation_diff, dim=1)
+
+        # Combined loss (one can weigh these differently if needed)
+        pose_loss = rotation_loss + translation_loss * self.translation_weight
+        return pose_loss
+
+
+def _fast_depthmap_to_pts3d(depth, pixel_grid, focal, pp):
+    pp = pp.unsqueeze(1)
+    focal = focal.unsqueeze(1)
+    assert focal.shape == (len(depth), 1, 1)
+    assert pp.shape == (len(depth), 1, 2)
+    assert pixel_grid.shape == depth.shape + (2,)
+    depth = depth.unsqueeze(-1)
+    return torch.cat((depth * (pixel_grid - pp) / focal, depth), dim=-1)
+
+
+def ParameterStack(params, keys=None, is_param=None, fill=0):
+    if keys is not None:
+        params = [params[k] for k in keys]
+
+    if fill > 0:
+        params = [_ravel_hw(p, fill) for p in params]
+
+    requires_grad = params[0].requires_grad
+    assert all(p.requires_grad == requires_grad for p in params)
+
+    params = torch.stack(list(params)).float().detach()
+    if is_param or requires_grad:
+        params = nn.Parameter(params)
+        params.requires_grad_(requires_grad)
+    return params
+
+
+def _ravel_hw(tensor, fill=0):
+    # ravel H,W
+    tensor = tensor.view((tensor.shape[0] * tensor.shape[1],) + tensor.shape[2:])
+
+    if len(tensor) < fill:
+        tensor = torch.cat((tensor, tensor.new_zeros((fill - len(tensor),)+tensor.shape[1:])))
+    return tensor
+
+
+def acceptable_focal_range(H, W, minf=0.5, maxf=3.5):
+    focal_base = max(H, W) / (2 * np.tan(np.deg2rad(60) / 2))  # size / 1.1547005383792515
+    return minf*focal_base, maxf*focal_base
+
+
+def apply_mask(img, msk):
+    img = img.copy()
+    img[msk] = 0
+    return img
+
+
+def ordered_ratio(disp_a, disp_b, mask=None):
+    ratio_a = torch.maximum(disp_a, disp_b) / \
+        (torch.minimum(disp_a, disp_b)+1e-5)
+    if mask is not None:
+        ratio_a = ratio_a[mask]
+    return ratio_a - 1
\ No newline at end of file
diff --git a/dynamic_predictor/dust3r/cloud_opt/pair_viewer.py b/dynamic_predictor/dust3r/cloud_opt/pair_viewer.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6894c6375b4e5fbcc4b5dc564c1cbe184445b54
--- /dev/null
+++ b/dynamic_predictor/dust3r/cloud_opt/pair_viewer.py
@@ -0,0 +1,133 @@
+# --------------------------------------------------------
+# Dummy optimizer for visualizing pairs
+# --------------------------------------------------------
+import numpy as np
+import torch
+import torch.nn as nn
+import cv2
+
+from dust3r.cloud_opt.base_opt import BasePCOptimizer
+from dust3r.utils.geometry import inv, geotrf, depthmap_to_absolute_camera_coordinates
+from dust3r.cloud_opt.commons import edge_str
+from dust3r.post_process import estimate_focal_knowing_depth
+
+
+class PairViewer (BasePCOptimizer):
+    """
+    This is a dummy optimizer.
+ To use only when the goal is to visualize the results for a pair of images (with is_symmetrized) + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + assert self.is_symmetrized and self.n_edges == 2 + self.has_im_poses = True + + # compute all parameters directly from raw input + self.focals = [] + self.pp = [] + rel_poses = [] + confs = [] + for i in range(self.n_imgs): + conf = float(self.conf_i[edge_str(i, 1-i)].mean() * self.conf_j[edge_str(i, 1-i)].mean()) + if self.verbose: + print(f' - {conf=:.3} for edge {i}-{1-i}') + confs.append(conf) + + H, W = self.imshapes[i] + pts3d = self.pred_i[edge_str(i, 1-i)] + pp = torch.tensor((W/2, H/2)) + focal = float(estimate_focal_knowing_depth(pts3d[None], pp, focal_mode='weiszfeld')) + self.focals.append(focal) + self.pp.append(pp) + + # estimate the pose of pts1 in image 2 + pixels = np.mgrid[:W, :H].T.astype(np.float32) + pts3d = self.pred_j[edge_str(1-i, i)].numpy() + assert pts3d.shape[:2] == (H, W) + msk = self.get_masks()[i].numpy() + K = np.float32([(focal, 0, pp[0]), (0, focal, pp[1]), (0, 0, 1)]) + + try: + res = cv2.solvePnPRansac(pts3d[msk], pixels[msk], K, None, + iterationsCount=100, reprojectionError=5, flags=cv2.SOLVEPNP_SQPNP) + success, R, T, inliers = res + assert success + + R = cv2.Rodrigues(R)[0] # world to cam + pose = inv(np.r_[np.c_[R, T], [(0, 0, 0, 1)]]) # cam to world + except: + pose = np.eye(4) + rel_poses.append(torch.from_numpy(pose.astype(np.float32))) + + # let's use the pair with the most confidence + if confs[0] > confs[1]: + # ptcloud is expressed in camera1 + self.im_poses = [torch.eye(4), rel_poses[1]] # I, cam2-to-cam1 + self.depth = [self.pred_i['0_1'][..., 2], geotrf(inv(rel_poses[1]), self.pred_j['0_1'])[..., 2]] + else: + # ptcloud is expressed in camera2 + self.im_poses = [rel_poses[0], torch.eye(4)] # I, cam1-to-cam2 + self.depth = [geotrf(inv(rel_poses[0]), self.pred_j['1_0'])[..., 2], self.pred_i['1_0'][..., 2]] + + self.im_poses = nn.Parameter(torch.stack(self.im_poses, dim=0), requires_grad=False) + self.focals = nn.Parameter(torch.tensor(self.focals), requires_grad=False) + self.pp = nn.Parameter(torch.stack(self.pp, dim=0), requires_grad=False) + self.depth = nn.ParameterList(self.depth) + for p in self.parameters(): + p.requires_grad = False + + def _set_depthmap(self, idx, depth, force=False): + if self.verbose: + print('_set_depthmap is ignored in PairViewer') + return + + def get_depthmaps(self, raw=False): + depth = [d.to(self.device) for d in self.depth] + return depth + + def _set_focal(self, idx, focal, force=False): + self.focals[idx] = focal + + def get_focals(self): + return self.focals + + def get_known_focal_mask(self): + return torch.tensor([not (p.requires_grad) for p in self.focals]) + + def get_principal_points(self): + return self.pp + + def get_intrinsics(self): + focals = self.get_focals() + pps = self.get_principal_points() + K = torch.zeros((len(focals), 3, 3), device=self.device) + for i in range(len(focals)): + K[i, 0, 0] = K[i, 1, 1] = focals[i] + K[i, :2, 2] = pps[i] + K[i, 2, 2] = 1 + return K + + def get_im_poses(self): + return self.im_poses + + def depth_to_pts3d(self, raw_pts=False): + pts3d = [] + if raw_pts: + im_poses = self.get_im_poses() + if im_poses[0].sum() == 4: + pts3d.append(self.pred_i['0_1']) + pts3d.append(self.pred_j['0_1']) + else: + pts3d.append(self.pred_j['1_0']) + pts3d.append(self.pred_i['1_0']) + else: + for d, intrinsics, im_pose in zip(self.depth, self.get_intrinsics(), self.get_im_poses()): + pts, _ = 
depthmap_to_absolute_camera_coordinates(d.cpu().numpy(), + intrinsics.cpu().numpy(), + im_pose.cpu().numpy()) + pts3d.append(torch.from_numpy(pts).to(device=self.device)) + return pts3d + + def forward(self): + return float('nan') \ No newline at end of file diff --git a/dynamic_predictor/dust3r/datasets/__init__.py b/dynamic_predictor/dust3r/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f1cf539b718a62036bd8a0f701575ca47d7d11f0 --- /dev/null +++ b/dynamic_predictor/dust3r/datasets/__init__.py @@ -0,0 +1,56 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). +from .utils.transforms import * +from .base.batched_sampler import BatchedRandomSampler # noqa +from .arkitscenes import ARKitScenes # noqa +from .blendedmvs import BlendedMVS # noqa +from .co3d import Co3d # noqa +from .habitat import Habitat # noqa +from .megadepth import MegaDepth # noqa +from .scannetpp import ScanNetpp # noqa +from .staticthings3d import StaticThings3D # noqa +from .waymo import Waymo # noqa +from .wildrgbd import WildRGBD # noqa +from .pointodyssey import PointOdysseyDUSt3R # noqa +from .sintel import SintelDUSt3R # noqa +from .tartanair import TarTanAirDUSt3R # noqa +from .spring_dataset import SpringDUSt3R # noqa +from .dynamic_replica import DynamicReplicaDUSt3R # noqa + +def get_data_loader(dataset, batch_size, num_workers=8, shuffle=True, drop_last=True, pin_mem=True): + import torch + from croco.utils.misc import get_world_size, get_rank + + # pytorch dataset + if isinstance(dataset, str): + dataset = eval(dataset) + # dataset: "1000 @ Co3d(split='train', ROOT='data/co3d_subset_processed', aug_crop=16, mask_bg='rand', resolution=224, transform=ColorJitter)" + # eval(dataset) returns Co3d(split='train', ROOT='data/co3d_subset_processed', aug_crop=16, mask_bg='rand', resolution=224, transform=ColorJitter) + + world_size = get_world_size() + rank = get_rank() + + try: + sampler = dataset.make_sampler(batch_size, shuffle=shuffle, world_size=world_size, + rank=rank, drop_last=drop_last) + except (AttributeError, NotImplementedError): + # not avail for this dataset + if torch.distributed.is_initialized(): + sampler = torch.utils.data.DistributedSampler( + dataset, num_replicas=world_size, rank=rank, shuffle=shuffle, drop_last=drop_last + ) + elif shuffle: + sampler = torch.utils.data.RandomSampler(dataset) + else: + sampler = torch.utils.data.SequentialSampler(dataset) + + data_loader = torch.utils.data.DataLoader( + dataset, + sampler=sampler, + batch_size=batch_size, + num_workers=num_workers, + pin_memory=pin_mem, + drop_last=drop_last, + ) + + return data_loader diff --git a/dynamic_predictor/dust3r/datasets/arkitscenes.py b/dynamic_predictor/dust3r/datasets/arkitscenes.py new file mode 100644 index 0000000000000000000000000000000000000000..4fad51acdc18b82cd6a4d227de0dac3b25783e33 --- /dev/null +++ b/dynamic_predictor/dust3r/datasets/arkitscenes.py @@ -0,0 +1,102 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). 
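As an aside on get_data_loader above: the dataset argument can be a plain string, which is eval'ed in the module namespace, so dataset constructors and the size operators defined further below ("N @ dataset", "N * dataset", "dataset1 + dataset2") compose directly. A minimal usage sketch, mirroring the spec quoted in the comment above; the ROOT path and batch sizes are illustrative and assume the preprocessed data exists:

from dust3r.datasets import get_data_loader

# the spec string is eval'ed inside get_data_loader; "1000 @ ..." resizes
# the dataset to 1000 samples per epoch (see ResizedDataset further below)
spec = "1000 @ Co3d(split='train', ROOT='data/co3d_subset_processed', aug_crop=16, mask_bg='rand', resolution=224, transform=ColorJitter)"
loader = get_data_loader(spec, batch_size=4, num_workers=2, shuffle=True)
loader.dataset.set_epoch(0)  # ResizedDataset needs an epoch before indexing
for views in loader:
    # each batch is a list of two view dicts whose values are batched tensors
    print(views[0]['img'].shape)  # e.g. torch.Size([4, 3, 224, 224])
    break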
+# +# -------------------------------------------------------- +# Dataloader for preprocessed arkitscenes +# dataset at https://github.com/apple/ARKitScenes - Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License https://github.com/apple/ARKitScenes/tree/main?tab=readme-ov-file#license +# See datasets_preprocess/preprocess_arkitscenes.py +# -------------------------------------------------------- +import os.path as osp +import cv2 +import numpy as np + +from dust3r.datasets.base.base_stereo_view_dataset import BaseStereoViewDataset +from dust3r.utils.image import imread_cv2 + + +class ARKitScenes(BaseStereoViewDataset): + def __init__(self, *args, split, ROOT, **kwargs): + self.ROOT = ROOT + super().__init__(*args, **kwargs) + if split == "train": + self.split = "Training" + elif split == "test": + self.split = "Test" + else: + raise ValueError("") + + self.loaded_data = self._load_data(self.split) + + def _load_data(self, split): + with np.load(osp.join(self.ROOT, split, 'all_metadata.npz')) as data: + self.scenes = data['scenes'] + self.sceneids = data['sceneids'] + self.images = data['images'] + self.intrinsics = data['intrinsics'].astype(np.float32) + self.trajectories = data['trajectories'].astype(np.float32) + self.pairs = data['pairs'][:, :2].astype(int) + + def __len__(self): + return len(self.pairs) + + def _get_views(self, idx, resolution, rng): + + image_idx1, image_idx2 = self.pairs[idx] + + views = [] + for view_idx in [image_idx1, image_idx2]: + scene_id = self.sceneids[view_idx] + scene_dir = osp.join(self.ROOT, self.split, self.scenes[scene_id]) + + intrinsics = self.intrinsics[view_idx] + camera_pose = self.trajectories[view_idx] + basename = self.images[view_idx] + + # Load RGB image + rgb_image = imread_cv2(osp.join(scene_dir, 'vga_wide', basename.replace('.png', '.jpg'))) + # Load depthmap + depthmap = imread_cv2(osp.join(scene_dir, 'lowres_depth', basename), cv2.IMREAD_UNCHANGED) + depthmap = depthmap.astype(np.float32) / 1000 + depthmap[~np.isfinite(depthmap)] = 0 # invalid + + rgb_image, depthmap, intrinsics = self._crop_resize_if_necessary( + rgb_image, depthmap, intrinsics, resolution, rng=rng, info=view_idx) + + views.append(dict( + img=rgb_image, + depthmap=depthmap.astype(np.float32), + camera_pose=camera_pose.astype(np.float32), + camera_intrinsics=intrinsics.astype(np.float32), + dataset='arkitscenes', + label=self.scenes[scene_id] + '_' + basename, + instance=f'{str(idx)}_{str(view_idx)}', + )) + + return views + + +if __name__ == "__main__": + from dust3r.datasets.base.base_stereo_view_dataset import view_name + from dust3r.viz import SceneViz, auto_cam_size + from dust3r.utils.image import rgb + + dataset = ARKitScenes(split='train', ROOT="data/arkitscenes_processed", resolution=224, aug_crop=16) + + for idx in np.random.permutation(len(dataset)): + views = dataset[idx] + assert len(views) == 2 + print(view_name(views[0]), view_name(views[1])) + viz = SceneViz() + poses = [views[view_idx]['camera_pose'] for view_idx in [0, 1]] + cam_size = max(auto_cam_size(poses), 0.001) + for view_idx in [0, 1]: + pts3d = views[view_idx]['pts3d'] + valid_mask = views[view_idx]['valid_mask'] + colors = rgb(views[view_idx]['img']) + viz.add_pointcloud(pts3d, colors, valid_mask) + viz.add_camera(pose_c2w=views[view_idx]['camera_pose'], + focal=views[view_idx]['camera_intrinsics'][0, 0], + color=(idx * 255, (1 - idx) * 255, 0), + image=colors, + cam_size=cam_size) + viz.show() diff --git a/dynamic_predictor/dust3r/datasets/base/__init__.py 
b/dynamic_predictor/dust3r/datasets/base/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a32692113d830ddc4af4e6ed608f222fbe062e6e --- /dev/null +++ b/dynamic_predictor/dust3r/datasets/base/__init__.py @@ -0,0 +1,2 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). diff --git a/dynamic_predictor/dust3r/datasets/base/base_stereo_view_dataset.py b/dynamic_predictor/dust3r/datasets/base/base_stereo_view_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..ff9489fb0e572fa07ed2b7050cd0761135eb7749 --- /dev/null +++ b/dynamic_predictor/dust3r/datasets/base/base_stereo_view_dataset.py @@ -0,0 +1,229 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). +# +# -------------------------------------------------------- +# base class for implementing datasets +# -------------------------------------------------------- +import PIL +import numpy as np +import torch + +from dust3r.datasets.base.easy_dataset import EasyDataset +from dust3r.datasets.utils.transforms import ImgNorm +from dust3r.utils.geometry import depthmap_to_absolute_camera_coordinates +import dust3r.datasets.utils.cropping as cropping + + +class BaseStereoViewDataset (EasyDataset): + """ Define all basic options. + + Usage: + class MyDataset (BaseStereoViewDataset): + def _get_views(self, idx, rng): + # overload here + views = [] + views.append(dict(img=, ...)) + return views + """ + + def __init__(self, *, # only keyword arguments + split=None, + resolution=None, # square_size or (width, height) or list of [(width,height), ...] + transform=ImgNorm, + aug_crop=False, + aug_focal=False, + z_far=0, + seed=None): + self.num_views = 2 + self.split = split + self._set_resolutions(resolution) + + self.transform = transform + if isinstance(transform, str): + transform = eval(transform) + + self.aug_crop = aug_crop + self.aug_focal = aug_focal + self.seed = seed + self.z_far = z_far + + def __len__(self): + return len(self.scenes) + + def get_stats(self): + return f"{len(self)} pairs" + + def __repr__(self): + resolutions_str = '['+';'.join(f'{w}x{h}' for w, h in self._resolutions)+']' + return f"""{type(self).__name__}({self.get_stats()}, + {self.split=}, + {self.seed=}, + resolutions={resolutions_str}, + {self.transform=})""".replace('self.', '').replace('\n', '').replace(' ', '') + + def _get_views(self, idx, resolution, rng): + raise NotImplementedError() + + def __getitem__(self, idx): + if isinstance(idx, tuple): + # the idx is specifying the aspect-ratio + idx, ar_idx = idx + else: + assert len(self._resolutions) == 1 + ar_idx = 0 + + # set-up the rng + if self.seed: # reseed for each __getitem__ + self._rng = np.random.default_rng(seed=self.seed + idx) + elif not hasattr(self, '_rng'): + seed = torch.initial_seed() # this is different for each dataloader process + self._rng = np.random.default_rng(seed=seed) + + # over-loaded code + resolution = self._resolutions[ar_idx] # DO NOT CHANGE THIS (compatible with BatchedRandomSampler) + views = self._get_views(idx, resolution, self._rng) + assert len(views) == self.num_views + + # check data-types + for v, view in enumerate(views): + assert 'pts3d' not in view, f"pts3d should not be there, they will be computed afterwards based on intrinsics+depthmap for view {view_name(view)}" + view['idx'] = (idx, ar_idx, v) + + # encode the image + width, height = view['img'].size + 
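+            # note: PIL gives size as (W, H) while true_shape stores (H, W);
+            # transpose_to_landscape() below reads true_shape back to decide
+            # whether a view must be transposed to landscape orientation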
view['true_shape'] = np.int32((height, width))
+            view['img'] = self.transform(view['img'])
+
+            assert 'camera_intrinsics' in view
+            if 'camera_pose' not in view:
+                view['camera_pose'] = np.full((4, 4), np.nan, dtype=np.float32)
+            else:
+                assert np.isfinite(view['camera_pose']).all(), f'NaN in camera pose for view {view_name(view)}'
+            assert 'pts3d' not in view
+            assert 'valid_mask' not in view
+            assert np.isfinite(view['depthmap']).all(), f'NaN in depthmap for view {view_name(view)}'
+            view['z_far'] = self.z_far
+            pts3d, valid_mask = depthmap_to_absolute_camera_coordinates(**view)
+
+            view['pts3d'] = pts3d
+            view['valid_mask'] = valid_mask & np.isfinite(pts3d).all(axis=-1)
+
+            # check all datatypes
+            for key, val in view.items():
+                res, err_msg = is_good_type(key, val)
+                assert res, f"{err_msg} with {key}={val} for view {view_name(view)}"
+            K = view['camera_intrinsics']
+
+        # last thing done!
+        for view in views:
+            # transpose portrait views so that all views in a batch are landscape-oriented
+            transpose_to_landscape(view)
+            # this allows checking whether the RNG is in the same state each time
+            view['rng'] = int.from_bytes(self._rng.bytes(4), 'big')
+        return views
+
+    def _set_resolutions(self, resolutions):
+        assert resolutions is not None, 'undefined resolution'
+
+        if not isinstance(resolutions, list):
+            resolutions = [resolutions]
+
+        self._resolutions = []
+        for resolution in resolutions:
+            if isinstance(resolution, int):
+                width = height = resolution
+            else:
+                width, height = resolution
+            assert isinstance(width, int), f'Bad type for {width=} {type(width)=}, should be int'
+            assert isinstance(height, int), f'Bad type for {height=} {type(height)=}, should be int'
+            assert width >= height
+            self._resolutions.append((width, height))

+    def _crop_resize_if_necessary(self, image, depthmap, intrinsics, resolution, rng=None, info=None):
+        """ This function:
+            - first crops a window centered on the principal point,
+            - then downsizes the image with Lanczos interpolation, which
+              preserves more detail than bilinear interpolation when downscaling.
+        """
+        if not isinstance(image, PIL.Image.Image):
+            image = PIL.Image.fromarray(image)
+
+        # downscale with lanczos interpolation so that image.size == resolution
+        # cropping centered on the principal point
+        W, H = image.size
+        cx, cy = intrinsics[:2, 2].round().astype(int)
+        min_margin_x = min(cx, W-cx)
+        min_margin_y = min(cy, H-cy)
+        assert min_margin_x > W/5, f'Bad principal point in view={info}'
+        assert min_margin_y > H/5, f'Bad principal point in view={info}'
+        # the new window will be a rectangle of size (2*min_margin_x, 2*min_margin_y) centered on (cx,cy)
+        l, t = cx - min_margin_x, cy - min_margin_y
+        r, b = cx + min_margin_x, cy + min_margin_y
+        crop_bbox = (l, t, r, b)
+        image, depthmap, intrinsics = cropping.crop_image_depthmap(image, depthmap, intrinsics, crop_bbox)
+
+        # transpose the resolution if necessary
+        W, H = image.size  # new size
+        assert resolution[0] >= resolution[1]
+        if H > 1.1*W:
+            # image is portrait mode
+            resolution = resolution[::-1]
+        elif 0.9 < H/W < 1.1 and resolution[0] != resolution[1]:
+            # image is square, so we choose (portrait, landscape) randomly
+            if rng.integers(2):
+                resolution = resolution[::-1]
+
+        # high-quality Lanczos down-scaling
+        target_resolution = np.array(resolution)
+        if self.aug_focal:
+            crop_scale = self.aug_focal + (1.0 - self.aug_focal) * np.random.beta(0.5, 0.5)  # beta distribution, bi-modal
+            image, depthmap, intrinsics = cropping.center_crop_image_depthmap(image, depthmap, intrinsics, crop_scale)
+
+        if self.aug_crop > 1:
+            target_resolution += rng.integers(0, self.aug_crop)
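+        # (rescaling slightly above the target size and then cropping back
+        # down to it turns the random aug_crop jitter into a small random
+        # zoom instead of leaving empty borders)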
+        image, depthmap, intrinsics = cropping.rescale_image_depthmap(image, depthmap, intrinsics, target_resolution)  # rescale slightly above the target resolution
+
+        # actual cropping (if necessary) with bilinear interpolation
+        intrinsics2 = cropping.camera_matrix_of_crop(intrinsics, image.size, resolution, offset_factor=0.5)
+        crop_bbox = cropping.bbox_from_intrinsics_in_out(intrinsics, intrinsics2, resolution)
+        image, depthmap, intrinsics2 = cropping.crop_image_depthmap(image, depthmap, intrinsics, crop_bbox)
+
+        return image, depthmap, intrinsics2
+
+
+def is_good_type(key, v):
+    """ returns (is_good, err_msg)
+    """
+    if isinstance(v, (str, int, tuple)):
+        return True, None
+    if v.dtype not in (torch.bool, np.float32, torch.float32, bool, np.int32, np.int64, np.uint8):
+        return False, f"bad {v.dtype=}"
+    return True, None
+
+
+def view_name(view, batch_index=None):
+    def sel(x): return x[batch_index] if batch_index not in (None, slice(None)) else x
+    db = sel(view['dataset'])
+    label = sel(view['label'])
+    instance = sel(view['instance'])
+    return f"{db}/{label}/{instance}"
+
+
+def transpose_to_landscape(view):
+    height, width = view['true_shape']
+
+    if width < height:
+        # rectify portrait to landscape
+        assert view['img'].shape == (3, height, width)
+        view['img'] = view['img'].swapaxes(1, 2)
+
+        assert view['valid_mask'].shape == (height, width)
+        view['valid_mask'] = view['valid_mask'].swapaxes(0, 1)
+
+        assert view['depthmap'].shape == (height, width)
+        view['depthmap'] = view['depthmap'].swapaxes(0, 1)
+
+        assert view['pts3d'].shape == (height, width, 3)
+        view['pts3d'] = view['pts3d'].swapaxes(0, 1)
+
+        # transpose x and y pixels
+        view['camera_intrinsics'] = view['camera_intrinsics'][[1, 0, 2]]
diff --git a/dynamic_predictor/dust3r/datasets/base/batched_sampler.py b/dynamic_predictor/dust3r/datasets/base/batched_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..85f58a65d41bb8101159e032d5b0aac26a7cf1a1
--- /dev/null
+++ b/dynamic_predictor/dust3r/datasets/base/batched_sampler.py
@@ -0,0 +1,74 @@
+# Copyright (C) 2024-present Naver Corporation. All rights reserved.
+# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
+#
+# --------------------------------------------------------
+# Random sampling under a constraint
+# --------------------------------------------------------
+import numpy as np
+import torch
+
+
+class BatchedRandomSampler:
+    """ Random sampling under a constraint: each sample in the batch has the same feature,
+    which is chosen randomly from a known pool of 'features' for each batch.
+
+    For instance, the 'feature' could be the image aspect-ratio.
+
+    The index returned is a tuple (sample_idx, feat_idx).
+    This sampler ensures that each series of `batch_size` indices has the same `feat_idx`.
+ """ + + def __init__(self, dataset, batch_size, pool_size, world_size=1, rank=0, drop_last=True): + self.batch_size = batch_size + self.pool_size = pool_size + + self.len_dataset = N = len(dataset) + self.total_size = round_by(N, batch_size*world_size) if drop_last else N + assert world_size == 1 or drop_last, 'must drop the last batch in distributed mode' + + # distributed sampler + self.world_size = world_size + self.rank = rank + self.epoch = None + + def __len__(self): + return self.total_size // self.world_size + + def set_epoch(self, epoch): + self.epoch = epoch + + def __iter__(self): + # prepare RNG + if self.epoch is None: + assert self.world_size == 1 and self.rank == 0, 'use set_epoch() if distributed mode is used' + seed = int(torch.empty((), dtype=torch.int64).random_().item()) + else: + seed = self.epoch + 777 + rng = np.random.default_rng(seed=seed) + + # random indices (will restart from 0 if not drop_last) + sample_idxs = np.arange(self.total_size) + rng.shuffle(sample_idxs) + + # random feat_idxs (same across each batch) + n_batches = (self.total_size+self.batch_size-1) // self.batch_size + feat_idxs = rng.integers(self.pool_size, size=n_batches) + feat_idxs = np.broadcast_to(feat_idxs[:, None], (n_batches, self.batch_size)) + feat_idxs = feat_idxs.ravel()[:self.total_size] + + # put them together + idxs = np.c_[sample_idxs, feat_idxs] # shape = (total_size, 2) + + # Distributed sampler: we select a subset of batches + # make sure the slice for each node is aligned with batch_size + size_per_proc = self.batch_size * ((self.total_size + self.world_size * + self.batch_size-1) // (self.world_size * self.batch_size)) + idxs = idxs[self.rank*size_per_proc: (self.rank+1)*size_per_proc] + + yield from (tuple(idx) for idx in idxs) + + +def round_by(total, multiple, up=False): + if up: + total = total + multiple-1 + return (total//multiple) * multiple diff --git a/dynamic_predictor/dust3r/datasets/base/easy_dataset.py b/dynamic_predictor/dust3r/datasets/base/easy_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..4939a88f02715a1f80be943ddb6d808e1be84db7 --- /dev/null +++ b/dynamic_predictor/dust3r/datasets/base/easy_dataset.py @@ -0,0 +1,157 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). +# +# -------------------------------------------------------- +# A dataset base class that you can easily resize and combine. +# -------------------------------------------------------- +import numpy as np +from dust3r.datasets.base.batched_sampler import BatchedRandomSampler + + +class EasyDataset: + """ a dataset that you can easily resize and combine. 
+    Examples:
+    ---------
+        2 * dataset ==> duplicate each element 2x
+
+        10 @ dataset ==> set the size to 10 (random sampling, duplicates if necessary)
+
+        dataset1 + dataset2 ==> concatenate datasets
+    """
+
+    def __add__(self, other):
+        return CatDataset([self, other])
+
+    def __rmul__(self, factor):
+        return MulDataset(factor, self)
+
+    def __rmatmul__(self, factor):
+        return ResizedDataset(factor, self)
+
+    def set_epoch(self, epoch):
+        pass  # nothing to do by default
+
+    def make_sampler(self, batch_size, shuffle=True, world_size=1, rank=0, drop_last=True):
+        if not (shuffle):
+            raise NotImplementedError()  # cannot deal yet
+        num_of_aspect_ratios = len(self._resolutions)
+        return BatchedRandomSampler(self, batch_size, num_of_aspect_ratios, world_size=world_size, rank=rank, drop_last=drop_last)
+
+
+class MulDataset (EasyDataset):
+    """ Artificially augmenting the size of a dataset.
+    """
+    multiplicator: int
+
+    def __init__(self, multiplicator, dataset):
+        assert isinstance(multiplicator, int) and multiplicator > 0
+        self.multiplicator = multiplicator
+        self.dataset = dataset
+
+    def __len__(self):
+        return self.multiplicator * len(self.dataset)
+
+    def __repr__(self):
+        return f'{self.multiplicator}*{repr(self.dataset)}'
+
+    def __getitem__(self, idx):
+        if isinstance(idx, tuple):
+            idx, other = idx
+            return self.dataset[idx // self.multiplicator, other]
+        else:
+            return self.dataset[idx // self.multiplicator]
+
+    @property
+    def _resolutions(self):
+        return self.dataset._resolutions
+
+
+class ResizedDataset (EasyDataset):
+    """ Artificially changing the size of a dataset.
+    """
+    new_size: int
+
+    def __init__(self, new_size, dataset):
+        assert isinstance(new_size, int) and new_size > 0
+        self.new_size = new_size
+        self.dataset = dataset
+
+    def __len__(self):
+        return self.new_size
+
+    def __repr__(self):
+        size_str = str(self.new_size)
+        for i in range((len(size_str)-1) // 3):
+            sep = -4*i-3
+            size_str = size_str[:sep] + '_' + size_str[sep:]
+        return f'{size_str} @ {repr(self.dataset)}'
+
+    def set_epoch(self, epoch):
+        # this random shuffle only depends on the epoch
+        rng = np.random.default_rng(seed=epoch+777)
+
+        # shuffle all indices
+        perm = rng.permutation(len(self.dataset))
+
+        # rotary extension until target size is met
+        shuffled_idxs = np.concatenate([perm] * (1 + (len(self)-1) // len(self.dataset)))
+        self._idxs_mapping = shuffled_idxs[:self.new_size]
+
+        assert len(self._idxs_mapping) == self.new_size
+
+    def __getitem__(self, idx):
+        assert hasattr(self, '_idxs_mapping'), 'You need to call dataset.set_epoch() to use ResizedDataset.__getitem__()'
+        if isinstance(idx, tuple):
+            idx, other = idx
+            return self.dataset[self._idxs_mapping[idx], other]
+        else:
+            return self.dataset[self._idxs_mapping[idx]]
+
+    @property
+    def _resolutions(self):
+        return self.dataset._resolutions
+
+
+class CatDataset (EasyDataset):
+    """ Concatenation of several datasets
+    """
+
+    def __init__(self, datasets):
+        for dataset in datasets:
+            assert isinstance(dataset, EasyDataset)
+        self.datasets = datasets
+        self._cum_sizes = np.cumsum([len(dataset) for dataset in datasets])
+
+    def __len__(self):
+        return self._cum_sizes[-1]
+
+    def __repr__(self):
+        # remove uselessly long transform
+        return ' + '.join(repr(dataset).replace(',transform=Compose( ToTensor() Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))', '') for dataset in self.datasets)
+
+    def set_epoch(self, epoch):
+        for dataset in self.datasets:
+            dataset.set_epoch(epoch)
+
+    def __getitem__(self, idx):
+        other = None
+        if
isinstance(idx, tuple): + idx, other = idx + + if not (0 <= idx < len(self)): + raise IndexError() + + db_idx = np.searchsorted(self._cum_sizes, idx, 'right') + dataset = self.datasets[db_idx] + new_idx = idx - (self._cum_sizes[db_idx - 1] if db_idx > 0 else 0) + + if other is not None: + new_idx = (new_idx, other) + return dataset[new_idx] + + @property + def _resolutions(self): + resolutions = self.datasets[0]._resolutions + for dataset in self.datasets[1:]: + assert tuple(dataset._resolutions) == tuple(resolutions) + return resolutions diff --git a/dynamic_predictor/dust3r/datasets/blendedmvs.py b/dynamic_predictor/dust3r/datasets/blendedmvs.py new file mode 100644 index 0000000000000000000000000000000000000000..93e68c28620cc47a7b1743834e45f82d576126d0 --- /dev/null +++ b/dynamic_predictor/dust3r/datasets/blendedmvs.py @@ -0,0 +1,104 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). +# +# -------------------------------------------------------- +# Dataloader for preprocessed BlendedMVS +# dataset at https://github.com/YoYo000/BlendedMVS +# See datasets_preprocess/preprocess_blendedmvs.py +# -------------------------------------------------------- +import os.path as osp +import numpy as np + +from dust3r.datasets.base.base_stereo_view_dataset import BaseStereoViewDataset +from dust3r.utils.image import imread_cv2 + + +class BlendedMVS (BaseStereoViewDataset): + """ Dataset of outdoor street scenes, 5 images each time + """ + + def __init__(self, *args, ROOT, split=None, **kwargs): + self.ROOT = ROOT + super().__init__(*args, **kwargs) + self._load_data(split) + + def _load_data(self, split): + pairs = np.load(osp.join(self.ROOT, 'blendedmvs_pairs.npy')) + if split is None: + selection = slice(None) + if split == 'train': + # select 90% of all scenes + selection = (pairs['seq_low'] % 10) > 0 + if split == 'val': + # select 10% of all scenes + selection = (pairs['seq_low'] % 10) == 0 + self.pairs = pairs[selection] + + # list of all scenes + self.scenes = np.unique(self.pairs['seq_low']) # low is unique enough + + def __len__(self): + return len(self.pairs) + + def get_stats(self): + return f'{len(self)} pairs from {len(self.scenes)} scenes' + + def _get_views(self, pair_idx, resolution, rng): + seqh, seql, img1, img2, score = self.pairs[pair_idx] + + seq = f"{seqh:08x}{seql:016x}" + seq_path = osp.join(self.ROOT, seq) + + views = [] + + for view_index in [img1, img2]: + impath = f"{view_index:08n}" + image = imread_cv2(osp.join(seq_path, impath + ".jpg")) + depthmap = imread_cv2(osp.join(seq_path, impath + ".exr")) + camera_params = np.load(osp.join(seq_path, impath + ".npz")) + + intrinsics = np.float32(camera_params['intrinsics']) + camera_pose = np.eye(4, dtype=np.float32) + camera_pose[:3, :3] = camera_params['R_cam2world'] + camera_pose[:3, 3] = camera_params['t_cam2world'] + + image, depthmap, intrinsics = self._crop_resize_if_necessary( + image, depthmap, intrinsics, resolution, rng, info=(seq_path, impath)) + + views.append(dict( + img=image, + depthmap=depthmap, + camera_pose=camera_pose, # cam2world + camera_intrinsics=intrinsics, + dataset='BlendedMVS', + label=osp.relpath(seq_path, self.ROOT), + instance=impath)) + + return views + + +if __name__ == '__main__': + from dust3r.datasets.base.base_stereo_view_dataset import view_name + from dust3r.viz import SceneViz, auto_cam_size + from dust3r.utils.image import rgb + + dataset = BlendedMVS(split='train', ROOT="data/blendedmvs_processed", 
resolution=224, aug_crop=16) + + for idx in np.random.permutation(len(dataset)): + views = dataset[idx] + assert len(views) == 2 + print(idx, view_name(views[0]), view_name(views[1])) + viz = SceneViz() + poses = [views[view_idx]['camera_pose'] for view_idx in [0, 1]] + cam_size = max(auto_cam_size(poses), 0.001) + for view_idx in [0, 1]: + pts3d = views[view_idx]['pts3d'] + valid_mask = views[view_idx]['valid_mask'] + colors = rgb(views[view_idx]['img']) + viz.add_pointcloud(pts3d, colors, valid_mask) + viz.add_camera(pose_c2w=views[view_idx]['camera_pose'], + focal=views[view_idx]['camera_intrinsics'][0, 0], + color=(idx * 255, (1 - idx) * 255, 0), + image=colors, + cam_size=cam_size) + viz.show() diff --git a/dynamic_predictor/dust3r/datasets/co3d.py b/dynamic_predictor/dust3r/datasets/co3d.py new file mode 100644 index 0000000000000000000000000000000000000000..20a9ac2b27b83459274e3c69dd573bc63e61f29e --- /dev/null +++ b/dynamic_predictor/dust3r/datasets/co3d.py @@ -0,0 +1,192 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). +# +# -------------------------------------------------------- +# Dataloader for preprocessed Co3d_v2 +# dataset at https://github.com/facebookresearch/co3d - Creative Commons Attribution-NonCommercial 4.0 International +# See datasets_preprocess/preprocess_co3d.py +# -------------------------------------------------------- +import sys +sys.path.append('.') +import os.path as osp +import json +import itertools +from collections import deque + +import cv2 +import numpy as np + +from dust3r.datasets.base.base_stereo_view_dataset import BaseStereoViewDataset +from dust3r.utils.image import imread_cv2 + + +class Co3d(BaseStereoViewDataset): + def __init__(self, mask_bg=True, *args, ROOT, **kwargs): + self.ROOT = ROOT + super().__init__(*args, **kwargs) + assert mask_bg in (True, False, 'rand') + self.mask_bg = mask_bg + self.dataset_label = 'Co3d_v2' + + # load all scenes + with open(osp.join(self.ROOT, f'selected_seqs_{self.split}.json'), 'r') as f: + self.scenes = json.load(f) + self.scenes = {k: v for k, v in self.scenes.items() if len(v) > 0} + self.scenes = {(k, k2): v2 for k, v in self.scenes.items() + for k2, v2 in v.items()} + self.scene_list = list(self.scenes.keys()) + # 00: + # ('apple', '110_13051_23361') + # 01: + # ('apple', '189_20393_38136') + + # for each scene, we have 100 images ==> 360 degrees (so 25 frames ~= 90 degrees) + # we prepare all combinations such that i-j = +/- [5, 10, .., 90] degrees + self.combinations = [(i, j) + for i, j in itertools.combinations(range(100), 2) + if 0 < abs(i - j) <= 30 and abs(i - j) % 5 == 0] + + self.invalidate = {scene: {} for scene in self.scene_list} + + def __len__(self): + return len(self.scene_list) * len(self.combinations) + + def _get_metadatapath(self, obj, instance, view_idx): + return osp.join(self.ROOT, obj, instance, 'images', f'frame{view_idx:06n}.npz') + + def _get_impath(self, obj, instance, view_idx): + return osp.join(self.ROOT, obj, instance, 'images', f'frame{view_idx:06n}.jpg') + + def _get_depthpath(self, obj, instance, view_idx): + return osp.join(self.ROOT, obj, instance, 'depths', f'frame{view_idx:06n}.jpg.geometric.png') + + def _get_maskpath(self, obj, instance, view_idx): + return osp.join(self.ROOT, obj, instance, 'masks', f'frame{view_idx:06n}.png') + + def _read_depthmap(self, depthpath, input_metadata): + depthmap = imread_cv2(depthpath, cv2.IMREAD_UNCHANGED) + depthmap = (depthmap.astype(np.float32) 
/ 65535) * np.nan_to_num(input_metadata['maximum_depth']) + return depthmap + + def _get_views(self, idx, resolution, rng): + # choose a scene + obj, instance = self.scene_list[idx // len(self.combinations)] + image_pool = self.scenes[obj, instance] + im1_idx, im2_idx = self.combinations[idx % len(self.combinations)] + + # add a bit of randomness + last = len(image_pool) - 1 + + if resolution not in self.invalidate[obj, instance]: # flag invalid images + self.invalidate[obj, instance][resolution] = [False for _ in range(len(image_pool))] + + # decide now if we mask the bg + mask_bg = (self.mask_bg == True) or (self.mask_bg == 'rand' and rng.choice(2)) + + views = [] + imgs_idxs = [max(0, min(im_idx + rng.integers(-4, 5), last)) for im_idx in [im2_idx, im1_idx]] + imgs_idxs = deque(imgs_idxs) + while len(imgs_idxs) > 0: # some images (few) have zero depth + im_idx = imgs_idxs.pop() + + if self.invalidate[obj, instance][resolution][im_idx]: + # search for a valid image + random_direction = 2 * rng.choice(2) - 1 + for offset in range(1, len(image_pool)): + tentative_im_idx = (im_idx + (random_direction * offset)) % len(image_pool) + if not self.invalidate[obj, instance][resolution][tentative_im_idx]: + im_idx = tentative_im_idx + break + + view_idx = image_pool[im_idx] + + impath = self._get_impath(obj, instance, view_idx) + depthpath = self._get_depthpath(obj, instance, view_idx) + + # load camera params + metadata_path = self._get_metadatapath(obj, instance, view_idx) + input_metadata = np.load(metadata_path) + camera_pose = input_metadata['camera_pose'].astype(np.float32) + intrinsics = input_metadata['camera_intrinsics'].astype(np.float32) + + # load image and depth + rgb_image = imread_cv2(impath) + depthmap = self._read_depthmap(depthpath, input_metadata) + + if mask_bg: + # load object mask + maskpath = self._get_maskpath(obj, instance, view_idx) + maskmap = imread_cv2(maskpath, cv2.IMREAD_UNCHANGED).astype(np.float32) + maskmap = (maskmap / 255.0) > 0.1 + + # update the depthmap with mask + depthmap *= maskmap + + rgb_image, depthmap, intrinsics = self._crop_resize_if_necessary( + rgb_image, depthmap, intrinsics, resolution, rng=rng, info=impath) + + num_valid = (depthmap > 0.0).sum() + if num_valid == 0: + # problem, invalidate image and retry + self.invalidate[obj, instance][resolution][im_idx] = True + imgs_idxs.append(im_idx) + continue + + views.append(dict( + img=rgb_image, + depthmap=depthmap, + camera_pose=camera_pose, + camera_intrinsics=intrinsics, + dataset=self.dataset_label, + label=osp.join(obj, instance), + instance=osp.split(impath)[1], + )) + return views + + +if __name__ == "__main__": + from dust3r.datasets.base.base_stereo_view_dataset import view_name + from dust3r.viz import SceneViz, auto_cam_size + from dust3r.utils.image import rgb + import gradio as gr + import random + + def visualize_scene(idx): + views = dataset[idx] + assert len(views) == 2 + viz = SceneViz() + poses = [views[view_idx]['camera_pose'] for view_idx in [0, 1]] + cam_size = max(auto_cam_size(poses), 0.001) + for view_idx in [0, 1]: + pts3d = views[view_idx]['pts3d'] + valid_mask = views[view_idx]['valid_mask'] + colors = rgb(views[view_idx]['img']) + viz.add_pointcloud(pts3d, colors, valid_mask) + viz.add_camera(pose_c2w=views[view_idx]['camera_pose'], + focal=views[view_idx]['camera_intrinsics'][0, 0], + color=(255, 0, 0), + image=colors, + cam_size=cam_size) + path = f"./tmp/scene_{idx}.glb" + return viz.save_glb(path) + + # Load the dataset + dataset = Co3d(split='train', 
ROOT="data/co3d_subset_processed", resolution=224, aug_crop=16) + + use_gradio = False + if use_gradio: + # Create Gradio interface + iface = gr.Interface( + fn=visualize_scene, + inputs=gr.Slider(0, len(dataset)-1, step=1, label="Index"), + outputs=gr.Model3D(), # Use Model3D output type for 3D models + live=True + ) + + # Launch the interface + iface.launch() + else: + # sample an idx + idx = random.randint(0, len(dataset)-1) + visualize_scene(idx) + print(f"Visualizing scene {idx}...") \ No newline at end of file diff --git a/dynamic_predictor/dust3r/datasets/dynamic_replica.py b/dynamic_predictor/dust3r/datasets/dynamic_replica.py new file mode 100644 index 0000000000000000000000000000000000000000..8d6af64a9b97cceac186504d47598a277ee38db1 --- /dev/null +++ b/dynamic_predictor/dust3r/datasets/dynamic_replica.py @@ -0,0 +1,300 @@ +import sys +sys.path.append('.') +import os +import torch +import numpy as np +import os.path as osp +import torchvision.transforms as transforms +import torch.nn.functional as F +from PIL import Image +from torch._C import dtype, set_flush_denormal +import dust3r.utils.po_utils.basic +import dust3r.utils.po_utils.improc +from dust3r.utils.po_utils.misc import farthest_point_sample_py +from dust3r.utils.po_utils.geom import apply_4x4_py, apply_pix_T_cam_py +import glob +import cv2 +from torchvision.transforms import ColorJitter, GaussianBlur +from functools import partial +import json + +from dust3r.datasets.base.base_stereo_view_dataset import BaseStereoViewDataset +from dust3r.utils.image import imread_cv2 +from dust3r.utils.misc import get_stride_distribution + +np.random.seed(125) +torch.multiprocessing.set_sharing_strategy('file_system') + + +def convert_ndc_to_pixel_intrinsics( + focal_length_ndc, principal_point_ndc, image_width, image_height, intrinsics_format='ndc_isotropic' +): + f_x_ndc, f_y_ndc = focal_length_ndc + c_x_ndc, c_y_ndc = principal_point_ndc + + # Compute half image size + half_image_size_wh_orig = np.array([image_width, image_height]) / 2.0 + + # Determine rescale factor based on intrinsics_format + if intrinsics_format.lower() == "ndc_norm_image_bounds": + rescale = half_image_size_wh_orig # [image_width/2, image_height/2] + elif intrinsics_format.lower() == "ndc_isotropic": + rescale = np.min(half_image_size_wh_orig) # scalar value + else: + raise ValueError(f"Unknown intrinsics format: {intrinsics_format}") + + # Convert focal length from NDC to pixel coordinates + if intrinsics_format.lower() == "ndc_norm_image_bounds": + focal_length_px = np.array([f_x_ndc, f_y_ndc]) * rescale + elif intrinsics_format.lower() == "ndc_isotropic": + focal_length_px = np.array([f_x_ndc, f_y_ndc]) * rescale + + # Convert principal point from NDC to pixel coordinates + principal_point_px = half_image_size_wh_orig - np.array([c_x_ndc, c_y_ndc]) * rescale + + # Construct the intrinsics matrix in pixel coordinates + K_pixel = np.array([ + [focal_length_px[0], 0, principal_point_px[0]], + [0, focal_length_px[1], principal_point_px[1]], + [0, 0, 1] + ]) + + return K_pixel + +def load_16big_png_depth(depth_png): + with Image.open(depth_png) as depth_pil: + # the image is stored with 16-bit depth but PIL reads it as I (32 bit). 
+        # we cast it to uint16, then reinterpret as float16, then cast to float32
+        depth = (
+            np.frombuffer(np.array(depth_pil, dtype=np.uint16), dtype=np.float16)
+            .astype(np.float32)
+            .reshape((depth_pil.size[1], depth_pil.size[0]))
+        )
+    return depth

+class DynamicReplicaDUSt3R(BaseStereoViewDataset):
+    def __init__(self,
+                 dataset_location='data/dynamic_replica',
+                 use_augs=False,
+                 S=2,
+                 strides=[1,2,3,4,5,6,7,8,9],
+                 clip_step=2,
+                 quick=False,
+                 verbose=False,
+                 dist_type=None,
+                 clip_step_last_skip = 0,
+                 *args,
+                 **kwargs
+                 ):
+
+        print('loading dynamic replica dataset...')
+        super().__init__(*args, **kwargs)
+        self.dataset_label = 'dynamic_replica'
+        self.S = S  # number of frames per clip
+        self.verbose = verbose
+
+        self.use_augs = use_augs
+
+        self.rgb_paths = []
+        self.depth_paths = []
+        self.normal_paths = []
+        self.traj_paths = []
+        self.annotation_paths = []
+        self.full_idxs = []
+        self.sample_stride = []
+        self.strides = strides
+
+        self.subdirs = []
+        self.sequences = []
+        self.subdirs.append(os.path.join(dataset_location))
+
+        anno_path = os.path.join(dataset_location, 'frame_annotations_train.json')
+        with open(anno_path, 'r') as f:
+            self.anno = json.load(f)
+
+        # organize anno by 'sequence_name'
+        anno_by_seq = {}
+        for a in self.anno:
+            seq_name = a['sequence_name']
+            if seq_name not in anno_by_seq:
+                anno_by_seq[seq_name] = []
+            anno_by_seq[seq_name].append(a)
+
+        for subdir in self.subdirs:
+            for seq in glob.glob(os.path.join(subdir, "*/")):
+                seq_name = seq.split('/')[-1]
+                self.sequences.append(seq)
+
+        # the annotation keys supersede the glob above; keep them as a list
+        # so that the `quick` slice below works
+        self.sequences = list(anno_by_seq.keys())
+        if self.verbose:
+            print(self.sequences)
+            print('found %d unique videos in %s ' % (len(self.sequences), dataset_location))
+
+        if quick:
+            self.sequences = self.sequences[1:2]
+
+        for seq in self.sequences:
+            if self.verbose:
+                print('seq', seq)
+
+            anno = anno_by_seq[seq]
+
+            for stride in strides:
+                for ii in range(0, len(anno)-self.S*max(stride, clip_step_last_skip)+1, clip_step):
+                    full_idx = ii + np.arange(self.S)*stride
+                    self.rgb_paths.append([os.path.join(dataset_location, anno[idx]['image']['path']) for idx in full_idx])
+                    self.depth_paths.append([os.path.join(dataset_location, anno[idx]['depth']['path']) for idx in full_idx])
+                    # check if all paths are valid; if not, skip this clip
+                    if not all([os.path.exists(p) for p in self.rgb_paths[-1]]) or not all([os.path.exists(p) for p in self.depth_paths[-1]]):
+                        self.rgb_paths.pop()
+                        self.depth_paths.pop()
+                        continue
+                    self.annotation_paths.append([anno[idx]['viewpoint'] for idx in full_idx])
+                    self.full_idxs.append(full_idx)
+                    self.sample_stride.append(stride)
+                    if self.verbose:
+                        sys.stdout.write('.')
+                        sys.stdout.flush()
+
+        self.stride_counts = {}
+        self.stride_idxs = {}
+        for stride in strides:
+            self.stride_counts[stride] = 0
+            self.stride_idxs[stride] = []
+        for i, stride in enumerate(self.sample_stride):
+            self.stride_counts[stride] += 1
+            self.stride_idxs[stride].append(i)
+        print('stride counts:', self.stride_counts)
+
+        if len(strides) > 1 and dist_type is not None:
+            self._resample_clips(strides, dist_type)
+
+        print('collected %d clips of length %d in %s' % (
+            len(self.rgb_paths), self.S, dataset_location,))
+
+    def _resample_clips(self, strides, dist_type):
+        # Get the distribution over strides, and sample clips based on it
+        dist = get_stride_distribution(strides, dist_type=dist_type)
+        dist = dist / np.max(dist)
+        max_num_clips = self.stride_counts[strides[np.argmax(dist)]]
+        num_clips_each_stride = [min(self.stride_counts[stride], int(dist[i]*max_num_clips)) for i, stride in
enumerate(strides)] + print('resampled_num_clips_each_stride:', num_clips_each_stride) + resampled_idxs = [] + for i, stride in enumerate(strides): + resampled_idxs += np.random.choice(self.stride_idxs[stride], num_clips_each_stride[i], replace=False).tolist() + + self.rgb_paths = [self.rgb_paths[i] for i in resampled_idxs] + self.depth_paths = [self.depth_paths[i] for i in resampled_idxs] + self.annotation_paths = [self.annotation_paths[i] for i in resampled_idxs] + self.full_idxs = [self.full_idxs[i] for i in resampled_idxs] + self.sample_stride = [self.sample_stride[i] for i in resampled_idxs] + + def __len__(self): + return len(self.rgb_paths) + + def _get_views(self, index, resolution, rng): + + rgb_paths = self.rgb_paths[index] + depth_paths = self.depth_paths[index] + full_idx = self.full_idxs[index] + annotations = self.annotation_paths[index] + focals = [np.array(annotations[i]['focal_length']).astype(np.float32) for i in range(2)] + pp = [np.array(annotations[i]['principal_point']).astype(np.float32) for i in range(2)] + intrinsics_format = [annotations[i]['intrinsics_format'] for i in range(2)] + cams_T_world_R = [np.array(annotations[i]['R']).astype(np.float32) for i in range(2)] + cams_T_world_t = [np.array(annotations[i]['T']).astype(np.float32) for i in range(2)] + + views = [] + for i in range(2): + + impath = rgb_paths[i] + depthpath = depth_paths[i] + + # load camera params + R = cams_T_world_R[i] + t = cams_T_world_t[i] + camera_pose = np.eye(4, dtype=np.float32) + camera_pose[:3,:3] = R.T + camera_pose[:3,3] = -R.T @ t + + # load image and depth + rgb_image = imread_cv2(impath) + depthmap = load_16big_png_depth(depthpath) + + # load intrinsics + intrinsics = convert_ndc_to_pixel_intrinsics(focals[i], pp[i], rgb_image.shape[1], rgb_image.shape[0], + intrinsics_format=intrinsics_format[i]) + intrinsics = intrinsics.astype(np.float32) + + + rgb_image, depthmap, intrinsics = self._crop_resize_if_necessary( + rgb_image, depthmap, intrinsics, resolution, rng=rng, info=impath) + + views.append(dict( + img=rgb_image, + depthmap=depthmap, + camera_pose=camera_pose, + camera_intrinsics=intrinsics, + dataset=self.dataset_label, + label=rgb_paths[i].split('/')[-3], + instance=osp.split(rgb_paths[i])[1], + )) + return views + + +if __name__ == "__main__": + from dust3r.datasets.base.base_stereo_view_dataset import view_name + from dust3r.viz import SceneViz, auto_cam_size + from dust3r.utils.image import rgb + import gradio as gr + import random + + use_augs = False + S = 2 + strides = [1,2,3,4,5,6,7,8,9] + clip_step = 2 + quick = False # Set to True for quick testing + + def visualize_scene(idx): + views = dataset[idx] + assert len(views) == 2 + viz = SceneViz() + poses = [views[view_idx]['camera_pose'] for view_idx in [0, 1]] + cam_size = max(auto_cam_size(poses), 0.25) + for view_idx in [0, 1]: + pts3d = views[view_idx]['pts3d'] + valid_mask = views[view_idx]['valid_mask'] + colors = rgb(views[view_idx]['img']) + viz.add_pointcloud(pts3d, colors, valid_mask) + viz.add_camera(pose_c2w=views[view_idx]['camera_pose'], + focal=views[view_idx]['camera_intrinsics'][0, 0], + color=(255, 0, 0), + image=colors, + cam_size=cam_size) + os.makedirs('./tmp/replica', exist_ok=True) + path = f"./tmp/replica/replica_scene_{idx}.glb" + return viz.save_glb(path) + + dataset = DynamicReplicaDUSt3R( + use_augs=use_augs, + S=S, + strides=strides, + clip_step=clip_step, + quick=quick, + verbose=False, + resolution=512, + aug_crop=16, + dist_type='linear_1_9', + aug_focal=1.0, + z_far=80) + + idxs = 
np.arange(0, len(dataset)-1, (len(dataset)-1)//10)
+    # idx = random.randint(0, len(dataset)-1)
+    # idx = 0
+    for idx in idxs:
+        print(f"Visualizing scene {idx}...")
+        visualize_scene(idx)
diff --git a/dynamic_predictor/dust3r/datasets/habitat.py b/dynamic_predictor/dust3r/datasets/habitat.py
new file mode 100644
index 0000000000000000000000000000000000000000..11ce8a0ffb2134387d5fb794df89834db3ea8c9f
--- /dev/null
+++ b/dynamic_predictor/dust3r/datasets/habitat.py
@@ -0,0 +1,107 @@
+# Copyright (C) 2024-present Naver Corporation. All rights reserved.
+# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
+#
+# --------------------------------------------------------
+# Dataloader for preprocessed habitat
+# dataset at https://github.com/facebookresearch/habitat-sim/blob/main/DATASETS.md
+# See datasets_preprocess/habitat for more details
+# --------------------------------------------------------
+import os.path as osp
+import os
+os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"  # noqa
+import cv2  # noqa
+import numpy as np
+from PIL import Image
+import json
+
+from dust3r.datasets.base.base_stereo_view_dataset import BaseStereoViewDataset
+
+
+class Habitat(BaseStereoViewDataset):
+    def __init__(self, size, *args, ROOT, **kwargs):
+        self.ROOT = ROOT
+        super().__init__(*args, **kwargs)
+        assert self.split is not None
+        # loading list of scenes
+        with open(osp.join(self.ROOT, f'Habitat_{size}_scenes_{self.split}.txt')) as f:
+            self.scenes = f.read().splitlines()
+        self.instances = list(range(1, 5))
+
+    def filter_scene(self, label, instance=None):
+        if instance:
+            subscene, instance = instance.split('_')
+            label += '/' + subscene
+            self.instances = [int(instance) - 1]
+        valid = np.bool_([scene.startswith(label) for scene in self.scenes])
+        assert sum(valid), f'no scene was selected for {label=} {instance=}'
+        self.scenes = [scene for i, scene in enumerate(self.scenes) if valid[i]]
+
+    def _get_views(self, idx, resolution, rng):
+        scene = self.scenes[idx]
+        data_path, key = osp.split(osp.join(self.ROOT, scene))
+        views = []
+        two_random_views = [0, rng.choice(self.instances)]  # view 0 is connected with all other views
+        for view_index in two_random_views:
+            # load the view (and use the next one if this one's broken)
+            for ii in range(view_index, view_index + 5):
+                image, depthmap, intrinsics, camera_pose = self._load_one_view(data_path, key, ii % 5, resolution, rng)
+                if np.isfinite(camera_pose).all():
+                    break
+            views.append(dict(
+                img=image,
+                depthmap=depthmap,
+                camera_pose=camera_pose,  # cam2world
+                camera_intrinsics=intrinsics,
+                dataset='Habitat',
+                label=osp.relpath(data_path, self.ROOT),
+                instance=f"{key}_{view_index}"))
+        return views
+
+    def _load_one_view(self, data_path, key, view_index, resolution, rng):
+        view_index += 1  # file indices start at 1
+        impath = osp.join(data_path, f"{key}_{view_index}.jpeg")
+        image = Image.open(impath)
+
+        depthmap_filename = osp.join(data_path, f"{key}_{view_index}_depth.exr")
+        depthmap = cv2.imread(depthmap_filename, cv2.IMREAD_GRAYSCALE | cv2.IMREAD_ANYDEPTH)
+
+        camera_params_filename = osp.join(data_path, f"{key}_{view_index}_camera_params.json")
+        with open(camera_params_filename, 'r') as f:
+            camera_params = json.load(f)
+
+        intrinsics = np.float32(camera_params['camera_intrinsics'])
+        camera_pose = np.eye(4, dtype=np.float32)
+        camera_pose[:3, :3] = camera_params['R_cam2world']
+        camera_pose[:3, 3] = camera_params['t_cam2world']
+
+        image, depthmap, intrinsics = self._crop_resize_if_necessary(
+            image, depthmap, intrinsics,
resolution, rng, info=impath) + return image, depthmap, intrinsics, camera_pose + + +if __name__ == "__main__": + from dust3r.datasets.base.base_stereo_view_dataset import view_name + from dust3r.viz import SceneViz, auto_cam_size + from dust3r.utils.image import rgb + + dataset = Habitat(1_000_000, split='train', ROOT="data/habitat_processed", + resolution=224, aug_crop=16) + + for idx in np.random.permutation(len(dataset)): + views = dataset[idx] + assert len(views) == 2 + print(view_name(views[0]), view_name(views[1])) + viz = SceneViz() + poses = [views[view_idx]['camera_pose'] for view_idx in [0, 1]] + cam_size = max(auto_cam_size(poses), 0.001) + for view_idx in [0, 1]: + pts3d = views[view_idx]['pts3d'] + valid_mask = views[view_idx]['valid_mask'] + colors = rgb(views[view_idx]['img']) + viz.add_pointcloud(pts3d, colors, valid_mask) + viz.add_camera(pose_c2w=views[view_idx]['camera_pose'], + focal=views[view_idx]['camera_intrinsics'][0, 0], + color=(idx * 255, (1 - idx) * 255, 0), + image=colors, + cam_size=cam_size) + viz.show() diff --git a/dynamic_predictor/dust3r/datasets/megadepth.py b/dynamic_predictor/dust3r/datasets/megadepth.py new file mode 100644 index 0000000000000000000000000000000000000000..8131498b76d855e5293fe79b3686fc42bf87eea8 --- /dev/null +++ b/dynamic_predictor/dust3r/datasets/megadepth.py @@ -0,0 +1,123 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). +# +# -------------------------------------------------------- +# Dataloader for preprocessed MegaDepth +# dataset at https://www.cs.cornell.edu/projects/megadepth/ +# See datasets_preprocess/preprocess_megadepth.py +# -------------------------------------------------------- +import os.path as osp +import numpy as np + +from dust3r.datasets.base.base_stereo_view_dataset import BaseStereoViewDataset +from dust3r.utils.image import imread_cv2 + + +class MegaDepth(BaseStereoViewDataset): + def __init__(self, *args, split, ROOT, **kwargs): + self.ROOT = ROOT + super().__init__(*args, **kwargs) + self.loaded_data = self._load_data(self.split) + + if self.split is None: + pass + elif self.split == 'train': + self.select_scene(('0015', '0022'), opposite=True) + elif self.split == 'val': + self.select_scene(('0015', '0022')) + else: + raise ValueError(f'bad {self.split=}') + + def _load_data(self, split): + with np.load(osp.join(self.ROOT, 'all_metadata.npz')) as data: + self.all_scenes = data['scenes'] + self.all_images = data['images'] + self.pairs = data['pairs'] + + def __len__(self): + return len(self.pairs) + + def get_stats(self): + return f'{len(self)} pairs from {len(self.all_scenes)} scenes' + + def select_scene(self, scene, *instances, opposite=False): + scenes = (scene,) if isinstance(scene, str) else tuple(scene) + scene_id = [s.startswith(scenes) for s in self.all_scenes] + assert any(scene_id), 'no scene found' + + valid = np.in1d(self.pairs['scene_id'], np.nonzero(scene_id)[0]) + if instances: + image_id = [i.startswith(instances) for i in self.all_images] + image_id = np.nonzero(image_id)[0] + assert len(image_id), 'no instance found' + # both together? 
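+            # with two instances, keep only pairs where both images match;
+            # with a single instance, a pair is kept if either image matches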
+ if len(instances) == 2: + valid &= np.in1d(self.pairs['im1_id'], image_id) & np.in1d(self.pairs['im2_id'], image_id) + else: + valid &= np.in1d(self.pairs['im1_id'], image_id) | np.in1d(self.pairs['im2_id'], image_id) + + if opposite: + valid = ~valid + assert valid.any() + self.pairs = self.pairs[valid] + + def _get_views(self, pair_idx, resolution, rng): + scene_id, im1_id, im2_id, score = self.pairs[pair_idx] + + scene, subscene = self.all_scenes[scene_id].split() + seq_path = osp.join(self.ROOT, scene, subscene) + + views = [] + + for im_id in [im1_id, im2_id]: + img = self.all_images[im_id] + try: + image = imread_cv2(osp.join(seq_path, img + '.jpg')) + depthmap = imread_cv2(osp.join(seq_path, img + ".exr")) + camera_params = np.load(osp.join(seq_path, img + ".npz")) + except Exception as e: + raise OSError(f'cannot load {img}, got exception {e}') + + intrinsics = np.float32(camera_params['intrinsics']) + camera_pose = np.float32(camera_params['cam2world']) + + image, depthmap, intrinsics = self._crop_resize_if_necessary( + image, depthmap, intrinsics, resolution, rng, info=(seq_path, img)) + + views.append(dict( + img=image, + depthmap=depthmap, + camera_pose=camera_pose, # cam2world + camera_intrinsics=intrinsics, + dataset='MegaDepth', + label=osp.relpath(seq_path, self.ROOT), + instance=img)) + + return views + + +if __name__ == "__main__": + from dust3r.datasets.base.base_stereo_view_dataset import view_name + from dust3r.viz import SceneViz, auto_cam_size + from dust3r.utils.image import rgb + + dataset = MegaDepth(split='train', ROOT="data/megadepth_processed", resolution=224, aug_crop=16) + + for idx in np.random.permutation(len(dataset)): + views = dataset[idx] + assert len(views) == 2 + print(idx, view_name(views[0]), view_name(views[1])) + viz = SceneViz() + poses = [views[view_idx]['camera_pose'] for view_idx in [0, 1]] + cam_size = max(auto_cam_size(poses), 0.001) + for view_idx in [0, 1]: + pts3d = views[view_idx]['pts3d'] + valid_mask = views[view_idx]['valid_mask'] + colors = rgb(views[view_idx]['img']) + viz.add_pointcloud(pts3d, colors, valid_mask) + viz.add_camera(pose_c2w=views[view_idx]['camera_pose'], + focal=views[view_idx]['camera_intrinsics'][0, 0], + color=(idx * 255, (1 - idx) * 255, 0), + image=colors, + cam_size=cam_size) + viz.show() diff --git a/dynamic_predictor/dust3r/datasets/pointodyssey.py b/dynamic_predictor/dust3r/datasets/pointodyssey.py new file mode 100644 index 0000000000000000000000000000000000000000..630db3d03d70127cfad6384a84a04f685edb15bb --- /dev/null +++ b/dynamic_predictor/dust3r/datasets/pointodyssey.py @@ -0,0 +1,479 @@ +import sys +sys.path.append('.') +import os +import torch +import numpy as np +import os.path as osp +import torchvision.transforms as transforms +import torch.nn.functional as F +from PIL import Image +from torch._C import dtype, set_flush_denormal +import dust3r.utils.po_utils.basic +import dust3r.utils.po_utils.improc +from dust3r.utils.po_utils.misc import farthest_point_sample_py +from dust3r.utils.po_utils.geom import apply_4x4_py, apply_pix_T_cam_py +import glob +import cv2 +from torchvision.transforms import ColorJitter, GaussianBlur +from functools import partial +from dust3r.datasets.base.base_stereo_view_dataset import BaseStereoViewDataset, is_good_type, transpose_to_landscape +from dust3r.utils.image import imread_cv2 +from dust3r.utils.misc import get_stride_distribution +from dust3r.datasets.utils.geom import apply_4x4_py, realative_T_py +from dust3r.utils.geometry import 
depthmap_to_absolute_camera_coordinates +from mpl_toolkits.mplot3d import Axes3D +import matplotlib.pyplot as plt +from scipy.interpolate import griddata + + +from pyntcloud import PyntCloud +import pandas as pd + +np.random.seed(125) +torch.multiprocessing.set_sharing_strategy('file_system') + + + + +class PointOdysseyDUSt3R(BaseStereoViewDataset): + def __init__(self, + dataset_location='data/pointodyssey', + dset='train', + use_augs=False, + S=2, + N=16, + strides=[1,2,3,4,5,6,7,8,9], + clip_step=2, + quick=False, + verbose=False, + dist_type=None, + clip_step_last_skip = 0, + motion_thresh = 1e-6, + *args, + **kwargs + ): + + print('loading pointodyssey dataset...') + super().__init__(*args, **kwargs) + self.dataset_label = 'pointodyssey' + self.split = dset + self.S = S # stride + self.N = N # min num points + self.verbose = verbose + self.motion_thresh = motion_thresh + self.use_augs = use_augs + self.dset = dset + + self.rgb_paths = [] + self.depth_paths = [] + self.normal_paths = [] + self.traj_2d_paths = [] + self.traj_3d_paths = [] + self.extrinsic_paths = [] + self.intrinsic_paths = [] + self.masks_paths = [] + self.valids_paths = [] + self.visibs_paths = [] + self.annotation_paths = [] + self.full_idxs = [] + self.sample_stride = [] + self.strides = strides + + self.subdirs = [] + self.sequences = [] + self.subdirs.append(os.path.join(dataset_location, dset)) + + for subdir in self.subdirs: + for seq in glob.glob(os.path.join(subdir, "*/")): + seq_name = seq.split('/')[-1] + self.sequences.append(seq) + + self.sequences = sorted(self.sequences) + + if quick: + self.sequences = self.sequences[1:2] + + if self.verbose: + print(self.sequences) + print('found %d unique videos in %s (dset=%s)' % (len(self.sequences), dataset_location, dset)) + + ## load trajectories + print('loading trajectories...') + + + + for seq in self.sequences: + if self.verbose: + print('seq', seq) + + rgb_path = os.path.join(seq, 'rgbs') + info_path = os.path.join(seq, 'info.npz') + annotations_path = os.path.join(seq, 'anno.npz') + + if os.path.isfile(info_path) and os.path.isfile(annotations_path): + + traj_3d_files = glob.glob(os.path.join(seq, 'trajs_3d', '*.npy')) + if len(traj_3d_files): + traj_3d_files_0 = np.load(traj_3d_files[0], allow_pickle=True) + trajs_3d_shape = traj_3d_files_0.shape[0] + else: + trajs_3d_shape = 0 + + if len(traj_3d_files) and trajs_3d_shape > self.N: + + for stride in strides: + for ii in range(0,len(os.listdir(rgb_path))-self.S*max(stride,clip_step_last_skip)+1, clip_step): + full_idx = ii + np.arange(self.S)*stride + self.rgb_paths.append([os.path.join(seq, 'rgbs', 'rgb_%05d.jpg' % idx) for idx in full_idx]) + self.depth_paths.append([os.path.join(seq, 'depths', 'depth_%05d.png' % idx) for idx in full_idx]) + self.normal_paths.append([os.path.join(seq, 'normals', 'normal_%05d.jpg' % idx) for idx in full_idx]) + # self.traj_2d_paths.append([os.path.join(seq, 'trajs_2d', 'traj_2d_%05d.npy' % idx) for idx in full_idx]) + self.traj_3d_paths.append([os.path.join(seq, 'trajs_3d', 'traj_3d_%05d.npy' % idx) for idx in full_idx]) + self.extrinsic_paths.append([os.path.join(seq, 'extrinsics', 'extrinsic_%05d.npy' % idx) for idx in full_idx]) + self.intrinsic_paths.append([os.path.join(seq, 'intrinsics', 'intrinsic_%05d.npy' % idx) for idx in full_idx]) + self.masks_paths.append([os.path.join(seq, 'masks', 'mask_%05d.png' % idx) for idx in full_idx]) + self.valids_paths.append([os.path.join(seq, 'valids', 'valid_%05d.npy' % idx) for idx in full_idx]) + 
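+                        # every modality list is indexed by the same full_idx frame numbers;
+                        # e.g. with S=2, stride=3, clip_step=2 the clips cover pairs (0, 3), (2, 5), (4, 7), ...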
self.visibs_paths.append([os.path.join(seq, 'visibs', 'visib_%05d.npy' % idx) for idx in full_idx]) + + self.full_idxs.append(full_idx) + self.sample_stride.append(stride) + if self.verbose: + sys.stdout.write('.') + sys.stdout.flush() + elif self.verbose: + print('rejecting seq for missing 3d') + elif self.verbose: + print('rejecting seq for missing info or anno') + + self.stride_counts = {} + self.stride_idxs = {} + for stride in strides: + self.stride_counts[stride] = 0 + self.stride_idxs[stride] = [] + for i, stride in enumerate(self.sample_stride): + self.stride_counts[stride] += 1 + self.stride_idxs[stride].append(i) + print('stride counts:', self.stride_counts) + + if len(strides) > 1 and dist_type is not None: + self._resample_clips(strides, dist_type) + + print('collected %d clips of length %d in %s (dset=%s)' % ( + len(self.rgb_paths), self.S, dataset_location, dset)) + + def _resample_clips(self, strides, dist_type): + + # Get distribution of strides, and sample based on that + dist = get_stride_distribution(strides, dist_type=dist_type) + dist = dist / np.max(dist) + max_num_clips = self.stride_counts[strides[np.argmax(dist)]] + num_clips_each_stride = [min(self.stride_counts[stride], int(dist[i]*max_num_clips)) for i, stride in enumerate(strides)] + print('resampled_num_clips_each_stride:', num_clips_each_stride) + resampled_idxs = [] + for i, stride in enumerate(strides): + resampled_idxs += np.random.choice(self.stride_idxs[stride], num_clips_each_stride[i], replace=False).tolist() + + self.rgb_paths = [self.rgb_paths[i] for i in resampled_idxs] + self.depth_paths = [self.depth_paths[i] for i in resampled_idxs] + self.normal_paths = [self.normal_paths[i] for i in resampled_idxs] + # self.traj_2d_paths = [self.traj_2d_paths[i] for i in resampled_idxs] + self.traj_3d_paths = [self.traj_3d_paths[i] for i in resampled_idxs] + self.extrinsic_paths = [self.extrinsic_paths[i] for i in resampled_idxs] + self.intrinsic_paths = [self.intrinsic_paths[i] for i in resampled_idxs] + self.full_idxs = [self.full_idxs[i] for i in resampled_idxs] + self.sample_stride = [self.sample_stride[i] for i in resampled_idxs] + self.masks_paths = [self.masks_paths[i] for i in resampled_idxs] + self.valids_paths = [self.valids_paths[i] for i in resampled_idxs] + self.visibs_paths = [self.visibs_paths[i] for i in resampled_idxs] + + def __len__(self): + return len(self.rgb_paths) + + def _get_views(self, index, resolution, rng): + + rgb_paths = self.rgb_paths[index] + depth_paths = self.depth_paths[index] + # normal_paths = self.normal_paths[index] + traj_3d_paths = self.traj_3d_paths[index] + extrinsic_paths = self.extrinsic_paths[index] + intrinsic_paths = self.intrinsic_paths[index] + masks_paths = self.masks_paths[index] + valids_paths = self.valids_paths[index] + visibs_paths = self.visibs_paths[index] + + # full_idx = self.full_idxs[index] + + + + + traj_3d = [np.load(traj_3d_path, allow_pickle=True) for traj_3d_path in traj_3d_paths] + pix_T_cams = [np.load(intrinsic_path, allow_pickle=True) for intrinsic_path in intrinsic_paths] + cams_T_world = [np.load(extrinsic_path, allow_pickle=True) for extrinsic_path in extrinsic_paths] + + # motion_vector = traj_3d[0] - traj_3d[1] + # motion_vector_norm = np.linalg.norm(motion_vector, axis=-1) + # motion_mask_3d = motion_vector_norm > self.motion_thresh + + motion_mask_3d = (traj_3d[0]==traj_3d[1]).sum(axis=1)!=3 + # # Project motion_mask_3d to camera space + # traj_3d_cam_space = apply_4x4_py(cams_T_world[0], traj_3d[0]) + # motion_mask_3d_cam_space = 
apply_pix_T_cam_py(pix_T_cams[0], traj_3d_cam_space)
+        # rgb_image = imread_cv2(rgb_paths[0])
+        # rgb_image2 = imread_cv2(rgb_paths[1])
+        # height, width, _ = rgb_image.shape
+
+        # # Filter motion_mask_3d_cam_space to be within image boundaries
+        # motion_mask_3d_cam_space = np.round(motion_mask_3d_cam_space).astype(int)
+        # x, y = motion_mask_3d_cam_space[:, 0], motion_mask_3d_cam_space[:, 1]
+        # valid_mask = (x >= 0) & (x < width) & (y >= 0) & (y < height) & valid_mask & visib_mask
+        # motion_mask_3d_cam_space = motion_mask_3d_cam_space[valid_mask]
+
+        # motion_mask = np.zeros_like(rgb_image, dtype=np.float32)
+        # motion_mask[motion_mask_3d_cam_space[:, 1], motion_mask_3d_cam_space[:, 0]] = [255, 255, 255]
+        # # Save the RGB image and motion mask
+        # rgb_image_path = os.path.join('tmp', '%05d_rgb.jpg' % index)
+        # motion_mask_path = os.path.join('tmp', '%05d_motion_mask.png' % index)
+        # sem_path = os.path.join('tmp', '%05d_sem.png' % index)
+        # rgb_image2_path = os.path.join('tmp', '%05d_rgb2.jpg' % index)
+        # cv2.imwrite(rgb_image_path, rgb_image)
+        # cv2.imwrite(motion_mask_path, motion_mask)
+        # cv2.imwrite(sem_path, sem_mask)
+        # cv2.imwrite(rgb_image2_path, rgb_image2)
+        # print(rgb_image_path)
+        # print(motion_mask_path)
+        # print(sem_path)
+        # print(rgb_image2_path)
+
+        # # Create a DataFrame for the point cloud
+        # points = traj_3d[0]
+        # colors = np.zeros_like(points)
+        # colors[motion_mask_3d] = [255, 0, 0]   # red for moving points
+        # colors[~motion_mask_3d] = [0, 0, 0]    # black for static points
+
+        # point_cloud_df = pd.DataFrame(
+        #     np.hstack((points, colors)),
+        #     columns=["x", "y", "z", "red", "green", "blue"]
+        # )
+
+        # # Create a PyntCloud object
+        # point_cloud = PyntCloud(point_cloud_df)
+        # point_cloud.plot()
+
+        # try ten samples to see if the motion mask is correct.
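+        # a minimal sketch of what motion_mask_3d above computes, assuming each
+        # traj_3d[t] is an (N, 3) array of per-point world coordinates: a point
+        # is flagged dynamic iff any of its three coordinates changed between
+        # the two frames, e.g.
+        #   a = np.array([[0., 0., 0.], [1., 1., 1.]])
+        #   b = np.array([[0., 0., 0.], [1., 2., 1.]])
+        #   (a == b).sum(axis=1) != 3   # -> array([False,  True])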
+ + views = [] + for i in range(2): + + impath = rgb_paths[i] + depthpath = depth_paths[i] + # masks_path = masks_paths[i] + # valids_path = valids_paths[i] + # visibs_path = visibs_paths[i] + + # load camera params + extrinsics = cams_T_world[i] + R = extrinsics[:3,:3] + t = extrinsics[:3,3] + camera_pose = np.eye(4, dtype=np.float32) + camera_pose[:3,:3] = R.T + camera_pose[:3,3] = -R.T @ t + intrinsics = pix_T_cams[i] + + # load image and depth + rgb_image = imread_cv2(impath) + # masks_image = imread_cv2(masks_path) + + + depth16 = cv2.imread(depthpath, cv2.IMREAD_ANYDEPTH) + depthmap = depth16.astype(np.float32) / 65535.0 * 1000.0 # 1000 is the max depth in the dataset + + # masks_image, _, _ = self._crop_resize_if_necessary( + # masks_image, depthmap, intrinsics, resolution, rng=rng, info=impath) + rgb_image, depthmap, intrinsics = self._crop_resize_if_necessary( + rgb_image, depthmap, intrinsics, resolution, rng=rng, info=impath) + + + views.append(dict( + img=rgb_image, + # mask=masks_image, + depthmap=depthmap, + camera_pose=camera_pose, + camera_intrinsics=intrinsics, + dataset=self.dataset_label, + label=rgb_paths[i].split('/')[-3], + instance=osp.split(rgb_paths[i])[1], + )) + return views, motion_mask_3d, traj_3d + + def __getitem__(self, idx): + if isinstance(idx, tuple): + # the idx is specifying the aspect-ratio + idx, ar_idx = idx + else: + assert len(self._resolutions) == 1 + ar_idx = 0 + + # set-up the rng + if self.seed: # reseed for each __getitem__ + self._rng = np.random.default_rng(seed=self.seed + idx) + elif not hasattr(self, '_rng'): + seed = torch.initial_seed() # this is different for each dataloader process + self._rng = np.random.default_rng(seed=seed) + + # over-loaded code + resolution = self._resolutions[ar_idx] # DO NOT CHANGE THIS (compatible with BatchedRandomSampler) + views, motion_mask_3d, traj_3d = self._get_views(idx, resolution, self._rng) + assert len(views) == self.num_views + + # check data-types + # img = [] + # mask = [] + # mmask_save = [] + + for v, view in enumerate(views): + assert 'pts3d' not in view, f"pts3d should not be there, they will be computed afterwards based on intrinsics+depthmap for view {view_name(view)}" + view['idx'] = (idx, ar_idx, v) + + # img.append(np.array(view['img'])) + # mask.append(np.array(view['mask'])) + # encode the image + width, height = view['img'].size + view['true_shape'] = np.int32((height, width)) + view['img'] = self.transform(view['img']) + # view['mask'] = self.transform(view['mask']) + + assert 'camera_intrinsics' in view + if 'camera_pose' not in view: + view['camera_pose'] = np.full((4, 4), np.nan, dtype=np.float32) + else: + assert np.isfinite(view['camera_pose']).all(), f'NaN in camera pose for view {view_name(view)}' + assert 'pts3d' not in view + assert 'valid_mask' not in view + assert np.isfinite(view['depthmap']).all(), f'NaN in depthmap for view {view_name(view)}' + view['z_far'] = self.z_far + pts3d, valid_mask = depthmap_to_absolute_camera_coordinates(**view) + + view['pts3d'] = pts3d + view['valid_mask'] = valid_mask & np.isfinite(pts3d).all(axis=-1) + + pts3d = view['pts3d'].copy() + pts3d[~view['valid_mask']]=0 + pts3d = pts3d.reshape(-1, pts3d.shape[-1]) + + try: + mmask = griddata(traj_3d[v], motion_mask_3d, pts3d, method='nearest', fill_value=0).astype(np.float32) + mmask = np.clip(mmask, 0, 1) + except Exception as e: + print(f"Failed to compute mmask for view {v} at index {idx}: {e}") + mmask = np.zeros((pts3d.shape[0],), dtype=np.float32) + + + view['dynamic_mask'] = 
mmask.reshape(valid_mask.shape)
+
+
+            # mmask_save.append((mmask.reshape(valid_mask.shape) * 255).astype(np.uint8))
+
+            # visualize masks
+
+            # # visualization
+            # colors = np.zeros((pts3d.shape[0], 3))
+            # colors[:, 0] = 255 * mmask  # red channel weighted by mmask
+
+            # point_cloud_df = pd.DataFrame(
+            #     np.hstack((pts3d, colors)),
+            #     columns=["x", "y", "z", "red", "green", "blue"]
+            # )
+
+            # point_cloud = PyntCloud(point_cloud_df)
+            # point_cloud.to_file(f"./tmp/po/point_cloud_{idx}.ply")
+
+            # check all datatypes
+            for key, val in view.items():
+                res, err_msg = is_good_type(key, val)
+                assert res, f"{err_msg} with {key}={val} for view {view_name(view)}"
+                # if val.dtype in (torch.bool, np.float32, torch.float32, bool, np.int32, np.int64, np.uint8):
+                #     print(f"{key}={val.shape} for view {view['label']}")
+            K = view['camera_intrinsics']
+
+            # Concatenate images, masks, and motion masks into one image and save to tmp/
+            # concatenated_images = []
+            # for i in range(len(img)):
+            #     concatenated_image = np.concatenate((img[i], mask[i], mmask_save[i][...,None]*[255,255,255]), axis=0)
+            #     concatenated_images.append(concatenated_image)
+
+            # concatenated_images = np.concatenate(concatenated_images, axis=1)
+            # concatenated_image_path = os.path.join('tmp', f'{idx}_concatenated.jpg')
+            # cv2.imwrite(concatenated_image_path, concatenated_images)
+
+        # last thing done!
+        for view in views:
+            # transpose to make sure all views are the same size
+            transpose_to_landscape(view)
+            # this makes it possible to check whether the RNG is in the same state each time
+            view['rng'] = int.from_bytes(self._rng.bytes(4), 'big')
+        return views
+
+
+if __name__ == "__main__":
+    from dust3r.datasets.base.base_stereo_view_dataset import view_name
+    from dust3r.viz import SceneViz, auto_cam_size
+    from dust3r.utils.image import rgb
+    import gradio as gr
+    import random
+
+
+    dataset_location = 'data/point_odyssey'  # Change this to the correct path
+    dset = 'train'
+    use_augs = False
+    S = 2
+    N = 1
+    strides = [1,2,3,4,5,6,7,8,9]
+    clip_step = 2
+    quick = False  # Set to True for quick testing
+
+    def visualize_scene(idx):
+        views = dataset[idx]
+        assert len(views) == 2
+        viz = SceneViz()
+        poses = [views[view_idx]['camera_pose'] for view_idx in [0, 1]]
+        cam_size = max(auto_cam_size(poses), 0.25)
+        for view_idx in [0, 1]:
+            pts3d = views[view_idx]['pts3d']
+            valid_mask = views[view_idx]['valid_mask']
+            colors = rgb(views[view_idx]['img'])
+            viz.add_pointcloud(pts3d, colors, valid_mask)
+            viz.add_camera(pose_c2w=views[view_idx]['camera_pose'],
+                           focal=views[view_idx]['camera_intrinsics'][0, 0],
+                           color=(255, 0, 0),
+                           image=colors,
+                           cam_size=cam_size)
+        os.makedirs('./tmp/po', exist_ok=True)
+        path = f"./tmp/po/po_scene_{idx}.glb"
+        return viz.save_glb(path)
+
+    dataset = PointOdysseyDUSt3R(
+        dataset_location=dataset_location,
+        dset=dset,
+        use_augs=use_augs,
+        S=S,
+        N=N,
+        strides=strides,
+        clip_step=clip_step,
+        quick=quick,
+        verbose=False,
+        resolution=224,
+        aug_crop=16,
+        dist_type='linear_9_1',
+        aug_focal=1.5,
+        z_far=80)
+# around 514k samples
+
+    idxs = np.arange(0, len(dataset)-1, (len(dataset)-1)//10)
+    # idx = random.randint(0, len(dataset)-1)
+    # idx = 0
+    for idx in idxs:
+        print(f"Visualizing scene {idx}...")
+        visualize_scene(idx)
diff --git a/dynamic_predictor/dust3r/datasets/scannetpp.py b/dynamic_predictor/dust3r/datasets/scannetpp.py
new file mode 100644
index 0000000000000000000000000000000000000000..520deedd0eb8cba8663af941731d89e0b2e71a80
--- /dev/null
+++
b/dynamic_predictor/dust3r/datasets/scannetpp.py @@ -0,0 +1,96 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). +# +# -------------------------------------------------------- +# Dataloader for preprocessed scannet++ +# dataset at https://github.com/scannetpp/scannetpp - non-commercial research and educational purposes +# https://kaldir.vc.in.tum.de/scannetpp/static/scannetpp-terms-of-use.pdf +# See datasets_preprocess/preprocess_scannetpp.py +# -------------------------------------------------------- +import os.path as osp +import cv2 +import numpy as np + +from dust3r.datasets.base.base_stereo_view_dataset import BaseStereoViewDataset +from dust3r.utils.image import imread_cv2 + + +class ScanNetpp(BaseStereoViewDataset): + def __init__(self, *args, ROOT, **kwargs): + self.ROOT = ROOT + super().__init__(*args, **kwargs) + assert self.split == 'train' + self.loaded_data = self._load_data() + + def _load_data(self): + with np.load(osp.join(self.ROOT, 'all_metadata.npz')) as data: + self.scenes = data['scenes'] + self.sceneids = data['sceneids'] + self.images = data['images'] + self.intrinsics = data['intrinsics'].astype(np.float32) + self.trajectories = data['trajectories'].astype(np.float32) + self.pairs = data['pairs'][:, :2].astype(int) + + def __len__(self): + return len(self.pairs) + + def _get_views(self, idx, resolution, rng): + + image_idx1, image_idx2 = self.pairs[idx] + + views = [] + for view_idx in [image_idx1, image_idx2]: + scene_id = self.sceneids[view_idx] + scene_dir = osp.join(self.ROOT, self.scenes[scene_id]) + + intrinsics = self.intrinsics[view_idx] + camera_pose = self.trajectories[view_idx] + basename = self.images[view_idx] + + # Load RGB image + rgb_image = imread_cv2(osp.join(scene_dir, 'images', basename + '.jpg')) + # Load depthmap + depthmap = imread_cv2(osp.join(scene_dir, 'depth', basename + '.png'), cv2.IMREAD_UNCHANGED) + depthmap = depthmap.astype(np.float32) / 1000 + depthmap[~np.isfinite(depthmap)] = 0 # invalid + + rgb_image, depthmap, intrinsics = self._crop_resize_if_necessary( + rgb_image, depthmap, intrinsics, resolution, rng=rng, info=view_idx) + + views.append(dict( + img=rgb_image, + depthmap=depthmap.astype(np.float32), + camera_pose=camera_pose.astype(np.float32), + camera_intrinsics=intrinsics.astype(np.float32), + dataset='ScanNet++', + label=self.scenes[scene_id] + '_' + basename, + instance=f'{str(idx)}_{str(view_idx)}', + )) + return views + + +if __name__ == "__main__": + from dust3r.datasets.base.base_stereo_view_dataset import view_name + from dust3r.viz import SceneViz, auto_cam_size + from dust3r.utils.image import rgb + + dataset = ScanNetpp(split='train', ROOT="data/scannetpp_processed", resolution=224, aug_crop=16) + + for idx in np.random.permutation(len(dataset)): + views = dataset[idx] + assert len(views) == 2 + print(view_name(views[0]), view_name(views[1])) + viz = SceneViz() + poses = [views[view_idx]['camera_pose'] for view_idx in [0, 1]] + cam_size = max(auto_cam_size(poses), 0.001) + for view_idx in [0, 1]: + pts3d = views[view_idx]['pts3d'] + valid_mask = views[view_idx]['valid_mask'] + colors = rgb(views[view_idx]['img']) + viz.add_pointcloud(pts3d, colors, valid_mask) + viz.add_camera(pose_c2w=views[view_idx]['camera_pose'], + focal=views[view_idx]['camera_intrinsics'][0, 0], + color=(idx*255, (1 - idx)*255, 0), + image=colors, + cam_size=cam_size) + viz.show() diff --git a/dynamic_predictor/dust3r/datasets/sintel.py 
b/dynamic_predictor/dust3r/datasets/sintel.py new file mode 100644 index 0000000000000000000000000000000000000000..36f5b2a1304130ef4ed114fada5833741c8dd5ef --- /dev/null +++ b/dynamic_predictor/dust3r/datasets/sintel.py @@ -0,0 +1,276 @@ +import sys +sys.path.append('.') +import os +import torch +import numpy as np +import os.path as osp +import glob +import PIL.Image +import torchvision.transforms as tvf + +from dust3r.datasets.base.base_stereo_view_dataset import BaseStereoViewDataset +from dust3r.utils.image import imread_cv2, crop_img +from dust3r.utils.misc import get_stride_distribution + +np.random.seed(125) +torch.multiprocessing.set_sharing_strategy('file_system') +TAG_FLOAT = 202021.25 +ImgNorm = tvf.Compose([tvf.ToTensor(), tvf.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) +ToTensor = tvf.ToTensor() + +def depth_read(filename): + """ Read depth data from file, return as numpy array. """ + f = open(filename,'rb') + check = np.fromfile(f,dtype=np.float32,count=1)[0] + assert check == TAG_FLOAT, ' depth_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? '.format(TAG_FLOAT,check) + width = np.fromfile(f,dtype=np.int32,count=1)[0] + height = np.fromfile(f,dtype=np.int32,count=1)[0] + size = width*height + assert width > 0 and height > 0 and size > 1 and size < 100000000, ' depth_read:: Wrong input size (width = {0}, height = {1}).'.format(width,height) + depth = np.fromfile(f,dtype=np.float32,count=-1).reshape((height,width)) + return depth + +def cam_read(filename): + """ Read camera data, return (M,N) tuple. + + M is the intrinsic matrix, N is the extrinsic matrix, so that + + x = M*N*X, + with x being a point in homogeneous image pixel coordinates, X being a + point in homogeneous world coordinates. + """ + f = open(filename,'rb') + check = np.fromfile(f,dtype=np.float32,count=1)[0] + assert check == TAG_FLOAT, ' cam_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? 
'.format(TAG_FLOAT,check) + M = np.fromfile(f,dtype='float64',count=9).reshape((3,3)) + N = np.fromfile(f,dtype='float64',count=12).reshape((3,4)) + return M,N + +class SintelDUSt3R(BaseStereoViewDataset): + def __init__(self, + dataset_location='data/sintel/training', + dset='clean', + use_augs=False, + S=2, + strides=[7], + clip_step=2, + quick=False, + verbose=False, + dist_type=None, + clip_step_last_skip = 0, + load_dynamic_mask=True, + *args, + **kwargs + ): + + print('loading sintel dataset...') + super().__init__(*args, **kwargs) + self.dataset_label = 'sintel' + self.split = dset + self.S = S # stride + self.verbose = verbose + self.load_dynamic_mask = load_dynamic_mask + + self.use_augs = use_augs + self.dset = dset + + self.rgb_paths = [] + self.depth_paths = [] + self.traj_paths = [] + self.annotation_paths = [] + self.dynamic_mask_paths = [] + self.full_idxs = [] + self.sample_stride = [] + self.strides = strides + + self.subdirs = [] + self.sequences = [] + self.subdirs.append(os.path.join(dataset_location, dset)) + + for subdir in self.subdirs: + for seq in glob.glob(os.path.join(subdir, "*/")): + self.sequences.append(seq) + + self.sequences = sorted(self.sequences) + if self.verbose: + print(self.sequences) + print('found %d unique videos in %s (dset=%s)' % (len(self.sequences), dataset_location, dset)) + + ## load trajectories + print('loading trajectories...') + + if quick: + self.sequences = self.sequences[1:2] + + for seq in self.sequences: + if self.verbose: + print('seq', seq) + + rgb_path = seq + depth_path = seq.replace(dset,'depth') + caminfo_path = seq.replace(dset,'camdata_left') + dynamic_mask_path = seq.replace(dset,'dynamic_label_perfect') + + for stride in strides: + for ii in range(1,len(os.listdir(rgb_path))-self.S*max(stride,clip_step_last_skip)+1, clip_step): + full_idx = ii + np.arange(self.S)*stride + self.rgb_paths.append([os.path.join(rgb_path, 'frame_%04d.png' % idx) for idx in full_idx]) + self.depth_paths.append([os.path.join(depth_path, 'frame_%04d.dpt' % idx) for idx in full_idx]) + self.annotation_paths.append([os.path.join(caminfo_path, 'frame_%04d.cam' % idx) for idx in full_idx]) + self.dynamic_mask_paths.append([os.path.join(dynamic_mask_path, 'frame_%04d.png' % idx) for idx in full_idx]) + self.full_idxs.append(full_idx) + self.sample_stride.append(stride) + if self.verbose: + sys.stdout.write('.') + sys.stdout.flush() + + self.stride_counts = {} + self.stride_idxs = {} + for stride in strides: + self.stride_counts[stride] = 0 + self.stride_idxs[stride] = [] + for i, stride in enumerate(self.sample_stride): + self.stride_counts[stride] += 1 + self.stride_idxs[stride].append(i) + print('stride counts:', self.stride_counts) + + if len(strides) > 1 and dist_type is not None: + self._resample_clips(strides, dist_type) + + print('collected %d clips of length %d in %s (dset=%s)' % ( + len(self.rgb_paths), self.S, dataset_location, dset)) + + def _resample_clips(self, strides, dist_type): + + # Get distribution of strides, and sample based on that + dist = get_stride_distribution(strides, dist_type=dist_type) + dist = dist / np.max(dist) + max_num_clips = self.stride_counts[strides[np.argmax(dist)]] + num_clips_each_stride = [min(self.stride_counts[stride], int(dist[i]*max_num_clips)) for i, stride in enumerate(strides)] + print('resampled_num_clips_each_stride:', num_clips_each_stride) + resampled_idxs = [] + for i, stride in enumerate(strides): + resampled_idxs += np.random.choice(self.stride_idxs[stride], num_clips_each_stride[i], 
replace=False).tolist() + + self.rgb_paths = [self.rgb_paths[i] for i in resampled_idxs] + self.depth_paths = [self.depth_paths[i] for i in resampled_idxs] + self.annotation_paths = [self.annotation_paths[i] for i in resampled_idxs] + self.dynamic_mask_paths = [self.dynamic_mask_paths[i] for i in resampled_idxs] + self.full_idxs = [self.full_idxs[i] for i in resampled_idxs] + self.sample_stride = [self.sample_stride[i] for i in resampled_idxs] + + def __len__(self): + return len(self.rgb_paths) + + def _get_views(self, index, resolution, rng): + + rgb_paths = self.rgb_paths[index] + depth_paths = self.depth_paths[index] + full_idx = self.full_idxs[index] + annotations_paths = self.annotation_paths[index] + dynamic_mask_paths = self.dynamic_mask_paths[index] + + views = [] + for i in range(2): + impath = rgb_paths[i] + depthpath = depth_paths[i] + dynamic_mask_path = dynamic_mask_paths[i] + + # load camera params + intrinsics, extrinsics = cam_read(annotations_paths[i]) + intrinsics, extrinsics = np.array(intrinsics, dtype=np.float32), np.array(extrinsics, dtype=np.float32) + R = extrinsics[:3,:3] + t = extrinsics[:3,3] + camera_pose = np.eye(4, dtype=np.float32) + camera_pose[:3,:3] = R.T + camera_pose[:3,3] = -R.T @ t + + # load image and depth + rgb_image = imread_cv2(impath) + depthmap = depth_read(depthpath) + + # load dynamic mask + if dynamic_mask_path is not None and os.path.exists(dynamic_mask_path): + dynamic_mask = PIL.Image.open(dynamic_mask_path).convert('L') + dynamic_mask = ToTensor(dynamic_mask).sum(0).numpy() + _, dynamic_mask, _ = self._crop_resize_if_necessary( + rgb_image, dynamic_mask, intrinsics, resolution, rng=rng, info=impath) + dynamic_mask = dynamic_mask > 0.5 + assert not np.all(dynamic_mask), f"Dynamic mask is all True for {impath}" + else: + dynamic_mask = np.ones((resolution[1],resolution[0]), dtype=bool) + + rgb_image, depthmap, intrinsics = self._crop_resize_if_necessary( + rgb_image, depthmap, intrinsics, resolution, rng=rng, info=impath) + + if self.load_dynamic_mask: + views.append(dict( + img=rgb_image, + depthmap=depthmap, + camera_pose=camera_pose, + camera_intrinsics=intrinsics, + dataset=self.dataset_label, + label=rgb_paths[i].split('/')[-2], + instance=osp.split(rgb_paths[i])[1], + dynamic_mask=dynamic_mask, + full_idx=full_idx, + )) + else: + views.append(dict( + img=rgb_image, + depthmap=depthmap, + camera_pose=camera_pose, + camera_intrinsics=intrinsics, + dataset=self.dataset_label, + label=rgb_paths[i].split('/')[-2], + instance=osp.split(rgb_paths[i])[1], + full_idx=full_idx, + )) + return views + + +if __name__ == "__main__": + + from dust3r.viz import SceneViz, auto_cam_size + from dust3r.utils.image import rgb + + use_augs = False + S = 2 + strides = [1] + clip_step = 1 + quick = False # Set to True for quick testing + + + def visualize_scene(idx): + views = dataset[idx] + assert len(views) == 2 + viz = SceneViz() + poses = [views[view_idx]['camera_pose'] for view_idx in [0, 1]] + cam_size = max(auto_cam_size(poses), 0.25) + for view_idx in [0, 1]: + pts3d = views[view_idx]['pts3d'] + valid_mask = views[view_idx]['valid_mask'] + colors = rgb(views[view_idx]['img']) + viz.add_pointcloud(pts3d, colors, valid_mask) + viz.add_camera(pose_c2w=views[view_idx]['camera_pose'], + focal=views[view_idx]['camera_intrinsics'][0, 0], + color=(255, 0, 0), + image=colors, + cam_size=cam_size) + path = f"./tmp/sintel_scene_{idx}.glb" + return viz.save_glb(path) + + dataset = SintelDUSt3R( + use_augs=use_augs, + S=S, + strides=strides, + 
clip_step=clip_step,
+        quick=quick,
+        verbose=False,
+        resolution=(512,224),
+        seed = 777,
+        clip_step_last_skip=0,
+        aug_crop=16)
+
+    import random  # used below but not imported at the top of this file
+    idx = random.randint(0, len(dataset)-1)
+    visualize_scene(idx)
\ No newline at end of file
diff --git a/dynamic_predictor/dust3r/datasets/spring_dataset.py b/dynamic_predictor/dust3r/datasets/spring_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..a76c9afacb730f1a14e381751f74b92b79a31031
--- /dev/null
+++ b/dynamic_predictor/dust3r/datasets/spring_dataset.py
@@ -0,0 +1,242 @@
+import sys
+sys.path.append('.')
+import os
+import torch
+import numpy as np
+import os.path as osp
+import glob
+
+from dust3r.datasets.base.base_stereo_view_dataset import BaseStereoViewDataset
+from dust3r.utils.image import imread_cv2
+from dust3r.utils.misc import get_stride_distribution
+import h5py
+
+SPRING_BASELINE = 0.065
+np.random.seed(125)
+torch.multiprocessing.set_sharing_strategy('file_system')
+
+def get_depth(disp1, intrinsics, baseline=SPRING_BASELINE):
+    """
+    get depth from reference frame disparity and camera intrinsics
+    """
+    return intrinsics[0] * baseline / disp1
+
+
+def readDsp5Disp(filename):
+    with h5py.File(filename, "r") as f:
+        if "disparity" not in f.keys():
+            raise IOError(f"File {filename} does not have a 'disparity' key. Is this a valid dsp5 file?")
+        return f["disparity"][()]
+
+
+class SpringDUSt3R(BaseStereoViewDataset):
+    def __init__(self,
+                 dataset_location='data/spring',
+                 dset='train',
+                 use_augs=False,
+                 S=2,
+                 strides=[8],
+                 clip_step=2,
+                 quick=False,
+                 verbose=False,
+                 dist_type=None,
+                 remove_seq_list=[],
+                 *args,
+                 **kwargs
+                 ):
+
+        print('loading Spring dataset...')
+        super().__init__(*args, **kwargs)
+        self.dataset_label = 'Spring'
+        self.split = dset
+        self.S = S  # number of frames
+        self.verbose = verbose
+
+        self.use_augs = use_augs
+        self.dset = dset
+
+        self.rgb_paths = []
+        self.depth_paths = []
+        self.annotations = []
+        self.intrinsics = []
+        self.full_idxs = []
+        self.sample_stride = []
+        self.strides = strides
+
+        self.subdirs = []
+        self.sequences = []
+        self.subdirs.append(os.path.join(dataset_location, dset))
+
+        for subdir in self.subdirs:
+            for seq in glob.glob(os.path.join(subdir, "*/")):
+                seq_name = seq.split('/')[-2]
+                print(f"Processing sequence {seq_name}")
+                # remove_seq_list = ['0008', '0041', '0043']
+                if seq_name in remove_seq_list:
+                    print(f"Skipping sequence {seq_name}")
+                    continue
+                self.sequences.append(seq)
+
+        self.sequences = sorted(self.sequences)
+        if self.verbose:
+            print(self.sequences)
+            print('found %d unique videos in %s (dset=%s)' % (len(self.sequences), dataset_location, dset))
+
+        if quick:
+            self.sequences = self.sequences[1:2]
+
+        for seq in self.sequences:
+            if self.verbose:
+                print('seq', seq)
+
+            rgb_path = os.path.join(seq, 'frame_left')
+            depth_path = os.path.join(seq, 'disp1_left')
+            caminfo_path = os.path.join(seq, 'cam_data/extrinsics.txt')
+            caminfo = np.loadtxt(caminfo_path)
+            intrinsics_path = os.path.join(seq, 'cam_data/intrinsics.txt')
+            intrinsics = np.loadtxt(intrinsics_path)
+
+            for stride in strides:
+                for ii in range(1,len(os.listdir(rgb_path))-self.S*stride+2, clip_step):
+                    full_idx = ii + np.arange(self.S)*stride
+                    self.rgb_paths.append([os.path.join(rgb_path, 'frame_left_%04d.png' % idx) for idx in full_idx])
+                    self.depth_paths.append([os.path.join(depth_path, 'disp1_left_%04d.dsp5' % idx) for idx in full_idx])
+                    self.annotations.append(caminfo[full_idx-1])
+                    self.intrinsics.append(intrinsics[full_idx-1])
+                    self.full_idxs.append(full_idx)
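+                    # e.g. with S=2, stride=8, clip_step=2 this walks the
+                    # 1-indexed frame pairs (1, 9), (3, 11), (5, 13), ...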
+ self.sample_stride.append(stride) + if self.verbose: + sys.stdout.write('.') + sys.stdout.flush() + + self.stride_counts = {} + self.stride_idxs = {} + for stride in strides: + self.stride_counts[stride] = 0 + self.stride_idxs[stride] = [] + for i, stride in enumerate(self.sample_stride): + self.stride_counts[stride] += 1 + self.stride_idxs[stride].append(i) + print('stride counts:', self.stride_counts) + + if len(strides) > 1 and dist_type is not None: + self._resample_clips(strides, dist_type) + + print('collected %d clips of length %d in %s (dset=%s)' % ( + len(self.rgb_paths), self.S, dataset_location, dset)) + + def _resample_clips(self, strides, dist_type): + + # Get distribution of strides, and sample based on that + dist = get_stride_distribution(strides, dist_type=dist_type) + dist = dist / np.max(dist) + max_num_clips = self.stride_counts[strides[np.argmax(dist)]] + num_clips_each_stride = [min(self.stride_counts[stride], int(dist[i]*max_num_clips)) for i, stride in enumerate(strides)] + print('resampled_num_clips_each_stride:', num_clips_each_stride) + resampled_idxs = [] + for i, stride in enumerate(strides): + resampled_idxs += np.random.choice(self.stride_idxs[stride], num_clips_each_stride[i], replace=False).tolist() + + self.rgb_paths = [self.rgb_paths[i] for i in resampled_idxs] + self.depth_paths = [self.depth_paths[i] for i in resampled_idxs] + self.annotations = [self.annotations[i] for i in resampled_idxs] + self.intrinsics = [self.intrinsics[i] for i in resampled_idxs] + self.full_idxs = [self.full_idxs[i] for i in resampled_idxs] + self.sample_stride = [self.sample_stride[i] for i in resampled_idxs] + + def __len__(self): + return len(self.rgb_paths) + + def _get_views(self, index, resolution, rng): + + rgb_paths = self.rgb_paths[index] + depth_paths = self.depth_paths[index] + annotations = self.annotations[index] + intrinsics = self.intrinsics[index] + + views = [] + for i in range(2): + impath = rgb_paths[i] + depthpath = depth_paths[i] + + # load camera params + extrinsic = np.reshape(annotations[i], (4,4)).astype(np.float32) + camera_pose = np.linalg.inv(extrinsic) + intrinsic_matrix = np.zeros((3, 3), dtype=np.float32) + intrinsic_matrix[0, 0] = intrinsics[i][0] + intrinsic_matrix[1, 1] = intrinsics[i][1] + intrinsic_matrix[0, 2] = intrinsics[i][2] + intrinsic_matrix[1, 2] = intrinsics[i][3] + + # load image and depth + rgb_image = imread_cv2(impath) + depthmap = get_depth(readDsp5Disp(depthpath), intrinsics[i]).astype(np.float32) + depthmap = depthmap[::2, ::2] + depthmap = np.where(np.isnan(depthmap), -1, depthmap) + depthmap = np.where(np.isinf(depthmap), -1, depthmap) + + rgb_image, depthmap, intrinsic_matrix = self._crop_resize_if_necessary( + rgb_image, depthmap, intrinsic_matrix, resolution, rng=rng, info=impath) + + views.append(dict( + img=rgb_image, + depthmap=depthmap, + camera_pose=camera_pose, + camera_intrinsics=intrinsic_matrix, + dataset=self.dataset_label, + label=rgb_paths[i].split('/')[-3], + instance=osp.split(rgb_paths[i])[1], + )) + return views + +if __name__ == "__main__": + + from dust3r.viz import SceneViz, auto_cam_size + from dust3r.utils.image import rgb + + use_augs = False + S = 2 + strides=[2,4,6,8,10,12,14,16,18] + clip_step = 2 + quick = False # Set to True for quick testing + dist_type='linear_9_1' + + + def visualize_scene(idx): + views = dataset[idx] + assert len(views) == 2 + viz = SceneViz() + poses = [views[view_idx]['camera_pose'] for view_idx in [0, 1]] + cam_size = max(auto_cam_size(poses), 1) + label = 
views[0]['label'] + instance = views[0]['instance'] + for view_idx in [0, 1]: + pts3d = views[view_idx]['pts3d'] + valid_mask = views[view_idx]['valid_mask'] + colors = rgb(views[view_idx]['img']) + viz.add_pointcloud(pts3d, colors, valid_mask) + viz.add_camera(pose_c2w=views[view_idx]['camera_pose'], + focal=views[view_idx]['camera_intrinsics'][0, 0], + color=(255, 0, 0), + image=colors, + cam_size=cam_size) + os.makedirs("./tmp/spring_scene", exist_ok=True) + path = f"./tmp/spring_scene/spring_scene_{label}_{views[0]['instance']}_{views[1]['instance']}.glb" + return viz.save_glb(path) + + dataset = SpringDUSt3R( + use_augs=use_augs, + S=S, + strides=strides, + clip_step=clip_step, + quick=quick, + verbose=False, + resolution=(512,288), + aug_crop=16, + dist_type=dist_type, + z_far=80) + + idxs = np.arange(0, len(dataset)-1, (len(dataset)-1)//10) + for idx in idxs: + print(f"Visualizing scene {idx}...") + visualize_scene(idx) \ No newline at end of file diff --git a/dynamic_predictor/dust3r/datasets/staticthings3d.py b/dynamic_predictor/dust3r/datasets/staticthings3d.py new file mode 100644 index 0000000000000000000000000000000000000000..e7f70f0ee7bf8c8ab6bb1702aa2481f3d16df413 --- /dev/null +++ b/dynamic_predictor/dust3r/datasets/staticthings3d.py @@ -0,0 +1,96 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). +# +# -------------------------------------------------------- +# Dataloader for preprocessed StaticThings3D +# dataset at https://github.com/lmb-freiburg/robustmvd/ +# See datasets_preprocess/preprocess_staticthings3d.py +# -------------------------------------------------------- +import os.path as osp +import numpy as np + +from dust3r.datasets.base.base_stereo_view_dataset import BaseStereoViewDataset +from dust3r.utils.image import imread_cv2 + + +class StaticThings3D (BaseStereoViewDataset): + """ Dataset of indoor scenes, 5 images each time + """ + def __init__(self, ROOT, *args, mask_bg='rand', **kwargs): + self.ROOT = ROOT + super().__init__(*args, **kwargs) + + assert mask_bg in (True, False, 'rand') + self.mask_bg = mask_bg + + # loading all pairs + assert self.split is None + self.pairs = np.load(osp.join(ROOT, 'staticthings_pairs.npy')) + + def __len__(self): + return len(self.pairs) + + def get_stats(self): + return f'{len(self)} pairs' + + def _get_views(self, pair_idx, resolution, rng): + scene, seq, cam1, im1, cam2, im2 = self.pairs[pair_idx] + seq_path = osp.join('TRAIN', scene.decode('ascii'), f'{seq:04d}') + + views = [] + + mask_bg = (self.mask_bg == True) or (self.mask_bg == 'rand' and rng.choice(2)) + + CAM = {b'l':'left', b'r':'right'} + for cam, idx in [(CAM[cam1], im1), (CAM[cam2], im2)]: + num = f"{idx:04n}" + img = num+"_clean.jpg" if rng.choice(2) else num+"_final.jpg" + image = imread_cv2(osp.join(self.ROOT, seq_path, cam, img)) + depthmap = imread_cv2(osp.join(self.ROOT, seq_path, cam, num+".exr")) + camera_params = np.load(osp.join(self.ROOT, seq_path, cam, num+".npz")) + + intrinsics = camera_params['intrinsics'] + camera_pose = camera_params['cam2world'] + + if mask_bg: + depthmap[depthmap > 200] = 0 + + image, depthmap, intrinsics = self._crop_resize_if_necessary(image, depthmap, intrinsics, resolution, rng, info=(seq_path,cam,img)) + + views.append(dict( + img = image, + depthmap = depthmap, + camera_pose = camera_pose, # cam2world + camera_intrinsics = intrinsics, + dataset = 'StaticThings3D', + label = seq_path, + instance = cam+'_'+img)) + + return views + + +if 
__name__ == '__main__': + from dust3r.datasets.base.base_stereo_view_dataset import view_name + from dust3r.viz import SceneViz, auto_cam_size + from dust3r.utils.image import rgb + + dataset = StaticThings3D(ROOT="data/staticthings3d_processed", resolution=224, aug_crop=16) + + for idx in np.random.permutation(len(dataset)): + views = dataset[idx] + assert len(views) == 2 + print(idx, view_name(views[0]), view_name(views[1])) + viz = SceneViz() + poses = [views[view_idx]['camera_pose'] for view_idx in [0, 1]] + cam_size = max(auto_cam_size(poses), 0.001) + for view_idx in [0, 1]: + pts3d = views[view_idx]['pts3d'] + valid_mask = views[view_idx]['valid_mask'] + colors = rgb(views[view_idx]['img']) + viz.add_pointcloud(pts3d, colors, valid_mask) + viz.add_camera(pose_c2w=views[view_idx]['camera_pose'], + focal=views[view_idx]['camera_intrinsics'][0, 0], + color=(idx*255, (1 - idx)*255, 0), + image=colors, + cam_size=cam_size) + viz.show() diff --git a/dynamic_predictor/dust3r/datasets/tartanair.py b/dynamic_predictor/dust3r/datasets/tartanair.py new file mode 100644 index 0000000000000000000000000000000000000000..3bbcb111ff5c897a37614eb983ac61bddbc8ba82 --- /dev/null +++ b/dynamic_predictor/dust3r/datasets/tartanair.py @@ -0,0 +1,234 @@ + +import sys +sys.path.append('.') +import os +import torch +import numpy as np +import os.path as osp +import glob + +from dust3r.datasets.base.base_stereo_view_dataset import BaseStereoViewDataset +from dust3r.utils.image import imread_cv2 +from dust3r.utils.misc import get_stride_distribution + +np.random.seed(125) +torch.multiprocessing.set_sharing_strategy('file_system') + +def depth_read(filename): + depth = np.load(filename) + return depth + +def xyzqxqyqxqw_to_c2w(xyzqxqyqxqw): + xyzqxqyqxqw = np.array(xyzqxqyqxqw, dtype=np.float32) + #NOTE: we need to convert x_y_z coordinate system to z_x_y coordinate system + z, x, y = xyzqxqyqxqw[:3] + qz, qx, qy, qw = xyzqxqyqxqw[3:] + c2w = np.eye(4) + c2w[:3, :3] = np.array([ + [1 - 2*qy*qy - 2*qz*qz, 2*qx*qy - 2*qz*qw, 2*qx*qz + 2*qy*qw], + [2*qx*qy + 2*qz*qw, 1 - 2*qx*qx - 2*qz*qz, 2*qy*qz - 2*qx*qw], + [2*qx*qz - 2*qy*qw, 2*qy*qz + 2*qx*qw, 1 - 2*qx*qx - 2*qy*qy] + ]) + c2w[:3, 3] = np.array([x, y, z]) + return c2w + +class TarTanAirDUSt3R(BaseStereoViewDataset): + def __init__(self, + dataset_location='data/tartanair', + dset='Hard', + use_augs=False, + S=2, + strides=[8], + clip_step=2, + quick=False, + verbose=False, + dist_type=None, + *args, + **kwargs + ): + + print('loading tartanair dataset...') + super().__init__(*args, **kwargs) + self.dataset_label = 'tartanair' + self.split = dset + self.S = S # number of frames + self.verbose = verbose + + self.use_augs = use_augs + self.dset = dset + + self.rgb_paths = [] + self.depth_paths = [] + self.normal_paths = [] + self.traj_paths = [] + self.annotations = [] + self.full_idxs = [] + self.sample_stride = [] + self.strides = strides + + self.subdirs = [] + self.sequences = [] + self.subdirs.append(os.path.join(dataset_location)) #'data/tartanair' + + for subdir in self.subdirs: + for seq in glob.glob(os.path.join(subdir, "*/", dset, "*/")): + self.sequences.append(seq) + + self.sequences = sorted(self.sequences) + if self.verbose: + print(self.sequences) + print('found %d unique videos in %s (dset=%s)' % (len(self.sequences), dataset_location, dset)) + + if quick: + self.sequences = self.sequences[1:2] + + for seq in self.sequences: + if self.verbose: + print('seq', seq) + + rgb_path = os.path.join(seq, 'image_left') + depth_path = os.path.join(seq, 
'depth_left') + caminfo_path = os.path.join(seq, 'pose_left.txt') + caminfo = np.loadtxt(caminfo_path) + + for stride in strides: + for ii in range(0,len(os.listdir(rgb_path))-self.S*stride+1, clip_step): + full_idx = ii + np.arange(self.S)*stride + self.rgb_paths.append([os.path.join(rgb_path, '%06d_left.png' % idx) for idx in full_idx]) + self.depth_paths.append([os.path.join(depth_path, '%06d_left_depth.npy' % idx) for idx in full_idx]) + self.annotations.append(caminfo[full_idx]) + self.full_idxs.append(full_idx) + self.sample_stride.append(stride) + if self.verbose: + sys.stdout.write('.') + sys.stdout.flush() + + + fx = 320.0 # focal length x + fy = 320.0 # focal length y + cx = 320.0 # optical center x + cy = 240.0 # optical center y + + width = 640 + height = 480 + + self.intrinsics = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]], dtype=np.float32) + self.stride_counts = {} + self.stride_idxs = {} + for stride in strides: + self.stride_counts[stride] = 0 + self.stride_idxs[stride] = [] + for i, stride in enumerate(self.sample_stride): + self.stride_counts[stride] += 1 + self.stride_idxs[stride].append(i) + print('stride counts:', self.stride_counts) + + if len(strides) > 1 and dist_type is not None: + self._resample_clips(strides, dist_type) + + print('collected %d clips of length %d in %s (dset=%s)' % ( + len(self.rgb_paths), self.S, dataset_location, dset)) + + def _resample_clips(self, strides, dist_type): + + # Get distribution of strides, and sample based on that + dist = get_stride_distribution(strides, dist_type=dist_type) + dist = dist / np.max(dist) + max_num_clips = self.stride_counts[strides[np.argmax(dist)]] + num_clips_each_stride = [min(self.stride_counts[stride], int(dist[i]*max_num_clips)) for i, stride in enumerate(strides)] + print('resampled_num_clips_each_stride:', num_clips_each_stride) + resampled_idxs = [] + for i, stride in enumerate(strides): + resampled_idxs += np.random.choice(self.stride_idxs[stride], num_clips_each_stride[i], replace=False).tolist() + + self.rgb_paths = [self.rgb_paths[i] for i in resampled_idxs] + self.depth_paths = [self.depth_paths[i] for i in resampled_idxs] + self.annotations = [self.annotations[i] for i in resampled_idxs] + self.full_idxs = [self.full_idxs[i] for i in resampled_idxs] + self.sample_stride = [self.sample_stride[i] for i in resampled_idxs] + + def __len__(self): + return len(self.rgb_paths) + + def _get_views(self, index, resolution, rng): + + rgb_paths = self.rgb_paths[index] + depth_paths = self.depth_paths[index] + full_idx = self.full_idxs[index] + annotations = self.annotations[index] + + views = [] + for i in range(2): + impath = rgb_paths[i] + depthpath = depth_paths[i] + + # load camera params + camera_pose = np.array(xyzqxqyqxqw_to_c2w(annotations[i]), dtype=np.float32) + # camera_pose = np.linalg.inv(camera_pose) + + # load image and depth + rgb_image = imread_cv2(impath) + depthmap = depth_read(depthpath) + + rgb_image, depthmap, intrinsics = self._crop_resize_if_necessary( + rgb_image, depthmap, self.intrinsics, resolution, rng=rng, info=impath) + + views.append(dict( + img=rgb_image, + depthmap=depthmap, + camera_pose=camera_pose, + camera_intrinsics=intrinsics, + dataset=self.dataset_label, + label=rgb_paths[i].split('/')[-5]+'-'+rgb_paths[i].split('/')[-3], + instance=osp.split(rgb_paths[i])[1], + )) + return views + +if __name__ == "__main__": + + from dust3r.viz import SceneViz, auto_cam_size + from dust3r.utils.image import rgb + + use_augs = False + S = 2 + strides = [1,2,3,4,5,6,7,8,9] + 
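+    # a minimal sanity-check sketch for xyzqxqyqxqw_to_c2w above (kept
+    # commented out): the identity quaternion should map to the identity
+    # rotation with a zero translation
+    # c2w = xyzqxqyqxqw_to_c2w([0, 0, 0, 0, 0, 0, 1])
+    # assert np.allclose(c2w[:3, :3], np.eye(3)) and np.allclose(c2w[:3, 3], 0)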
clip_step = 2 + quick = False # Set to True for quick testing + + + def visualize_scene(idx): + views = dataset[idx] + assert len(views) == 2 + viz = SceneViz() + poses = [views[view_idx]['camera_pose'] for view_idx in [0, 1]] + cam_size = max(auto_cam_size(poses), 1) + label = views[0]['label'] + instance = views[0]['instance'] + for view_idx in [0, 1]: + pts3d = views[view_idx]['pts3d'] + valid_mask = views[view_idx]['valid_mask'] + colors = rgb(views[view_idx]['img']) + viz.add_pointcloud(pts3d, colors, valid_mask) + viz.add_camera(pose_c2w=views[view_idx]['camera_pose'], + focal=views[view_idx]['camera_intrinsics'][0, 0], + color=(255, 0, 0), + image=colors, + cam_size=cam_size) + path = f"./tmp/tartanair/tartanair_scene_{label}_{instance}.glb" + return viz.save_glb(path) + + dataset = TarTanAirDUSt3R( + use_augs=use_augs, + S=S, + strides=strides, + clip_step=clip_step, + quick=quick, + verbose=False, + resolution=(512,384), + dist_type='linear_9_1', + aug_crop=16) + + idxs = np.arange(0, len(dataset)-1, (len(dataset)-1)//10) + for idx in idxs: + print(f"Visualizing scene {idx}...") + visualize_scene(idx) \ No newline at end of file diff --git a/dynamic_predictor/dust3r/datasets/utils/__init__.py b/dynamic_predictor/dust3r/datasets/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a32692113d830ddc4af4e6ed608f222fbe062e6e --- /dev/null +++ b/dynamic_predictor/dust3r/datasets/utils/__init__.py @@ -0,0 +1,2 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). diff --git a/dynamic_predictor/dust3r/datasets/utils/basic.py b/dynamic_predictor/dust3r/datasets/utils/basic.py new file mode 100644 index 0000000000000000000000000000000000000000..4a4a15ecf2c566fe9216f2622ff21c576f0d43f7 --- /dev/null +++ b/dynamic_predictor/dust3r/datasets/utils/basic.py @@ -0,0 +1,397 @@ +import os +import numpy as np +from os.path import isfile +import torch +import torch.nn.functional as F +EPS = 1e-6 +import copy + +def sub2ind(height, width, y, x): + return y*width + x + +def ind2sub(height, width, ind): + y = ind // width + x = ind % width + return y, x + +def get_lr_str(lr): + lrn = "%.1e" % lr # e.g., 5.0e-04 + lrn = lrn[0] + lrn[3:5] + lrn[-1] # e.g., 5e-4 + return lrn + +def strnum(x): + s = '%g' % x + if '.' in s: + if x < 1.0: + s = s[s.index('.'):] + s = s[:min(len(s),4)] + return s + +def assert_same_shape(t1, t2): + for (x, y) in zip(list(t1.shape), list(t2.shape)): + assert(x==y) + +def print_stats(name, tensor): + shape = tensor.shape + tensor = tensor.detach().cpu().numpy() + print('%s (%s) min = %.2f, mean = %.2f, max = %.2f' % (name, tensor.dtype, np.min(tensor), np.mean(tensor), np.max(tensor)), shape) + +def print_stats_py(name, tensor): + shape = tensor.shape + print('%s (%s) min = %.2f, mean = %.2f, max = %.2f' % (name, tensor.dtype, np.min(tensor), np.mean(tensor), np.max(tensor)), shape) + +def print_(name, tensor): + tensor = tensor.detach().cpu().numpy() + print(name, tensor, tensor.shape) + +def mkdir(path): + if not os.path.exists(path): + os.makedirs(path) + +def normalize_single(d): + # d is a whatever shape torch tensor + dmin = torch.min(d) + dmax = torch.max(d) + d = (d-dmin)/(EPS+(dmax-dmin)) + return d + +def normalize(d): + # d is B x whatever. 
normalize within each element of the batch + out = torch.zeros(d.size()) + if d.is_cuda: + out = out.cuda() + B = list(d.size())[0] + for b in list(range(B)): + out[b] = normalize_single(d[b]) + return out + +def hard_argmax2d(tensor): + B, C, Y, X = list(tensor.shape) + assert(C==1) + + # flatten the Tensor along the height and width axes + flat_tensor = tensor.reshape(B, -1) + # argmax of the flat tensor + argmax = torch.argmax(flat_tensor, dim=1) + + # convert the indices into 2d coordinates + argmax_y = torch.floor(argmax / X) # row + argmax_x = argmax % X # col + + argmax_y = argmax_y.reshape(B) + argmax_x = argmax_x.reshape(B) + return argmax_y, argmax_x + +def argmax2d(heat, hard=True): + B, C, Y, X = list(heat.shape) + assert(C==1) + + if hard: + # hard argmax + loc_y, loc_x = hard_argmax2d(heat) + loc_y = loc_y.float() + loc_x = loc_x.float() + else: + heat = heat.reshape(B, Y*X) + prob = torch.nn.functional.softmax(heat, dim=1) + + grid_y, grid_x = meshgrid2d(B, Y, X) + + grid_y = grid_y.reshape(B, -1) + grid_x = grid_x.reshape(B, -1) + + loc_y = torch.sum(grid_y*prob, dim=1) + loc_x = torch.sum(grid_x*prob, dim=1) + # these are B + + return loc_y, loc_x + +def reduce_masked_mean(x, mask, dim=None, keepdim=False): + # x and mask are the same shape, or at least broadcastably so < actually it's safer if you disallow broadcasting + # returns shape-1 + # axis can be a list of axes + for (a,b) in zip(x.size(), mask.size()): + # if not b==1: + assert(a==b) # some shape mismatch! + # assert(x.size() == mask.size()) + prod = x*mask + if dim is None: + numer = torch.sum(prod) + denom = EPS+torch.sum(mask) + else: + numer = torch.sum(prod, dim=dim, keepdim=keepdim) + denom = EPS+torch.sum(mask, dim=dim, keepdim=keepdim) + + mean = numer/denom + return mean + +def reduce_masked_median(x, mask, keep_batch=False): + # x and mask are the same shape + assert(x.size() == mask.size()) + device = x.device + + B = list(x.shape)[0] + x = x.detach().cpu().numpy() + mask = mask.detach().cpu().numpy() + + if keep_batch: + x = np.reshape(x, [B, -1]) + mask = np.reshape(mask, [B, -1]) + meds = np.zeros([B], np.float32) + for b in list(range(B)): + xb = x[b] + mb = mask[b] + if np.sum(mb) > 0: + xb = xb[mb > 0] + meds[b] = np.median(xb) + else: + meds[b] = np.nan + meds = torch.from_numpy(meds).to(device) + return meds.float() + else: + x = np.reshape(x, [-1]) + mask = np.reshape(mask, [-1]) + if np.sum(mask) > 0: + x = x[mask > 0] + med = np.median(x) + else: + med = np.nan + med = np.array([med], np.float32) + med = torch.from_numpy(med).to(device) + return med.float() + +def pack_seqdim(tensor, B): + shapelist = list(tensor.shape) + B_, S = shapelist[:2] + assert(B==B_) + otherdims = shapelist[2:] + tensor = torch.reshape(tensor, [B*S]+otherdims) + return tensor + +def unpack_seqdim(tensor, B): + shapelist = list(tensor.shape) + BS = shapelist[0] + assert(BS%B==0) + otherdims = shapelist[1:] + S = int(BS/B) + tensor = torch.reshape(tensor, [B,S]+otherdims) + return tensor + +def meshgrid2d(B, Y, X, stack=False, norm=False, device='cuda', on_chans=False): + # returns a meshgrid sized B x Y x X + + grid_y = torch.linspace(0.0, Y-1, Y, device=torch.device(device)) + grid_y = torch.reshape(grid_y, [1, Y, 1]) + grid_y = grid_y.repeat(B, 1, X) + + grid_x = torch.linspace(0.0, X-1, X, device=torch.device(device)) + grid_x = torch.reshape(grid_x, [1, 1, X]) + grid_x = grid_x.repeat(B, Y, 1) + + if norm: + grid_y, grid_x = normalize_grid2d( + grid_y, grid_x, Y, X) + + if stack: + # note we stack in xy order + # 
(see https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.grid_sample) + if on_chans: + grid = torch.stack([grid_x, grid_y], dim=1) + else: + grid = torch.stack([grid_x, grid_y], dim=-1) + return grid + else: + return grid_y, grid_x + +def meshgrid3d(B, Z, Y, X, stack=False, norm=False, device='cuda'): + # returns a meshgrid sized B x Z x Y x X + + grid_z = torch.linspace(0.0, Z-1, Z, device=device) + grid_z = torch.reshape(grid_z, [1, Z, 1, 1]) + grid_z = grid_z.repeat(B, 1, Y, X) + + grid_y = torch.linspace(0.0, Y-1, Y, device=device) + grid_y = torch.reshape(grid_y, [1, 1, Y, 1]) + grid_y = grid_y.repeat(B, Z, 1, X) + + grid_x = torch.linspace(0.0, X-1, X, device=device) + grid_x = torch.reshape(grid_x, [1, 1, 1, X]) + grid_x = grid_x.repeat(B, Z, Y, 1) + + # if cuda: + # grid_z = grid_z.cuda() + # grid_y = grid_y.cuda() + # grid_x = grid_x.cuda() + + if norm: + grid_z, grid_y, grid_x = normalize_grid3d( + grid_z, grid_y, grid_x, Z, Y, X) + + if stack: + # note we stack in xyz order + # (see https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.grid_sample) + grid = torch.stack([grid_x, grid_y, grid_z], dim=-1) + return grid + else: + return grid_z, grid_y, grid_x + +def normalize_grid2d(grid_y, grid_x, Y, X, clamp_extreme=True): + # make things in [-1,1] + grid_y = 2.0*(grid_y / float(Y-1)) - 1.0 + grid_x = 2.0*(grid_x / float(X-1)) - 1.0 + + if clamp_extreme: + grid_y = torch.clamp(grid_y, min=-2.0, max=2.0) + grid_x = torch.clamp(grid_x, min=-2.0, max=2.0) + + return grid_y, grid_x + +def normalize_grid3d(grid_z, grid_y, grid_x, Z, Y, X, clamp_extreme=True): + # make things in [-1,1] + grid_z = 2.0*(grid_z / float(Z-1)) - 1.0 + grid_y = 2.0*(grid_y / float(Y-1)) - 1.0 + grid_x = 2.0*(grid_x / float(X-1)) - 1.0 + + if clamp_extreme: + grid_z = torch.clamp(grid_z, min=-2.0, max=2.0) + grid_y = torch.clamp(grid_y, min=-2.0, max=2.0) + grid_x = torch.clamp(grid_x, min=-2.0, max=2.0) + + return grid_z, grid_y, grid_x + +def gridcloud2d(B, Y, X, norm=False, device='cuda'): + # we want to sample for each location in the grid + grid_y, grid_x = meshgrid2d(B, Y, X, norm=norm, device=device) + x = torch.reshape(grid_x, [B, -1]) + y = torch.reshape(grid_y, [B, -1]) + # these are B x N + xy = torch.stack([x, y], dim=2) + # this is B x N x 2 + return xy + +def gridcloud3d(B, Z, Y, X, norm=False, device='cuda'): + # we want to sample for each location in the grid + grid_z, grid_y, grid_x = meshgrid3d(B, Z, Y, X, norm=norm, device=device) + x = torch.reshape(grid_x, [B, -1]) + y = torch.reshape(grid_y, [B, -1]) + z = torch.reshape(grid_z, [B, -1]) + # these are B x N + xyz = torch.stack([x, y, z], dim=2) + # this is B x N x 3 + return xyz + +import re +def readPFM(file): + file = open(file, 'rb') + + color = None + width = None + height = None + scale = None + endian = None + + header = file.readline().rstrip() + if header == b'PF': + color = True + elif header == b'Pf': + color = False + else: + raise Exception('Not a PFM file.') + + dim_match = re.match(rb'^(\d+)\s(\d+)\s$', file.readline()) + if dim_match: + width, height = map(int, dim_match.groups()) + else: + raise Exception('Malformed PFM header.') + + scale = float(file.readline().rstrip()) + if scale < 0: # little-endian + endian = '<' + scale = -scale + else: + endian = '>' # big-endian + + data = np.fromfile(file, endian + 'f') + shape = (height, width, 3) if color else (height, width) + + data = np.reshape(data, shape) + data = np.flipud(data) + return data + +def normalize_boxlist2d(boxlist2d, H, W): 
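+    # maps pixel-coordinate boxes (ymin, xmin, ymax, xmax), shaped B x N x 4,
+    # into [0, 1] by dividing the y/x coordinates by the image height/width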
+ boxlist2d = boxlist2d.clone() + ymin, xmin, ymax, xmax = torch.unbind(boxlist2d, dim=2) + ymin = ymin / float(H) + ymax = ymax / float(H) + xmin = xmin / float(W) + xmax = xmax / float(W) + boxlist2d = torch.stack([ymin, xmin, ymax, xmax], dim=2) + return boxlist2d + +def unnormalize_boxlist2d(boxlist2d, H, W): + boxlist2d = boxlist2d.clone() + ymin, xmin, ymax, xmax = torch.unbind(boxlist2d, dim=2) + ymin = ymin * float(H) + ymax = ymax * float(H) + xmin = xmin * float(W) + xmax = xmax * float(W) + boxlist2d = torch.stack([ymin, xmin, ymax, xmax], dim=2) + return boxlist2d + +def unnormalize_box2d(box2d, H, W): + return unnormalize_boxlist2d(box2d.unsqueeze(1), H, W).squeeze(1) + +def normalize_box2d(box2d, H, W): + return normalize_boxlist2d(box2d.unsqueeze(1), H, W).squeeze(1) + +def get_gaussian_kernel_2d(channels, kernel_size=3, sigma=2.0, mid_one=False): + C = channels + xy_grid = gridcloud2d(C, kernel_size, kernel_size) # C x N x 2 + + mean = (kernel_size - 1)/2.0 + variance = sigma**2.0 + + gaussian_kernel = (1.0/(2.0*np.pi*variance)**1.5) * torch.exp(-torch.sum((xy_grid - mean)**2.0, dim=-1) / (2.0*variance)) # C X N + gaussian_kernel = gaussian_kernel.view(C, 1, kernel_size, kernel_size) # C x 1 x 3 x 3 + kernel_sum = torch.sum(gaussian_kernel, dim=(2,3), keepdim=True) + + gaussian_kernel = gaussian_kernel / kernel_sum # normalize + + if mid_one: + # normalize so that the middle element is 1 + maxval = gaussian_kernel[:,:,(kernel_size//2),(kernel_size//2)].reshape(C, 1, 1, 1) + gaussian_kernel = gaussian_kernel / maxval + + return gaussian_kernel + +def gaussian_blur_2d(input, kernel_size=3, sigma=2.0, reflect_pad=False, mid_one=False): + B, C, Z, X = input.shape + kernel = get_gaussian_kernel_2d(C, kernel_size, sigma, mid_one=mid_one) + if reflect_pad: + pad = (kernel_size - 1)//2 + out = F.pad(input, (pad, pad, pad, pad), mode='reflect') + out = F.conv2d(out, kernel, padding=0, groups=C) + else: + out = F.conv2d(input, kernel, padding=(kernel_size - 1)//2, groups=C) + return out + +def gradient2d(x, absolute=False, square=False, return_sum=False): + # x should be B x C x H x W + dh = x[:, :, 1:, :] - x[:, :, :-1, :] + dw = x[:, :, :, 1:] - x[:, :, :, :-1] + + zeros = torch.zeros_like(x) + zero_h = zeros[:, :, 0:1, :] + zero_w = zeros[:, :, :, 0:1] + dh = torch.cat([dh, zero_h], axis=2) + dw = torch.cat([dw, zero_w], axis=3) + if absolute: + dh = torch.abs(dh) + dw = torch.abs(dw) + if square: + dh = dh ** 2 + dw = dw ** 2 + if return_sum: + return dh+dw + else: + return dh, dw diff --git a/dynamic_predictor/dust3r/datasets/utils/bremm.png b/dynamic_predictor/dust3r/datasets/utils/bremm.png new file mode 100644 index 0000000000000000000000000000000000000000..d3e5d9e8451234ead45bdb0d875c6f56aefcbc41 Binary files /dev/null and b/dynamic_predictor/dust3r/datasets/utils/bremm.png differ diff --git a/dynamic_predictor/dust3r/datasets/utils/cropping.py b/dynamic_predictor/dust3r/datasets/utils/cropping.py new file mode 100644 index 0000000000000000000000000000000000000000..bc9eb89efd194c3c18f3792f51b7834daf2d73e4 --- /dev/null +++ b/dynamic_predictor/dust3r/datasets/utils/cropping.py @@ -0,0 +1,183 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). 
+#
+# --------------------------------------------------------
+# cropping utilities
+# --------------------------------------------------------
+import PIL.Image
+import os
+os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"
+import cv2 # noqa
+import numpy as np # noqa
+from dust3r.utils.geometry import colmap_to_opencv_intrinsics, opencv_to_colmap_intrinsics # noqa
+try:
+ lanczos = PIL.Image.Resampling.LANCZOS
+ bicubic = PIL.Image.Resampling.BICUBIC
+except AttributeError:
+ lanczos = PIL.Image.LANCZOS
+ bicubic = PIL.Image.BICUBIC
+
+
+class ImageList:
+ """ Convenience class to apply the same operation to a whole set of images.
+ """
+
+ def __init__(self, images):
+ if not isinstance(images, (tuple, list, set)):
+ images = [images]
+ self.images = []
+ for image in images:
+ if not isinstance(image, PIL.Image.Image):
+ image = PIL.Image.fromarray(image)
+ self.images.append(image)
+
+ def __len__(self):
+ return len(self.images)
+
+ def to_pil(self):
+ return tuple(self.images) if len(self.images) > 1 else self.images[0]
+
+ @property
+ def size(self):
+ sizes = [im.size for im in self.images]
+ assert all(sizes[0] == s for s in sizes)
+ return sizes[0]
+
+ def resize(self, *args, **kwargs):
+ return ImageList(self._dispatch('resize', *args, **kwargs))
+
+ def crop(self, *args, **kwargs):
+ return ImageList(self._dispatch('crop', *args, **kwargs))
+
+ def _dispatch(self, func, *args, **kwargs):
+ return [getattr(im, func)(*args, **kwargs) for im in self.images]
+
+
+def rescale_image_depthmap(image, depthmap, camera_intrinsics, output_resolution, force=True):
+ """ Jointly rescale an (image, depthmap) pair
+ so that (out_width, out_height) >= output_res
+ """
+ image = ImageList(image)
+ input_resolution = np.array(image.size) # (W,H)
+ output_resolution = np.array(output_resolution)
+ if depthmap is not None:
+ # can also use this with masks instead of depthmaps
+ assert tuple(depthmap.shape[:2]) == image.size[::-1]
+
+ # define output resolution
+ assert output_resolution.shape == (2,)
+ scale_final = max(output_resolution / image.size) + 1e-8
+ if scale_final >= 1 and not force: # image is already smaller than what is asked
+ return (image.to_pil(), depthmap, camera_intrinsics)
+ output_resolution = np.floor(input_resolution * scale_final).astype(int)
+
+ # first rescale the image so that it contains the crop
+ image = image.resize(tuple(output_resolution), resample=lanczos if scale_final < 1 else bicubic)
+ if depthmap is not None:
+ depthmap = cv2.resize(depthmap, output_resolution, fx=scale_final,
+ fy=scale_final, interpolation=cv2.INTER_NEAREST)
+
+ # no offset here; simple rescaling
+ camera_intrinsics = camera_matrix_of_crop(
+ camera_intrinsics, input_resolution, output_resolution, scaling=scale_final)
+
+ return image.to_pil(), depthmap, camera_intrinsics
+
+def center_crop_image_depthmap(image, depthmap, camera_intrinsics, crop_scale):
+ """
+ Jointly center-crop an image and its depthmap, and adjust the camera intrinsics accordingly.
+
+ Parameters:
+ - image: PIL.Image or similar, the input image.
+ - depthmap: np.ndarray, the corresponding depth map.
+ - camera_intrinsics: np.ndarray, the 3x3 camera intrinsics matrix.
+ - crop_scale: float between 0 and 1, the fraction of the image to keep.
+
+ Returns:
+ - cropped_image: PIL.Image, the center-cropped image.
+ - cropped_depthmap: np.ndarray, the center-cropped depth map.
+ - adjusted_intrinsics: np.ndarray, the adjusted camera intrinsics matrix.
+ """ + # Ensure crop_scale is valid + assert 0 < crop_scale <= 1, "crop_scale must be between 0 and 1" + + # Convert image to ImageList for consistent processing + image = ImageList(image) + input_resolution = np.array(image.size) # (width, height) + if depthmap is not None: + # Ensure depthmap matches the image size + assert depthmap.shape[:2] == tuple(image.size[::-1]), "Depthmap size must match image size" + + # Compute output resolution after cropping + output_resolution = np.floor(input_resolution * crop_scale).astype(int) + # get the correct crop_scale + crop_scale = output_resolution / input_resolution + + # Compute margins (amount to crop from each side) + margins = input_resolution - output_resolution + offset = margins / 2 # Since we are center cropping + + # Calculate the crop bounding box + l, t = offset.astype(int) + r = l + output_resolution[0] + b = t + output_resolution[1] + crop_bbox = (l, t, r, b) + + # Crop the image and depthmap + image = image.crop(crop_bbox) + if depthmap is not None: + depthmap = depthmap[t:b, l:r] + + # Adjust the camera intrinsics + adjusted_intrinsics = camera_intrinsics.copy() + + # Adjust focal lengths (fx, fy) # no need to adjust focal lengths for cropping + # adjusted_intrinsics[0, 0] /= crop_scale[0] # fx + # adjusted_intrinsics[1, 1] /= crop_scale[1] # fy + + # Adjust principal point (cx, cy) + adjusted_intrinsics[0, 2] -= l # cx + adjusted_intrinsics[1, 2] -= t # cy + + return image.to_pil(), depthmap, adjusted_intrinsics + + + +def camera_matrix_of_crop(input_camera_matrix, input_resolution, output_resolution, scaling=1, offset_factor=0.5, offset=None): + # Margins to offset the origin + margins = np.asarray(input_resolution) * scaling - output_resolution + assert np.all(margins >= 0.0) + if offset is None: + offset = offset_factor * margins + + # Generate new camera parameters + output_camera_matrix_colmap = opencv_to_colmap_intrinsics(input_camera_matrix) + output_camera_matrix_colmap[:2, :] *= scaling + output_camera_matrix_colmap[:2, 2] -= offset + output_camera_matrix = colmap_to_opencv_intrinsics(output_camera_matrix_colmap) + + return output_camera_matrix + + +def crop_image_depthmap(image, depthmap, camera_intrinsics, crop_bbox): + """ + Return a crop of the input view. 
+ """ + image = ImageList(image) + l, t, r, b = crop_bbox + + image = image.crop((l, t, r, b)) + depthmap = depthmap[t:b, l:r] + + camera_intrinsics = camera_intrinsics.copy() + camera_intrinsics[0, 2] -= l + camera_intrinsics[1, 2] -= t + + return image.to_pil(), depthmap, camera_intrinsics + + +def bbox_from_intrinsics_in_out(input_camera_matrix, output_camera_matrix, output_resolution): + out_width, out_height = output_resolution + l, t = np.int32(np.round(input_camera_matrix[:2, 2] - output_camera_matrix[:2, 2])) + crop_bbox = (l, t, l + out_width, t + out_height) + return crop_bbox diff --git a/dynamic_predictor/dust3r/datasets/utils/geom.py b/dynamic_predictor/dust3r/datasets/utils/geom.py new file mode 100644 index 0000000000000000000000000000000000000000..a3d7e2f8a384c5a5a8c225eb245f94832bcaa56b --- /dev/null +++ b/dynamic_predictor/dust3r/datasets/utils/geom.py @@ -0,0 +1,572 @@ +import torch +import numpy as np +import torchvision.ops as ops + +def matmul2(mat1, mat2): + return torch.matmul(mat1, mat2) + +def matmul3(mat1, mat2, mat3): + return torch.matmul(mat1, torch.matmul(mat2, mat3)) + +def eye_3x3(B, device='cuda'): + rt = torch.eye(3, device=torch.device(device)).view(1,3,3).repeat([B, 1, 1]) + return rt + +def eye_4x4(B, device='cuda'): + rt = torch.eye(4, device=torch.device(device)).view(1,4,4).repeat([B, 1, 1]) + return rt + +def safe_inverse(a): + inv = a.clone() + r_transpose = a[:3, :3].transpose(0, 1) # inverse of rotation matrix + + inv[:3, :3] = r_transpose + inv[:3, 3:4] = -torch.matmul(r_transpose, a[:3, 3:4]) + + return inv + +def safe_inverse_batch(a): #parallel version + B, _, _ = list(a.shape) + inv = a.clone() + r_transpose = a[:, :3, :3].transpose(1,2) #inverse of rotation matrix + + inv[:, :3, :3] = r_transpose + inv[:, :3, 3:4] = -torch.matmul(r_transpose, a[:, :3, 3:4]) + + return inv + + +def safe_inverse_single(a): + r, t = split_rt_single(a) + t = t.view(3,1) + r_transpose = r.t() + inv = torch.cat([r_transpose, -torch.matmul(r_transpose, t)], 1) + bottom_row = a[3:4, :] # this is [0, 0, 0, 1] + # bottom_row = torch.tensor([0.,0.,0.,1.]).view(1,4) + inv = torch.cat([inv, bottom_row], 0) + return inv + +def split_intrinsics(K): + # K is B x 3 x 3 or B x 4 x 4 + fx = K[:,0,0] + fy = K[:,1,1] + x0 = K[:,0,2] + y0 = K[:,1,2] + return fx, fy, x0, y0 + +def apply_pix_T_cam(pix_T_cam, xyz): + + fx, fy, x0, y0 = split_intrinsics(pix_T_cam) + + # xyz is shaped B x H*W x 3 + # returns xy, shaped B x H*W x 2 + + B, N, C = list(xyz.shape) + assert(C==3) + + x, y, z = torch.unbind(xyz, axis=-1) + + fx = torch.reshape(fx, [B, 1]) + fy = torch.reshape(fy, [B, 1]) + x0 = torch.reshape(x0, [B, 1]) + y0 = torch.reshape(y0, [B, 1]) + + EPS = 1e-4 + z = torch.clamp(z, min=EPS) + x = (x*fx)/(z)+x0 + y = (y*fy)/(z)+y0 + xy = torch.stack([x, y], axis=-1) + return xy + +def apply_pix_T_cam_py(pix_T_cam, xyz): + + fx, fy, x0, y0 = split_intrinsics(pix_T_cam) + + # xyz is shaped B x H*W x 3 + # returns xy, shaped B x H*W x 2 + + B, N, C = list(xyz.shape) + assert(C==3) + + x, y, z = xyz[:,:,0], xyz[:,:,1], xyz[:,:,2] + + fx = np.reshape(fx, [B, 1]) + fy = np.reshape(fy, [B, 1]) + x0 = np.reshape(x0, [B, 1]) + y0 = np.reshape(y0, [B, 1]) + + EPS = 1e-4 + z = np.clip(z, EPS, None) + x = (x*fx)/(z)+x0 + y = (y*fy)/(z)+y0 + xy = np.stack([x, y], axis=-1) + return xy + +def get_camM_T_camXs(origin_T_camXs, ind=0): + B, S = list(origin_T_camXs.shape)[0:2] + camM_T_camXs = torch.zeros_like(origin_T_camXs) + for b in list(range(B)): + camM_T_origin = 
safe_inverse_single(origin_T_camXs[b,ind])
+ for s in list(range(S)):
+ camM_T_camXs[b,s] = torch.matmul(camM_T_origin, origin_T_camXs[b,s])
+ return camM_T_camXs
+
+
+def realative_T_py(cam_T1, cam_T2):
+ cam_T1 = torch.tensor(cam_T1, dtype=torch.float32)
+ cam_T2 = torch.tensor(cam_T2, dtype=torch.float32)
+ inv_cam_T1 = safe_inverse(cam_T1)
+ relative_transform = torch.matmul(inv_cam_T1, cam_T2)
+ return relative_transform.numpy()
+
+def apply_4x4(RT, xyz):
+ B, N, _ = list(xyz.shape)
+ ones = torch.ones_like(xyz[:,:,0:1])
+ xyz1 = torch.cat([xyz, ones], 2)
+ xyz1_t = torch.transpose(xyz1, 1, 2)
+ # this is B x 4 x N
+ xyz2_t = torch.matmul(RT, xyz1_t)
+ xyz2 = torch.transpose(xyz2_t, 1, 2)
+ xyz2 = xyz2[:,:,:3]
+ return xyz2
+
+def apply_4x4_py(RT, xyz):
+ ones = np.ones_like(xyz[:, 0:1])
+ xyz1 = np.concatenate([xyz, ones], 1)
+ xyz1_t = xyz1.transpose(1, 0)
+ xyz2_t = np.matmul(RT, xyz1_t)
+ xyz2 = xyz2_t.transpose(1, 0)
+ xyz2 = xyz2[:, :3]
+ return xyz2
+
+def apply_4x4_py_batch(RT, xyz):
+ # print('RT', RT.shape)
+ B, N, _ = list(xyz.shape)
+ ones = np.ones_like(xyz[:,:,0:1])
+ xyz1 = np.concatenate([xyz, ones], 2)
+ # print('xyz1', xyz1.shape)
+ xyz1_t = xyz1.transpose(0,2,1)
+ # print('xyz1_t', xyz1_t.shape)
+ # this is B x 4 x N
+ xyz2_t = np.matmul(RT, xyz1_t)
+ # print('xyz2_t', xyz2_t.shape)
+ xyz2 = xyz2_t.transpose(0,2,1)
+ # print('xyz2', xyz2.shape)
+ xyz2 = xyz2[:,:,:3]
+ return xyz2
+
+def apply_3x3(RT, xy):
+ B, N, _ = list(xy.shape)
+ ones = torch.ones_like(xy[:,:,0:1])
+ xy1 = torch.cat([xy, ones], 2)
+ xy1_t = torch.transpose(xy1, 1, 2)
+ # this is B x 3 x N
+ xy2_t = torch.matmul(RT, xy1_t)
+ xy2 = torch.transpose(xy2_t, 1, 2)
+ xy2 = xy2[:,:,:2]
+ return xy2
+
+def generate_polygon(ctr_x, ctr_y, avg_r, irregularity, spikiness, num_verts):
+ '''
+ Start with the center of the polygon at ctr_x, ctr_y,
+ then create the polygon by sampling points on a circle around the center.
+ Random noise is added by varying the angular spacing between sequential points,
+ and by varying the radial distance of each point from the centre.
+
+ Params:
+ ctr_x, ctr_y - coordinates of the "centre" of the polygon
+ avg_r - in px, the average radius of this polygon, this roughly controls how large the polygon is, really only useful for order of magnitude.
+ irregularity - [0,1] indicating how much variance there is in the angular spacing of vertices. [0,1] will map to [0, 2pi/num_verts]
+ spikiness - [0,1] indicating how much variance there is in each vertex from the circle of radius avg_r. [0,1] will map to [0, avg_r]
+ num_verts - the number of vertices to sample
+
+ Returns:
+ np.array [num_verts, 2] - CCW order. 
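+
+ Example (values invented for illustration, not from the original source):
+ pts = generate_polygon(100, 100, avg_r=50, irregularity=0.3, spikiness=0.2, num_verts=8)
+ # pts has shape (8, 2); radii are clipped to [0, 2*avg_r], so the int
+ # coordinates land roughly within 100 +/- 100 px of the centre.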
+ ''' + # spikiness + spikiness = np.clip(spikiness, 0, 1) * avg_r + + # generate n angle steps + irregularity = np.clip(irregularity, 0, 1) * 2 * np.pi / num_verts + lower = (2*np.pi / num_verts) - irregularity + upper = (2*np.pi / num_verts) + irregularity + + # angle steps + angle_steps = np.random.uniform(lower, upper, num_verts) + sc = (2 * np.pi) / angle_steps.sum() + angle_steps *= sc + + # get all radii + angle = np.random.uniform(0, 2*np.pi) + radii = np.clip(np.random.normal(avg_r, spikiness, num_verts), 0, 2 * avg_r) + + # compute all points + points = [] + for i in range(num_verts): + x = ctr_x + radii[i] * np.cos(angle) + y = ctr_y + radii[i] * np.sin(angle) + points.append([x, y]) + angle += angle_steps[i] + + return np.array(points).astype(int) + + +def get_random_affine_2d(B, rot_min=-5.0, rot_max=5.0, tx_min=-0.1, tx_max=0.1, ty_min=-0.1, ty_max=0.1, sx_min=-0.05, sx_max=0.05, sy_min=-0.05, sy_max=0.05, shx_min=-0.05, shx_max=0.05, shy_min=-0.05, shy_max=0.05): + ''' + Params: + rot_min: rotation amount min + rot_max: rotation amount max + + tx_min: translation x min + tx_max: translation x max + + ty_min: translation y min + ty_max: translation y max + + sx_min: scaling x min + sx_max: scaling x max + + sy_min: scaling y min + sy_max: scaling y max + + shx_min: shear x min + shx_max: shear x max + + shy_min: shear y min + shy_max: shear y max + + Returns: + transformation matrix: (B, 3, 3) + ''' + # rotation + if rot_max - rot_min != 0: + rot_amount = np.random.uniform(low=rot_min, high=rot_max, size=B) + rot_amount = np.pi/180.0*rot_amount + else: + rot_amount = rot_min + rotation = np.zeros((B, 3, 3)) # B, 3, 3 + rotation[:, 2, 2] = 1 + rotation[:, 0, 0] = np.cos(rot_amount) + rotation[:, 0, 1] = -np.sin(rot_amount) + rotation[:, 1, 0] = np.sin(rot_amount) + rotation[:, 1, 1] = np.cos(rot_amount) + + # translation + translation = np.zeros((B, 3, 3)) # B, 3, 3 + translation[:, [0,1,2], [0,1,2]] = 1 + if (tx_max - tx_min) > 0: + trans_x = np.random.uniform(low=tx_min, high=tx_max, size=B) + translation[:, 0, 2] = trans_x + # else: + # translation[:, 0, 2] = tx_max + if ty_max - ty_min != 0: + trans_y = np.random.uniform(low=ty_min, high=ty_max, size=B) + translation[:, 1, 2] = trans_y + # else: + # translation[:, 1, 2] = ty_max + + # scaling + scaling = np.zeros((B, 3, 3)) # B, 3, 3 + scaling[:, [0,1,2], [0,1,2]] = 1 + if (sx_max - sx_min) > 0: + scale_x = 1 + np.random.uniform(low=sx_min, high=sx_max, size=B) + scaling[:, 0, 0] = scale_x + # else: + # scaling[:, 0, 0] = sx_max + if (sy_max - sy_min) > 0: + scale_y = 1 + np.random.uniform(low=sy_min, high=sy_max, size=B) + scaling[:, 1, 1] = scale_y + # else: + # scaling[:, 1, 1] = sy_max + + # shear + shear = np.zeros((B, 3, 3)) # B, 3, 3 + shear[:, [0,1,2], [0,1,2]] = 1 + if (shx_max - shx_min) > 0: + shear_x = np.random.uniform(low=shx_min, high=shx_max, size=B) + shear[:, 0, 1] = shear_x + # else: + # shear[:, 0, 1] = shx_max + if (shy_max - shy_min) > 0: + shear_y = np.random.uniform(low=shy_min, high=shy_max, size=B) + shear[:, 1, 0] = shear_y + # else: + # shear[:, 1, 0] = shy_max + + # compose all those + rt = np.einsum("ijk,ikl->ijl", rotation, translation) + ss = np.einsum("ijk,ikl->ijl", scaling, shear) + trans = np.einsum("ijk,ikl->ijl", rt, ss) + + return trans + +def get_centroid_from_box2d(box2d): + ymin = box2d[:,0] + xmin = box2d[:,1] + ymax = box2d[:,2] + xmax = box2d[:,3] + x = (xmin+xmax)/2.0 + y = (ymin+ymax)/2.0 + return y, x + +def normalize_boxlist2d(boxlist2d, H, W): + boxlist2d = 
boxlist2d.clone() + ymin, xmin, ymax, xmax = torch.unbind(boxlist2d, dim=2) + ymin = ymin / float(H) + ymax = ymax / float(H) + xmin = xmin / float(W) + xmax = xmax / float(W) + boxlist2d = torch.stack([ymin, xmin, ymax, xmax], dim=2) + return boxlist2d + +def unnormalize_boxlist2d(boxlist2d, H, W): + boxlist2d = boxlist2d.clone() + ymin, xmin, ymax, xmax = torch.unbind(boxlist2d, dim=2) + ymin = ymin * float(H) + ymax = ymax * float(H) + xmin = xmin * float(W) + xmax = xmax * float(W) + boxlist2d = torch.stack([ymin, xmin, ymax, xmax], dim=2) + return boxlist2d + +def unnormalize_box2d(box2d, H, W): + return unnormalize_boxlist2d(box2d.unsqueeze(1), H, W).squeeze(1) + +def normalize_box2d(box2d, H, W): + return normalize_boxlist2d(box2d.unsqueeze(1), H, W).squeeze(1) + +def get_size_from_box2d(box2d): + ymin = box2d[:,0] + xmin = box2d[:,1] + ymax = box2d[:,2] + xmax = box2d[:,3] + height = ymax-ymin + width = xmax-xmin + return height, width + +def crop_and_resize(im, boxlist, PH, PW, boxlist_is_normalized=False): + B, C, H, W = im.shape + B2, N, D = boxlist.shape + assert(B==B2) + assert(D==4) + # PH, PW is the size to resize to + + # output is B,N,C,PH,PW + + # pt wants xy xy, unnormalized + if boxlist_is_normalized: + boxlist_unnorm = unnormalize_boxlist2d(boxlist, H, W) + else: + boxlist_unnorm = boxlist + + ymin, xmin, ymax, xmax = boxlist_unnorm.unbind(2) + # boxlist_pt = torch.stack([boxlist_unnorm[:,1], boxlist_unnorm[:,0], boxlist_unnorm[:,3], boxlist_unnorm[:,2]], dim=1) + boxlist_pt = torch.stack([xmin, ymin, xmax, ymax], dim=2) + # we want a B-len list of K x 4 arrays + + # print('im', im.shape) + # print('boxlist', boxlist.shape) + # print('boxlist_pt', boxlist_pt.shape) + + # boxlist_pt = list(boxlist_pt.unbind(0)) + + crops = [] + for b in range(B): + crops_b = ops.roi_align(im[b:b+1], [boxlist_pt[b]], output_size=(PH, PW)) + crops.append(crops_b) + # # crops = im + + # print('crops', crops.shape) + # crops = crops.reshape(B,N,C,PH,PW) + + + # crops = [] + # for b in range(B): + # crop_b = ops.roi_align(im[b:b+1], [boxlist_pt[b]], output_size=(PH, PW)) + # print('crop_b', crop_b.shape) + # crops.append(crop_b) + crops = torch.stack(crops, dim=0) + + # print('crops', crops.shape) + # boxlist_list = boxlist_pt.unbind(0) + # print('rgb_crop', rgb_crop.shape) + + return crops + + +# def get_boxlist_from_centroid_and_size(cy, cx, h, w, clip=True): +# # cy,cx are both B,N +# ymin = cy - h/2 +# ymax = cy + h/2 +# xmin = cx - w/2 +# xmax = cx + w/2 + +# box = torch.stack([ymin, xmin, ymax, xmax], dim=-1) +# if clip: +# box = torch.clamp(box, 0, 1) +# return box + + +def get_boxlist_from_centroid_and_size(cy, cx, h, w):#, clip=False): + # cy,cx are the same shape + ymin = cy - h/2 + ymax = cy + h/2 + xmin = cx - w/2 + xmax = cx + w/2 + + # if clip: + # ymin = torch.clamp(ymin, 0, H-1) + # ymax = torch.clamp(ymax, 0, H-1) + # xmin = torch.clamp(xmin, 0, W-1) + # xmax = torch.clamp(xmax, 0, W-1) + + box = torch.stack([ymin, xmin, ymax, xmax], dim=-1) + return box + + +def get_box2d_from_mask(mask, normalize=False): + # mask is B, 1, H, W + + B, C, H, W = mask.shape + assert(C==1) + xy = utils.basic.gridcloud2d(B, H, W, norm=False, device=mask.device) # B, H*W, 2 + + box = torch.zeros((B, 4), dtype=torch.float32, device=mask.device) + for b in range(B): + xy_b = xy[b] # H*W, 2 + mask_b = mask[b].reshape(H*W) + xy_ = xy_b[mask_b > 0] + x_ = xy_[:,0] + y_ = xy_[:,1] + ymin = torch.min(y_) + ymax = torch.max(y_) + xmin = torch.min(x_) + xmax = torch.max(x_) + box[b] = torch.stack([ymin, 
xmin, ymax, xmax], dim=0) + if normalize: + box = normalize_boxlist2d(box.unsqueeze(1), H, W).squeeze(1) + return box + +def convert_box2d_to_intrinsics(box2d, pix_T_cam, H, W, use_image_aspect_ratio=True, mult_padding=1.0): + # box2d is B x 4, with ymin, xmin, ymax, xmax in normalized coords + # ymin, xmin, ymax, xmax = torch.unbind(box2d, dim=1) + # H, W is the original size of the image + # mult_padding is relative to object size in pixels + + # i assume we're rendering an image the same size as the original (H, W) + + if not mult_padding==1.0: + y, x = get_centroid_from_box2d(box2d) + h, w = get_size_from_box2d(box2d) + box2d = get_box2d_from_centroid_and_size( + y, x, h*mult_padding, w*mult_padding, clip=False) + + if use_image_aspect_ratio: + h, w = get_size_from_box2d(box2d) + y, x = get_centroid_from_box2d(box2d) + + # note h,w are relative right now + # we need to undo this, to see the real ratio + + h = h*float(H) + w = w*float(W) + box_ratio = h/w + im_ratio = H/float(W) + + # print('box_ratio:', box_ratio) + # print('im_ratio:', im_ratio) + + if box_ratio >= im_ratio: + w = h/im_ratio + # print('setting w:', h/im_ratio) + else: + h = w*im_ratio + # print('setting h:', w*im_ratio) + + box2d = get_box2d_from_centroid_and_size( + y, x, h/float(H), w/float(W), clip=False) + + assert(h > 1e-4) + assert(w > 1e-4) + + ymin, xmin, ymax, xmax = torch.unbind(box2d, dim=1) + + fx, fy, x0, y0 = split_intrinsics(pix_T_cam) + + # the topleft of the new image will now have a different offset from the center of projection + + new_x0 = x0 - xmin*W + new_y0 = y0 - ymin*H + + pix_T_cam = pack_intrinsics(fx, fy, new_x0, new_y0) + # this alone will give me an image in original resolution, + # with its topleft at the box corner + + box_h, box_w = get_size_from_box2d(box2d) + # these are normalized, and shaped B. 
(e.g., [0.4], [0.3]) + + # we are going to scale the image by the inverse of this, + # since we are zooming into this area + + sy = 1./box_h + sx = 1./box_w + + pix_T_cam = scale_intrinsics(pix_T_cam, sx, sy) + return pix_T_cam, box2d + +def pixels2camera(x,y,z,fx,fy,x0,y0): + # x and y are locations in pixel coordinates, z is a depth in meters + # they can be images or pointclouds + # fx, fy, x0, y0 are camera intrinsics + # returns xyz, sized B x N x 3 + + B = x.shape[0] + + fx = torch.reshape(fx, [B,1]) + fy = torch.reshape(fy, [B,1]) + x0 = torch.reshape(x0, [B,1]) + y0 = torch.reshape(y0, [B,1]) + + x = torch.reshape(x, [B,-1]) + y = torch.reshape(y, [B,-1]) + z = torch.reshape(z, [B,-1]) + + # unproject + x = (z/fx)*(x-x0) + y = (z/fy)*(y-y0) + + xyz = torch.stack([x,y,z], dim=2) + # B x N x 3 + return xyz + +def camera2pixels(xyz, pix_T_cam): + # xyz is shaped B x H*W x 3 + # returns xy, shaped B x H*W x 2 + + fx, fy, x0, y0 = split_intrinsics(pix_T_cam) + x, y, z = torch.unbind(xyz, dim=-1) + B = list(z.shape)[0] + + fx = torch.reshape(fx, [B,1]) + fy = torch.reshape(fy, [B,1]) + x0 = torch.reshape(x0, [B,1]) + y0 = torch.reshape(y0, [B,1]) + x = torch.reshape(x, [B,-1]) + y = torch.reshape(y, [B,-1]) + z = torch.reshape(z, [B,-1]) + + EPS = 1e-4 + z = torch.clamp(z, min=EPS) + x = (x*fx)/z + x0 + y = (y*fy)/z + y0 + xy = torch.stack([x, y], dim=-1) + return xy + +def depth2pointcloud(z, pix_T_cam): + B, C, H, W = list(z.shape) + device = z.device + y, x = utils.basic.meshgrid2d(B, H, W, device=device) + z = torch.reshape(z, [B, H, W]) + fx, fy, x0, y0 = split_intrinsics(pix_T_cam) + xyz = pixels2camera(x, y, z, fx, fy, x0, y0) + return xyz diff --git a/dynamic_predictor/dust3r/datasets/utils/improc.py b/dynamic_predictor/dust3r/datasets/utils/improc.py new file mode 100644 index 0000000000000000000000000000000000000000..0460b85cb65070dd6a33226dae50718fd0aada38 --- /dev/null +++ b/dynamic_predictor/dust3r/datasets/utils/improc.py @@ -0,0 +1,1528 @@ +import torch +import numpy as np +import utils.basic +from sklearn.decomposition import PCA +from matplotlib import cm +import matplotlib.pyplot as plt +import cv2 +import torch.nn.functional as F +import torchvision +EPS = 1e-6 + +from skimage.color import ( + rgb2lab, rgb2yuv, rgb2ycbcr, lab2rgb, yuv2rgb, ycbcr2rgb, + rgb2hsv, hsv2rgb, rgb2xyz, xyz2rgb, rgb2hed, hed2rgb) + +def _convert(input_, type_): + return { + 'float': input_.float(), + 'double': input_.double(), + }.get(type_, input_) + +def _generic_transform_sk_3d(transform, in_type='', out_type=''): + def apply_transform_individual(input_): + device = input_.device + input_ = input_.cpu() + input_ = _convert(input_, in_type) + + input_ = input_.permute(1, 2, 0).detach().numpy() + transformed = transform(input_) + output = torch.from_numpy(transformed).float().permute(2, 0, 1) + output = _convert(output, out_type) + return output.to(device) + + def apply_transform(input_): + to_stack = [] + for image in input_: + to_stack.append(apply_transform_individual(image)) + return torch.stack(to_stack) + return apply_transform + +hsv_to_rgb = _generic_transform_sk_3d(hsv2rgb) + +def preprocess_color_tf(x): + import tensorflow as tf + return tf.cast(x,tf.float32) * 1./255 - 0.5 + +def preprocess_color(x): + if isinstance(x, np.ndarray): + return x.astype(np.float32) * 1./255 - 0.5 + else: + return x.float() * 1./255 - 0.5 + +def pca_embed(emb, keep, valid=None): + ## emb -- [S,H/2,W/2,C] + ## keep is the number of principal components to keep + ## Helper function for reduce_emb. 
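+ ## A rough usage sketch (shapes are illustrative, not from the original
+ ## source): given feats of shape B x 64 x H x W,
+ ## vis = pca_embed(feats, keep=3)
+ ## fits a per-image PCA over the H*W pixels (64-dim samples) and returns
+ ## a B x 3 x H x W tensor whose 3 channels can be viewed as RGB.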
+ emb = emb + EPS + #emb is B x C x H x W + emb = emb.permute(0, 2, 3, 1).cpu().detach().numpy() #this is B x H x W x C + + if valid: + valid = valid.cpu().detach().numpy().reshape((H*W)) + + emb_reduced = list() + + B, H, W, C = np.shape(emb) + for img in emb: + if np.isnan(img).any(): + emb_reduced.append(np.zeros([H, W, keep])) + continue + + pixels_kd = np.reshape(img, (H*W, C)) + + if valid: + pixels_kd_pca = pixels_kd[valid] + else: + pixels_kd_pca = pixels_kd + + P = PCA(keep) + P.fit(pixels_kd_pca) + + if valid: + pixels3d = P.transform(pixels_kd)*valid + else: + pixels3d = P.transform(pixels_kd) + + out_img = np.reshape(pixels3d, [H,W,keep]).astype(np.float32) + if np.isnan(out_img).any(): + emb_reduced.append(np.zeros([H, W, keep])) + continue + + emb_reduced.append(out_img) + + emb_reduced = np.stack(emb_reduced, axis=0).astype(np.float32) + + return torch.from_numpy(emb_reduced).permute(0, 3, 1, 2) + +def pca_embed_together(emb, keep): + ## emb -- [S,H/2,W/2,C] + ## keep is the number of principal components to keep + ## Helper function for reduce_emb. + emb = emb + EPS + #emb is B x C x H x W + emb = emb.permute(0, 2, 3, 1).cpu().detach().numpy() #this is B x H x W x C + + B, H, W, C = np.shape(emb) + if np.isnan(emb).any(): + return torch.zeros(B, keep, H, W) + + pixelskd = np.reshape(emb, (B*H*W, C)) + P = PCA(keep) + P.fit(pixelskd) + pixels3d = P.transform(pixelskd) + out_img = np.reshape(pixels3d, [B,H,W,keep]).astype(np.float32) + + if np.isnan(out_img).any(): + return torch.zeros(B, keep, H, W) + + return torch.from_numpy(out_img).permute(0, 3, 1, 2) + +def reduce_emb(emb, valid=None, inbound=None, together=False): + ## emb -- [S,C,H/2,W/2], inbound -- [S,1,H/2,W/2] + ## Reduce number of chans to 3 with PCA. For vis. + # S,H,W,C = emb.shape.as_list() + S, C, H, W = list(emb.size()) + keep = 3 + + if together: + reduced_emb = pca_embed_together(emb, keep) + else: + reduced_emb = pca_embed(emb, keep, valid) #not im + + reduced_emb = utils.basic.normalize(reduced_emb) - 0.5 + if inbound is not None: + emb_inbound = emb*inbound + else: + emb_inbound = None + + return reduced_emb, emb_inbound + +def get_feat_pca(feat, valid=None): + B, C, D, W = list(feat.size()) + # feat is B x C x D x W. If 3D input, average it through Height dimension before passing into this function. 
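+ # A hedged example of the expected shapes (sizes invented for illustration):
+ # feat = torch.randn(2, 128, 64, 64) # B x C x H x W
+ # pca = get_feat_pca(feat) # -> 2 x 3 x 64 x 64, roughly in [-0.5, 0.5]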
+
+ pca, _ = reduce_emb(feat, valid=valid, inbound=None, together=True)
+ # pca is B x 3 x W x D
+ return pca
+
+def gif_and_tile(ims, just_gif=False):
+ S = len(ims)
+ # each im is B x H x W x C
+ # i want a gif in the left, and the tiled frames on the right
+ # for the gif tool, this means making a B x S x H x W tensor
+ # where the leftmost part is sequential and the rest is tiled
+ gif = torch.stack(ims, dim=1)
+ if just_gif:
+ return gif
+ til = torch.cat(ims, dim=2)
+ til = til.unsqueeze(dim=1).repeat(1, S, 1, 1, 1)
+ im = torch.cat([gif, til], dim=3)
+ return im
+
+def back2color(i, blacken_zeros=False):
+ if blacken_zeros:
+ const = torch.tensor([-0.5])
+ i = torch.where(i==0.0, const.cuda() if i.is_cuda else const, i)
+ return back2color(i)
+ else:
+ return ((i+0.5)*255).type(torch.ByteTensor)
+
+def convert_occ_to_height(occ, reduce_axis=3):
+ B, C, D, H, W = list(occ.shape)
+ assert(C==1)
+ # note that height increases DOWNWARD in the tensor
+ # (like pixel/camera coordinates)
+
+ G = list(occ.shape)[reduce_axis]
+ values = torch.linspace(float(G), 1.0, steps=G, dtype=torch.float32, device=occ.device)
+ if reduce_axis==2:
+ # fro view
+ values = values.view(1, 1, G, 1, 1)
+ elif reduce_axis==3:
+ # top view
+ values = values.view(1, 1, 1, G, 1)
+ elif reduce_axis==4:
+ # lateral view
+ values = values.view(1, 1, 1, 1, G)
+ else:
+ assert(False) # you have to reduce one of the spatial dims (2-4)
+ values = torch.max(occ*values, dim=reduce_axis)[0]/float(G)
+ # values = values.view([B, C, D, W])
+ return values
+
+def xy2heatmap(xy, sigma, grid_xs, grid_ys, norm=False):
+ # xy is B x N x 2, containing float x and y coordinates of N things
+ # grid_xs and grid_ys are B x N x Y x X
+
+ B, N, Y, X = list(grid_xs.shape)
+
+ mu_x = xy[:,:,0].clone()
+ mu_y = xy[:,:,1].clone()
+
+ x_valid = (mu_x>-0.5) & (mu_x<float(X+0.5)) & (mu_x==mu_x)
+ y_valid = (mu_y>-0.5) & (mu_y<float(Y+0.5)) & (mu_y==mu_y)
+ not_valid = ~(x_valid & y_valid)
+
+ mu_x[not_valid] = -10000
+ mu_y[not_valid] = -10000
+
+ mu_x = mu_x.reshape(B, N, 1, 1).repeat(1, 1, Y, X)
+ mu_y = mu_y.reshape(B, N, 1, 1).repeat(1, 1, Y, X)
+
+ sigma_sq = sigma*sigma
+ sq_diff_x = (grid_xs - mu_x)**2
+ sq_diff_y = (grid_ys - mu_y)**2
+
+ term1 = 1./2.*np.pi*sigma_sq
+ term2 = torch.exp(-(sq_diff_x+sq_diff_y)/(2.*sigma_sq))
+ heat = term1*term2
+
+ if norm:
+ # normalize the gaussian blobs to have sum 1
+ heat = heat / (EPS + torch.sum(heat, dim=[2,3], keepdim=True))
+
+ return heat
+
+def xy2heatmaps(xy, Y, X, sigma=30.0, norm=True):
+ # xy is B x N x 2
+
+ B, N, D = list(xy.shape)
+ assert(D==2)
+
+ device = xy.device
+
+ grid_y, grid_x = utils.basic.meshgrid2d(B, Y, X, device=device)
+ # grid_x and grid_y are B x Y x X
+ grid_xs = grid_x.unsqueeze(1).repeat(1, N, 1, 1)
+ grid_ys = grid_y.unsqueeze(1).repeat(1, N, 1, 1)
+ heat = xy2heatmap(xy, sigma, grid_xs, grid_ys, norm=norm)
+ return heat
+
+def draw_circles_at_xy(xy, Y, X, sigma=12.5, round=False):
+ B, N, D = list(xy.shape)
+ assert(D==2)
+ prior = xy2heatmaps(xy, Y, X, sigma=sigma)
+ # prior is B x N x Y x X
+ if round:
+ prior = (prior > 0.5).float()
+ return prior
+
+def seq2color(im, norm=True, colormap='coolwarm'):
+ B, S, H, W = list(im.shape)
+ # S is sequential
+
+ # prep a mask of the valid pixels, so we can blacken the invalids later
+ mask = torch.max(im, dim=1, keepdim=True)[0]
+
+ # turn the S dim into an explicit sequence
+ coeffs = np.linspace(1.0, float(S), S).astype(np.float32)/float(S)
+
+ # # increase the spacing from the center
+ # coeffs[:int(S/2)] -= 2.0
+ # coeffs[int(S/2)+1:] += 2.0
+
+ coeffs = torch.from_numpy(coeffs).float().cuda()
+ coeffs = coeffs.reshape(1, S, 1, 1).repeat(B, 1, H, W)
+ # scale each channel by the right coeff
+ im = im * coeffs
+ # now im is in [1/S, 1], except for the invalid parts which are 0
+ # keep the highest valid coeff at each pixel
+ im = torch.max(im, dim=1, keepdim=True)[0]
+
+ out = []
+ for b in range(B):
+ im_ = im[b]
+ # move channels out to last dim
+ im_ = im_.detach().cpu().numpy()
+ im_ = np.squeeze(im_)
+ # im_ is H x W
+ if colormap=='coolwarm':
+ im_ = cm.coolwarm(im_)[:, :, :3]
+ elif colormap=='PiYG':
+ im_ = cm.PiYG(im_)[:, :, :3]
+ elif colormap=='winter':
+ im_ = cm.winter(im_)[:, :, :3]
+ elif colormap=='spring':
+ im_ = cm.spring(im_)[:, :, :3]
+ elif colormap=='onediff':
+ im_ = np.reshape(im_, (-1))
+ im0_ = cm.spring(im_)[:, :3]
+ im1_ = cm.winter(im_)[:, :3]
+ im1_[im_==1/float(S)] = im0_[im_==1/float(S)]
+ im_ = np.reshape(im1_, (H, W, 3))
+ else:
+ assert(False) # invalid colormap
+ # move channels into dim 0
+ im_ = np.transpose(im_, [2, 0, 1])
+ im_ = torch.from_numpy(im_).float().cuda()
+ out.append(im_)
+ out = torch.stack(out, dim=0)
+
+ # blacken the invalid pixels, instead of using the 0-color
+ out = out*mask
+ # 
out = out*255.0 + + # put it in [-0.5, 0.5] + out = out - 0.5 + + return out + +def colorize(d): + # this is actually just grayscale right now + + if d.ndim==2: + d = d.unsqueeze(dim=0) + else: + assert(d.ndim==3) + + # color_map = cm.get_cmap('plasma') + color_map = cm.get_cmap('inferno') + # S1, D = traj.shape + + # print('d1', d.shape) + C,H,W = d.shape + assert(C==1) + d = d.reshape(-1) + d = d.detach().cpu().numpy() + # print('d2', d.shape) + color = np.array(color_map(d)) * 255 # rgba + # print('color1', color.shape) + color = np.reshape(color[:,:3], [H*W, 3]) + # print('color2', color.shape) + color = torch.from_numpy(color).permute(1,0).reshape(3,H,W) + # # gather + # cm = matplotlib.cm.get_cmap(cmap if cmap is not None else 'gray') + # if cmap=='RdBu' or cmap=='RdYlGn': + # colors = cm(np.arange(256))[:, :3] + # else: + # colors = cm.colors + # colors = np.array(colors).astype(np.float32) + # colors = np.reshape(colors, [-1, 3]) + # colors = tf.constant(colors, dtype=tf.float32) + + # value = tf.gather(colors, indices) + # colorize(value, normalize=True, vmin=None, vmax=None, cmap=None, vals=255) + + # copy to the three chans + # d = d.repeat(3, 1, 1) + return color + + +def oned2inferno(d, norm=True, do_colorize=False): + # convert a 1chan input to a 3chan image output + + # if it's just B x H x W, add a C dim + if d.ndim==3: + d = d.unsqueeze(dim=1) + # d should be B x C x H x W, where C=1 + B, C, H, W = list(d.shape) + assert(C==1) + + if norm: + d = utils.basic.normalize(d) + + if do_colorize: + rgb = torch.zeros(B, 3, H, W) + for b in list(range(B)): + rgb[b] = colorize(d[b]) + else: + rgb = d.repeat(1, 3, 1, 1)*255.0 + # rgb = (255.0*rgb).type(torch.ByteTensor) + rgb = rgb.type(torch.ByteTensor) + + # rgb = tf.cast(255.0*rgb, tf.uint8) + # rgb = tf.reshape(rgb, [-1, hyp.H, hyp.W, 3]) + # rgb = tf.expand_dims(rgb, axis=0) + return rgb + +def oned2gray(d, norm=True): + # convert a 1chan input to a 3chan image output + + # if it's just B x H x W, add a C dim + if d.ndim==3: + d = d.unsqueeze(dim=1) + # d should be B x C x H x W, where C=1 + B, C, H, W = list(d.shape) + assert(C==1) + + if norm: + d = utils.basic.normalize(d) + + rgb = d.repeat(1,3,1,1) + rgb = (255.0*rgb).type(torch.ByteTensor) + return rgb + + +def draw_frame_id_on_vis(vis, frame_id, scale=0.5, left=5, top=20): + + rgb = vis.detach().cpu().numpy()[0] + rgb = np.transpose(rgb, [1, 2, 0]) # put channels last + rgb = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR) + color = (255, 255, 255) + # print('putting frame id', frame_id) + + frame_str = utils.basic.strnum(frame_id) + + text_color_bg = (0,0,0) + font = cv2.FONT_HERSHEY_SIMPLEX + text_size, _ = cv2.getTextSize(frame_str, font, scale, 1) + text_w, text_h = text_size + cv2.rectangle(rgb, (left, top-text_h), (left + text_w, top+1), text_color_bg, -1) + + cv2.putText( + rgb, + frame_str, + (left, top), # from left, from top + font, + scale, # font scale (float) + color, + 1) # font thickness (int) + rgb = cv2.cvtColor(rgb.astype(np.uint8), cv2.COLOR_BGR2RGB) + vis = torch.from_numpy(rgb).permute(2, 0, 1).unsqueeze(0) + return vis + +COLORMAP_FILE = "./utils/bremm.png" +class ColorMap2d: + def __init__(self, filename=None): + self._colormap_file = filename or COLORMAP_FILE + self._img = plt.imread(self._colormap_file) + + self._height = self._img.shape[0] + self._width = self._img.shape[1] + + def __call__(self, X): + assert len(X.shape) == 2 + output = np.zeros((X.shape[0], 3)) + for i in range(X.shape[0]): + x, y = X[i, :] + xp = int((self._width-1) * x) + yp = 
int((self._height-1) * y) + xp = np.clip(xp, 0, self._width-1) + yp = np.clip(yp, 0, self._height-1) + output[i, :] = self._img[yp, xp] + return output + +def get_n_colors(N, sequential=False): + label_colors = [] + for ii in range(N): + if sequential: + rgb = cm.winter(ii/(N-1)) + rgb = (np.array(rgb) * 255).astype(np.uint8)[:3] + else: + rgb = np.zeros(3) + while np.sum(rgb) < 128: # ensure min brightness + rgb = np.random.randint(0,256,3) + label_colors.append(rgb) + return label_colors + +class Summ_writer(object): + def __init__(self, writer, global_step, log_freq=10, fps=8, scalar_freq=100, just_gif=False): + self.writer = writer + self.global_step = global_step + self.log_freq = log_freq + self.fps = fps + self.just_gif = just_gif + self.maxwidth = 10000 + self.save_this = (self.global_step % self.log_freq == 0) + self.scalar_freq = max(scalar_freq,1) + + + def summ_gif(self, name, tensor, blacken_zeros=False): + # tensor should be in B x S x C x H x W + + assert tensor.dtype in {torch.uint8,torch.float32} + shape = list(tensor.shape) + + if tensor.dtype == torch.float32: + tensor = back2color(tensor, blacken_zeros=blacken_zeros) + + video_to_write = tensor[0:1] + + S = video_to_write.shape[1] + if S==1: + # video_to_write is 1 x 1 x C x H x W + self.writer.add_image(name, video_to_write[0,0], global_step=self.global_step) + else: + self.writer.add_video(name, video_to_write, fps=self.fps, global_step=self.global_step) + + return video_to_write + + def draw_boxlist2d_on_image(self, rgb, boxlist, scores=None, tids=None, linewidth=1): + B, C, H, W = list(rgb.shape) + assert(C==3) + B2, N, D = list(boxlist.shape) + assert(B2==B) + assert(D==4) # ymin, xmin, ymax, xmax + + rgb = back2color(rgb) + if scores is None: + scores = torch.ones(B2, N).float() + if tids is None: + tids = torch.arange(N).reshape(1,N).repeat(B2,N).long() + # tids = torch.zeros(B2, N).long() + out = self.draw_boxlist2d_on_image_py( + rgb[0].cpu().detach().numpy(), + boxlist[0].cpu().detach().numpy(), + scores[0].cpu().detach().numpy(), + tids[0].cpu().detach().numpy(), + linewidth=linewidth) + out = torch.from_numpy(out).type(torch.ByteTensor).permute(2, 0, 1) + out = torch.unsqueeze(out, dim=0) + out = preprocess_color(out) + out = torch.reshape(out, [1, C, H, W]) + return out + + def draw_boxlist2d_on_image_py(self, rgb, boxlist, scores, tids, linewidth=1): + # all inputs are numpy tensors + # rgb is H x W x 3 + # boxlist is N x 4 + # scores is N + # tids is N + + rgb = np.transpose(rgb, [1, 2, 0]) # put channels last + # rgb = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR) + + rgb = rgb.astype(np.uint8).copy() + + + H, W, C = rgb.shape + assert(C==3) + N, D = boxlist.shape + assert(D==4) + + # color_map = cm.get_cmap('tab20') + # color_map = cm.get_cmap('set1') + color_map = cm.get_cmap('Accent') + color_map = color_map.colors + # print('color_map', color_map) + + # draw + for ind, box in enumerate(boxlist): + # box is 4 + if not np.isclose(scores[ind], 0.0): + # box = utils.geom.scale_box2d(box, H, W) + ymin, xmin, ymax, xmax = box + + # ymin, ymax = ymin*H, ymax*H + # xmin, xmax = xmin*W, xmax*W + + # print 'score = %.2f' % scores[ind] + # color_id = tids[ind] % 20 + color_id = tids[ind] + color = color_map[color_id] + color = np.array(color)*255.0 + color = color.round() + # color = color.astype(np.uint8) + # color = color[::-1] + # print('color', color) + + # print 'tid = %d; score = %.3f' % (tids[ind], scores[ind]) + + # if False: + if scores[ind] < 1.0: # not gt + cv2.putText(rgb, + # '%d (%.2f)' % (tids[ind], 
scores[ind]), + '%.2f' % (scores[ind]), + (int(xmin), int(ymin)), + cv2.FONT_HERSHEY_SIMPLEX, + 0.5, # font size + color), + #1) # font weight + + xmin = np.clip(int(xmin), 0, W-1) + xmax = np.clip(int(xmax), 0, W-1) + ymin = np.clip(int(ymin), 0, H-1) + ymax = np.clip(int(ymax), 0, H-1) + + cv2.line(rgb, (xmin, ymin), (xmin, ymax), color, linewidth, cv2.LINE_AA) + cv2.line(rgb, (xmin, ymin), (xmax, ymin), color, linewidth, cv2.LINE_AA) + cv2.line(rgb, (xmax, ymin), (xmax, ymax), color, linewidth, cv2.LINE_AA) + cv2.line(rgb, (xmax, ymax), (xmin, ymax), color, linewidth, cv2.LINE_AA) + + # rgb = cv2.cvtColor(rgb.astype(np.uint8), cv2.COLOR_BGR2RGB) + return rgb + + def summ_boxlist2d(self, name, rgb, boxlist, scores=None, tids=None, frame_id=None, only_return=False, linewidth=2): + B, C, H, W = list(rgb.shape) + boxlist_vis = self.draw_boxlist2d_on_image(rgb, boxlist, scores=scores, tids=tids, linewidth=linewidth) + return self.summ_rgb(name, boxlist_vis, frame_id=frame_id, only_return=only_return) + + def summ_rgbs(self, name, ims, frame_ids=None, blacken_zeros=False, only_return=False): + if self.save_this: + + ims = gif_and_tile(ims, just_gif=self.just_gif) + vis = ims + + assert vis.dtype in {torch.uint8,torch.float32} + + if vis.dtype == torch.float32: + vis = back2color(vis, blacken_zeros) + + B, S, C, H, W = list(vis.shape) + + if frame_ids is not None: + assert(len(frame_ids)==S) + for s in range(S): + vis[:,s] = draw_frame_id_on_vis(vis[:,s], frame_ids[s]) + + if int(W) > self.maxwidth: + vis = vis[:,:,:,:self.maxwidth] + + if only_return: + return vis + else: + return self.summ_gif(name, vis, blacken_zeros) + + def summ_rgb(self, name, ims, blacken_zeros=False, frame_id=None, only_return=False, halfres=False): + if self.save_this: + assert ims.dtype in {torch.uint8,torch.float32} + + if ims.dtype == torch.float32: + ims = back2color(ims, blacken_zeros) + + #ims is B x C x H x W + vis = ims[0:1] # just the first one + B, C, H, W = list(vis.shape) + + if halfres: + vis = F.interpolate(vis, scale_factor=0.5) + + if frame_id is not None: + vis = draw_frame_id_on_vis(vis, frame_id) + + if int(W) > self.maxwidth: + vis = vis[:,:,:,:self.maxwidth] + + if only_return: + return vis + else: + return self.summ_gif(name, vis.unsqueeze(1), blacken_zeros) + + def flow2color(self, flow, clip=50.0): + """ + :param flow: Optical flow tensor. + :return: RGB image normalized between 0 and 1. + """ + + # flow is B x C x H x W + + B, C, H, W = list(flow.size()) + + flow = flow.clone().detach() + + abs_image = torch.abs(flow) + flow_mean = abs_image.mean(dim=[1,2,3]) + flow_std = abs_image.std(dim=[1,2,3]) + + if clip: + flow = torch.clamp(flow, -clip, clip)/clip + else: + # Apply some kind of normalization. 
Divide by the perceived maximum (mean + std*2) + flow_max = flow_mean + flow_std*2 + 1e-10 + for b in range(B): + flow[b] = flow[b].clamp(-flow_max[b].item(), flow_max[b].item()) / flow_max[b].clamp(min=1) + + radius = torch.sqrt(torch.sum(flow**2, dim=1, keepdim=True)) #B x 1 x H x W + radius_clipped = torch.clamp(radius, 0.0, 1.0) + + angle = torch.atan2(flow[:, 1:], flow[:, 0:1]) / np.pi #B x 1 x H x W + + hue = torch.clamp((angle + 1.0) / 2.0, 0.0, 1.0) + saturation = torch.ones_like(hue) * 0.75 + value = radius_clipped + hsv = torch.cat([hue, saturation, value], dim=1) #B x 3 x H x W + + #flow = tf.image.hsv_to_rgb(hsv) + flow = hsv_to_rgb(hsv) + flow = (flow*255.0).type(torch.ByteTensor) + return flow + + def summ_flow(self, name, im, clip=0.0, only_return=False, frame_id=None): + # flow is B x C x D x W + if self.save_this: + return self.summ_rgb(name, self.flow2color(im, clip=clip), only_return=only_return, frame_id=frame_id) + else: + return None + + def summ_oneds(self, name, ims, frame_ids=None, bev=False, fro=False, logvis=False, reduce_max=False, max_val=0.0, norm=True, only_return=False, do_colorize=False): + if self.save_this: + if bev: + B, C, H, _, W = list(ims[0].shape) + if reduce_max: + ims = [torch.max(im, dim=3)[0] for im in ims] + else: + ims = [torch.mean(im, dim=3) for im in ims] + elif fro: + B, C, _, H, W = list(ims[0].shape) + if reduce_max: + ims = [torch.max(im, dim=2)[0] for im in ims] + else: + ims = [torch.mean(im, dim=2) for im in ims] + + + if len(ims) != 1: # sequence + im = gif_and_tile(ims, just_gif=self.just_gif) + else: + im = torch.stack(ims, dim=1) # single frame + + B, S, C, H, W = list(im.shape) + + if logvis and max_val: + max_val = np.log(max_val) + im = torch.log(torch.clamp(im, 0)+1.0) + im = torch.clamp(im, 0, max_val) + im = im/max_val + norm = False + elif max_val: + im = torch.clamp(im, 0, max_val) + im = im/max_val + norm = False + + if norm: + # normalize before oned2inferno, + # so that the ranges are similar within B across S + im = utils.basic.normalize(im) + + im = im.view(B*S, C, H, W) + vis = oned2inferno(im, norm=norm, do_colorize=do_colorize) + vis = vis.view(B, S, 3, H, W) + + if frame_ids is not None: + assert(len(frame_ids)==S) + for s in range(S): + vis[:,s] = draw_frame_id_on_vis(vis[:,s], frame_ids[s]) + + if W > self.maxwidth: + vis = vis[...,:self.maxwidth] + + if only_return: + return vis + else: + self.summ_gif(name, vis) + + def summ_oned(self, name, im, bev=False, fro=False, logvis=False, max_val=0, max_along_y=False, norm=True, frame_id=None, only_return=False): + if self.save_this: + + if bev: + B, C, H, _, W = list(im.shape) + if max_along_y: + im = torch.max(im, dim=3)[0] + else: + im = torch.mean(im, dim=3) + elif fro: + B, C, _, H, W = list(im.shape) + if max_along_y: + im = torch.max(im, dim=2)[0] + else: + im = torch.mean(im, dim=2) + else: + B, C, H, W = list(im.shape) + + im = im[0:1] # just the first one + assert(C==1) + + if logvis and max_val: + max_val = np.log(max_val) + im = torch.log(im) + im = torch.clamp(im, 0, max_val) + im = im/max_val + norm = False + elif max_val: + im = torch.clamp(im, 0, max_val)/max_val + norm = False + + vis = oned2inferno(im, norm=norm) + if W > self.maxwidth: + vis = vis[...,:self.maxwidth] + return self.summ_rgb(name, vis, blacken_zeros=False, frame_id=frame_id, only_return=only_return) + + def summ_feats(self, name, feats, valids=None, pca=True, fro=False, only_return=False, frame_ids=None): + if self.save_this: + if valids is not None: + valids = torch.stack(valids, 
dim=1) + + feats = torch.stack(feats, dim=1) + # feats leads with B x S x C + + if feats.ndim==6: + + # feats is B x S x C x D x H x W + if fro: + reduce_dim = 3 + else: + reduce_dim = 4 + + if valids is None: + feats = torch.mean(feats, dim=reduce_dim) + else: + valids = valids.repeat(1, 1, feats.size()[2], 1, 1, 1) + feats = utils.basic.reduce_masked_mean(feats, valids, dim=reduce_dim) + + B, S, C, D, W = list(feats.size()) + + if not pca: + # feats leads with B x S x C + feats = torch.mean(torch.abs(feats), dim=2, keepdims=True) + # feats leads with B x S x 1 + feats = torch.unbind(feats, dim=1) + return self.summ_oneds(name=name, ims=feats, norm=True, only_return=only_return, frame_ids=frame_ids) + + else: + __p = lambda x: utils.basic.pack_seqdim(x, B) + __u = lambda x: utils.basic.unpack_seqdim(x, B) + + feats_ = __p(feats) + + if valids is None: + feats_pca_ = get_feat_pca(feats_) + else: + valids_ = __p(valids) + feats_pca_ = get_feat_pca(feats_, valids) + + feats_pca = __u(feats_pca_) + + return self.summ_rgbs(name=name, ims=torch.unbind(feats_pca, dim=1), only_return=only_return, frame_ids=frame_ids) + + def summ_feat(self, name, feat, valid=None, pca=True, only_return=False, bev=False, fro=False, frame_id=None): + if self.save_this: + if feat.ndim==5: # B x C x D x H x W + + if bev: + reduce_axis = 3 + elif fro: + reduce_axis = 2 + else: + # default to bev + reduce_axis = 3 + + if valid is None: + feat = torch.mean(feat, dim=reduce_axis) + else: + valid = valid.repeat(1, feat.size()[1], 1, 1, 1) + feat = utils.basic.reduce_masked_mean(feat, valid, dim=reduce_axis) + + B, C, D, W = list(feat.shape) + + if not pca: + feat = torch.mean(torch.abs(feat), dim=1, keepdims=True) + # feat is B x 1 x D x W + return self.summ_oned(name=name, im=feat, norm=True, only_return=only_return, frame_id=frame_id) + else: + feat_pca = get_feat_pca(feat, valid) + return self.summ_rgb(name, feat_pca, only_return=only_return, frame_id=frame_id) + + def summ_scalar(self, name, value): + if (not (isinstance(value, int) or isinstance(value, float) or isinstance(value, np.float32))) and ('torch' in value.type()): + value = value.detach().cpu().numpy() + if not np.isnan(value): + if (self.log_freq == 1): + self.writer.add_scalar(name, value, global_step=self.global_step) + elif self.save_this or np.mod(self.global_step, self.scalar_freq)==0: + self.writer.add_scalar(name, value, global_step=self.global_step) + + def summ_seg(self, name, seg, only_return=False, frame_id=None, colormap='tab20', label_colors=None): + if not self.save_this: + return + + B,H,W = seg.shape + + if label_colors is None: + custom_label_colors = False + # label_colors = get_n_colors(int(torch.max(seg).item()), sequential=True) + label_colors = cm.get_cmap(colormap).colors + label_colors = [[int(i*255) for i in l] for l in label_colors] + else: + custom_label_colors = True + # label_colors = matplotlib.cm.get_cmap(colormap).colors + # label_colors = [[int(i*255) for i in l] for l in label_colors] + # print('label_colors', label_colors) + + # label_colors = [ + # (0, 0, 0), # None + # (70, 70, 70), # Buildings + # (190, 153, 153), # Fences + # (72, 0, 90), # Other + # (220, 20, 60), # Pedestrians + # (153, 153, 153), # Poles + # (157, 234, 50), # RoadLines + # (128, 64, 128), # Roads + # (244, 35, 232), # Sidewalks + # (107, 142, 35), # Vegetation + # (0, 0, 255), # Vehicles + # (102, 102, 156), # Walls + # (220, 220, 0) # TrafficSigns + # ] + + r = torch.zeros_like(seg,dtype=torch.uint8) + g = torch.zeros_like(seg,dtype=torch.uint8) + 
b = torch.zeros_like(seg,dtype=torch.uint8) + + for label in range(0,len(label_colors)): + if (not custom_label_colors):# and (N > 20): + label_ = label % 20 + else: + label_ = label + + idx = (seg == label+1) + r[idx] = label_colors[label_][0] + g[idx] = label_colors[label_][1] + b[idx] = label_colors[label_][2] + + rgb = torch.stack([r,g,b],axis=1) + return self.summ_rgb(name,rgb,only_return=only_return, frame_id=frame_id) + + def summ_pts_on_rgb(self, name, trajs, rgb, valids=None, frame_id=None, only_return=False, show_dots=True, cmap='coolwarm', linewidth=1): + # trajs is B, S, N, 2 + # rgbs is B, S, C, H, W + B, C, H, W = rgb.shape + B, S, N, D = trajs.shape + + rgb = rgb[0] # C, H, W + trajs = trajs[0] # S, N, 2 + if valids is None: + valids = torch.ones_like(trajs[:,:,0]) # S, N + else: + valids = valids[0] + # print('trajs', trajs.shape) + # print('valids', valids.shape) + + rgb = back2color(rgb).detach().cpu().numpy() + rgb = np.transpose(rgb, [1, 2, 0]) # put channels last + + trajs = trajs.long().detach().cpu().numpy() # S, N, 2 + valids = valids.long().detach().cpu().numpy() # S, N + + rgb = rgb.astype(np.uint8).copy() + + for i in range(N): + if cmap=='onediff' and i==0: + cmap_ = 'spring' + elif cmap=='onediff': + cmap_ = 'winter' + else: + cmap_ = cmap + traj = trajs[:,i] # S,2 + valid = valids[:,i] # S + + color_map = cm.get_cmap(cmap) + color = np.array(color_map(i)[:3]) * 255 # rgb + for s in range(S): + if valid[s]: + cv2.circle(rgb, (int(traj[s,0]), int(traj[s,1])), linewidth, color, -1) + rgb = torch.from_numpy(rgb).permute(2,0,1).unsqueeze(0) + rgb = preprocess_color(rgb) + return self.summ_rgb(name, rgb, only_return=only_return, frame_id=frame_id) + + def summ_pts_on_rgbs(self, name, trajs, rgbs, valids=None, frame_ids=None, only_return=False, show_dots=True, cmap='coolwarm', linewidth=1): + # trajs is B, S, N, 2 + # rgbs is B, S, C, H, W + B, S, C, H, W = rgbs.shape + B, S2, N, D = trajs.shape + assert(S==S2) + + rgbs = rgbs[0] # S, C, H, W + trajs = trajs[0] # S, N, 2 + if valids is None: + valids = torch.ones_like(trajs[:,:,0]) # S, N + else: + valids = valids[0] + # print('trajs', trajs.shape) + # print('valids', valids.shape) + + rgbs_color = [] + for rgb in rgbs: + rgb = back2color(rgb).detach().cpu().numpy() + rgb = np.transpose(rgb, [1, 2, 0]) # put channels last + rgbs_color.append(rgb) # each element 3 x H x W + + trajs = trajs.long().detach().cpu().numpy() # S, N, 2 + valids = valids.long().detach().cpu().numpy() # S, N + + rgbs_color = [rgb.astype(np.uint8).copy() for rgb in rgbs_color] + + for i in range(N): + traj = trajs[:,i] # S,2 + valid = valids[:,i] # S + + color_map = cm.get_cmap(cmap) + color = np.array(color_map(0)[:3]) * 255 # rgb + for s in range(S): + if valid[s]: + cv2.circle(rgbs_color[s], (traj[s,0], traj[s,1]), linewidth, color, -1) + rgbs = [] + for rgb in rgbs_color: + rgb = torch.from_numpy(rgb).permute(2, 0, 1).unsqueeze(0) + rgbs.append(preprocess_color(rgb)) + + return self.summ_rgbs(name, rgbs, only_return=only_return, frame_ids=frame_ids) + + + def summ_traj2ds_on_rgbs(self, name, trajs, rgbs, valids=None, frame_ids=None, only_return=False, show_dots=False, cmap='coolwarm', vals=None, linewidth=1): + # trajs is B, S, N, 2 + # rgbs is B, S, C, H, W + B, S, C, H, W = rgbs.shape + B, S2, N, D = trajs.shape + assert(S==S2) + + rgbs = rgbs[0] # S, C, H, W + trajs = trajs[0] # S, N, 2 + if valids is None: + valids = torch.ones_like(trajs[:,:,0]) # S, N + else: + valids = valids[0] + + # print('trajs', trajs.shape) + # print('valids', 
valids.shape) + + if vals is not None: + vals = vals[0] # N + # print('vals', vals.shape) + + rgbs_color = [] + for rgb in rgbs: + rgb = back2color(rgb).detach().cpu().numpy() + rgb = np.transpose(rgb, [1, 2, 0]) # put channels last + rgbs_color.append(rgb) # each element 3 x H x W + + for i in range(N): + if cmap=='onediff' and i==0: + cmap_ = 'spring' + elif cmap=='onediff': + cmap_ = 'winter' + else: + cmap_ = cmap + traj = trajs[:,i].long().detach().cpu().numpy() # S, 2 + valid = valids[:,i].long().detach().cpu().numpy() # S + + # print('traj', traj.shape) + # print('valid', valid.shape) + + if vals is not None: + # val = vals[:,i].float().detach().cpu().numpy() # [] + val = vals[i].float().detach().cpu().numpy() # [] + # print('val', val.shape) + else: + val = None + + for t in range(S): + if valid[t]: + # traj_seq = traj[max(t-16,0):t+1] + traj_seq = traj[max(t-8,0):t+1] + val_seq = np.linspace(0,1,len(traj_seq)) + # if t<2: + # val_seq = np.zeros_like(val_seq) + # print('val_seq', val_seq) + # val_seq = 1.0 + # val_seq = np.arange(8)/8.0 + # val_seq = val_seq[-len(traj_seq):] + # rgbs_color[t] = self.draw_traj_on_image_py(rgbs_color[t], traj_seq, S=S, show_dots=show_dots, cmap=cmap_, val=val_seq, linewidth=linewidth) + rgbs_color[t] = self.draw_traj_on_image_py(rgbs_color[t], traj_seq, S=S, show_dots=show_dots, cmap=cmap_, val=val_seq, linewidth=linewidth) + # input() + + for i in range(N): + if cmap=='onediff' and i==0: + cmap_ = 'spring' + elif cmap=='onediff': + cmap_ = 'winter' + else: + cmap_ = cmap + traj = trajs[:,i] # S,2 + # vis = visibles[:,i] # S + vis = torch.ones_like(traj[:,0]) # S + valid = valids[:,i] # S + rgbs_color = self.draw_circ_on_images_py(rgbs_color, traj, vis, S=0, show_dots=show_dots, cmap=cmap_, linewidth=linewidth) + + rgbs = [] + for rgb in rgbs_color: + rgb = torch.from_numpy(rgb).permute(2, 0, 1).unsqueeze(0) + rgbs.append(preprocess_color(rgb)) + + return self.summ_rgbs(name, rgbs, only_return=only_return, frame_ids=frame_ids) + + + def summ_traj2ds_on_rgbs_py(self, name, trajs, rgbs_color, valids=None, frame_ids=None, only_return=False, show_dots=False, cmap='coolwarm', vals=None, linewidth=1): + # trajs is B, S, N, 2 + # rgbs is B, S, C, H, W + # B, S, C, H, W = rgbs.shape + B, S, N, D = trajs.shape + # assert(S==S2) + + # rgbs = rgbs[0] # S, C, H, W + trajs = trajs[0] # S, N, 2 + if valids is None: + valids = torch.ones_like(trajs[:,:,0]) # S, N + else: + valids = valids[0] + + # print('trajs', trajs.shape) + # print('valids', valids.shape) + + if vals is not None: + vals = vals[0] # N + # print('vals', vals.shape) + + # rgbs_color = [] + # for rgb in rgbs: + # rgb = back2color(rgb).detach().cpu().numpy() + # rgb = np.transpose(rgb, [1, 2, 0]) # put channels last + # rgbs_color.append(rgb) # each element 3 x H x W + + for i in range(N): + if cmap=='onediff' and i==0: + cmap_ = 'spring' + elif cmap=='onediff': + cmap_ = 'winter' + else: + cmap_ = cmap + traj = trajs[:,i].long().detach().cpu().numpy() # S, 2 + valid = valids[:,i].long().detach().cpu().numpy() # S + + # print('traj', traj.shape) + # print('valid', valid.shape) + + if vals is not None: + # val = vals[:,i].float().detach().cpu().numpy() # [] + val = vals[i].float().detach().cpu().numpy() # [] + # print('val', val.shape) + else: + val = None + + for t in range(S): + # if valid[t]: + # traj_seq = traj[max(t-16,0):t+1] + traj_seq = traj[max(t-8,0):t+1] + val_seq = np.linspace(0,1,len(traj_seq)) + # if t<2: + # val_seq = np.zeros_like(val_seq) + # print('val_seq', val_seq) + # val_seq = 
1.0 + # val_seq = np.arange(8)/8.0 + # val_seq = val_seq[-len(traj_seq):] + # rgbs_color[t] = self.draw_traj_on_image_py(rgbs_color[t], traj_seq, S=S, show_dots=show_dots, cmap=cmap_, val=val_seq, linewidth=linewidth) + rgbs_color[t] = self.draw_traj_on_image_py(rgbs_color[t], traj_seq, S=S, show_dots=show_dots, cmap=cmap_, val=val_seq, linewidth=linewidth) + # input() + + for i in range(N): + if cmap=='onediff' and i==0: + cmap_ = 'spring' + elif cmap=='onediff': + cmap_ = 'winter' + else: + cmap_ = cmap + traj = trajs[:,i] # S,2 + # vis = visibles[:,i] # S + vis = torch.ones_like(traj[:,0]) # S + valid = valids[:,i] # S + rgbs_color = self.draw_circ_on_images_py(rgbs_color, traj, vis, S=0, show_dots=show_dots, cmap=cmap_, linewidth=linewidth) + + rgbs = [] + for rgb in rgbs_color: + rgb = torch.from_numpy(rgb).permute(2, 0, 1).unsqueeze(0) + rgbs.append(preprocess_color(rgb)) + + return self.summ_rgbs(name, rgbs, only_return=only_return, frame_ids=frame_ids) + + + def summ_traj2ds_on_rgbs2(self, name, trajs, visibles, rgbs, valids=None, frame_ids=None, only_return=False, show_dots=True, cmap=None, linewidth=1): + # trajs is B, S, N, 2 + # rgbs is B, S, C, H, W + B, S, C, H, W = rgbs.shape + B, S2, N, D = trajs.shape + assert(S==S2) + + rgbs = rgbs[0] # S, C, H, W + trajs = trajs[0] # S, N, 2 + visibles = visibles[0] # S, N + if valids is None: + valids = torch.ones_like(trajs[:,:,0]) # S, N + else: + valids = valids[0] + # print('trajs', trajs.shape) + # print('valids', valids.shape) + + rgbs_color = [] + for rgb in rgbs: + rgb = back2color(rgb).detach().cpu().numpy() + rgb = np.transpose(rgb, [1, 2, 0]) # put channels last + rgbs_color.append(rgb) # each element 3 x H x W + + trajs = trajs.long().detach().cpu().numpy() # S, N, 2 + visibles = visibles.float().detach().cpu().numpy() # S, N + valids = valids.long().detach().cpu().numpy() # S, N + + for i in range(N): + if cmap=='onediff' and i==0: + cmap_ = 'spring' + elif cmap=='onediff': + cmap_ = 'winter' + else: + cmap_ = cmap + traj = trajs[:,i] # S,2 + vis = visibles[:,i] # S + valid = valids[:,i] # S + rgbs_color = self.draw_traj_on_images_py(rgbs_color, traj, S=S, show_dots=show_dots, cmap=cmap_, linewidth=linewidth) + + for i in range(N): + if cmap=='onediff' and i==0: + cmap_ = 'spring' + elif cmap=='onediff': + cmap_ = 'winter' + else: + cmap_ = cmap + traj = trajs[:,i] # S,2 + vis = visibles[:,i] # S + valid = valids[:,i] # S + if valid[0]: + rgbs_color = self.draw_circ_on_images_py(rgbs_color, traj, vis, S=S, show_dots=show_dots, cmap=None, linewidth=linewidth) + + rgbs = [] + for rgb in rgbs_color: + rgb = torch.from_numpy(rgb).permute(2, 0, 1).unsqueeze(0) + rgbs.append(preprocess_color(rgb)) + + return self.summ_rgbs(name, rgbs, only_return=only_return, frame_ids=frame_ids) + + def summ_traj2ds_on_rgb(self, name, trajs, rgb, valids=None, show_dots=False, show_lines=True, frame_id=None, only_return=False, cmap='coolwarm', linewidth=1): + # trajs is B, S, N, 2 + # rgb is B, C, H, W + B, C, H, W = rgb.shape + B, S, N, D = trajs.shape + + rgb = rgb[0] # S, C, H, W + trajs = trajs[0] # S, N, 2 + + if valids is None: + valids = torch.ones_like(trajs[:,:,0]) + else: + valids = valids[0] + + rgb_color = back2color(rgb).detach().cpu().numpy() + rgb_color = np.transpose(rgb_color, [1, 2, 0]) # put channels last + + # using maxdist will dampen the colors for short motions + norms = torch.sqrt(1e-4 + torch.sum((trajs[-1] - trajs[0])**2, dim=1)) # N + maxdist = torch.quantile(norms, 0.95).detach().cpu().numpy() + maxdist = None + trajs 
= trajs.long().detach().cpu().numpy() # S, N, 2 + valids = valids.long().detach().cpu().numpy() # S, N + + for i in range(N): + if cmap=='onediff' and i==0: + cmap_ = 'spring' + elif cmap=='onediff': + cmap_ = 'winter' + else: + cmap_ = cmap + traj = trajs[:,i] # S, 2 + valid = valids[:,i] # S + if valid[0]==1: + traj = traj[valid>0] + rgb_color = self.draw_traj_on_image_py( + rgb_color, traj, S=S, show_dots=show_dots, show_lines=show_lines, cmap=cmap_, maxdist=maxdist, linewidth=linewidth) + + rgb_color = torch.from_numpy(rgb_color).permute(2, 0, 1).unsqueeze(0) + rgb = preprocess_color(rgb_color) + return self.summ_rgb(name, rgb, only_return=only_return, frame_id=frame_id) + + def draw_traj_on_image_py(self, rgb, traj, S=50, linewidth=1, show_dots=False, show_lines=True, cmap='coolwarm', val=None, maxdist=None): + # all inputs are numpy tensors + # rgb is 3 x H x W + # traj is S x 2 + + H, W, C = rgb.shape + assert(C==3) + + rgb = rgb.astype(np.uint8).copy() + + S1, D = traj.shape + assert(D==2) + + color_map = cm.get_cmap(cmap) + S1, D = traj.shape + + for s in range(S1): + if val is not None: + color = np.array(color_map(val[s])[:3]) * 255 # rgb + else: + if maxdist is not None: + val = (np.sqrt(np.sum((traj[s]-traj[0])**2))/maxdist).clip(0,1) + color = np.array(color_map(val)[:3]) * 255 # rgb + else: + color = np.array(color_map((s)/max(1,float(S-2)))[:3]) * 255 # rgb + + if show_lines and s<(S1-1): + cv2.line(rgb, + (int(traj[s,0]), int(traj[s,1])), + (int(traj[s+1,0]), int(traj[s+1,1])), + color, + linewidth, + cv2.LINE_AA) + if show_dots: + cv2.circle(rgb, (int(traj[s,0]), int(traj[s,1])), linewidth, np.array(color_map(1)[:3])*255, -1) + + # if maxdist is not None: + # val = (np.sqrt(np.sum((traj[-1]-traj[0])**2))/maxdist).clip(0,1) + # color = np.array(color_map(val)[:3]) * 255 # rgb + # else: + # # draw the endpoint of traj, using the next color (which may be the last color) + # color = np.array(color_map((S1-1)/max(1,float(S-2)))[:3]) * 255 # rgb + + # # emphasize endpoint + # cv2.circle(rgb, (traj[-1,0], traj[-1,1]), linewidth*2, color, -1) + + return rgb + + + + def draw_traj_on_images_py(self, rgbs, traj, S=50, linewidth=1, show_dots=False, cmap='coolwarm', maxdist=None): + # all inputs are numpy tensors + # rgbs is a list of H,W,3 + # traj is S,2 + H, W, C = rgbs[0].shape + assert(C==3) + + rgbs = [rgb.astype(np.uint8).copy() for rgb in rgbs] + + S1, D = traj.shape + assert(D==2) + + x = int(np.clip(traj[0,0], 0, W-1)) + y = int(np.clip(traj[0,1], 0, H-1)) + color = rgbs[0][y,x] + color = (int(color[0]),int(color[1]),int(color[2])) + for s in range(S): + # bak_color = np.array(color_map(1.0)[:3]) * 255 # rgb + # cv2.circle(rgbs[s], (traj[s,0], traj[s,1]), linewidth*4, bak_color, -1) + cv2.polylines(rgbs[s], + [traj[:s+1]], + False, + color, + linewidth, + cv2.LINE_AA) + return rgbs + + def draw_circs_on_image_py(self, rgb, xy, colors=None, linewidth=10, radius=3, show_dots=False, maxdist=None): + # all inputs are numpy tensors + # rgbs is a list of 3,H,W + # xy is N,2 + H, W, C = rgb.shape + assert(C==3) + + rgb = rgb.astype(np.uint8).copy() + + N, D = xy.shape + assert(D==2) + + + xy = xy.astype(np.float32) + xy[:,0] = np.clip(xy[:,0], 0, W-1) + xy[:,1] = np.clip(xy[:,1], 0, H-1) + xy = xy.astype(np.int32) + + + + if colors is None: + colors = get_n_colors(N) + + for n in range(N): + color = colors[n] + # print('color', color) + # color = (color[0]*255).astype(np.uint8) + color = (int(color[0]),int(color[1]),int(color[2])) + + # x = int(np.clip(xy[0,0], 0, W-1)) + # y = 
int(np.clip(xy[0,1], 0, H-1)) + # color_ = rgbs[0][y,x] + # color_ = (int(color_[0]),int(color_[1]),int(color_[2])) + # color_ = (int(color_[0]),int(color_[1]),int(color_[2])) + + cv2.circle(rgb, (xy[n,0], xy[n,1]), linewidth, color, 3) + # vis_color = int(np.squeeze(vis[s])*255) + # vis_color = (vis_color,vis_color,vis_color) + # cv2.circle(rgbs[s], (traj[s,0], traj[s,1]), linewidth+1, vis_color, -1) + return rgb + + def draw_circ_on_images_py(self, rgbs, traj, vis, S=50, linewidth=1, show_dots=False, cmap=None, maxdist=None): + # all inputs are numpy tensors + # rgbs is a list of 3,H,W + # traj is S,2 + H, W, C = rgbs[0].shape + assert(C==3) + + rgbs = [rgb.astype(np.uint8).copy() for rgb in rgbs] + + S1, D = traj.shape + assert(D==2) + + if cmap is None: + bremm = ColorMap2d() + traj_ = traj[0:1].astype(np.float32) + traj_[:,0] /= float(W) + traj_[:,1] /= float(H) + color = bremm(traj_) + # print('color', color) + color = (color[0]*255).astype(np.uint8) + # color = (int(color[0]),int(color[1]),int(color[2])) + color = (int(color[2]),int(color[1]),int(color[0])) + + for s in range(S1): + if cmap is not None: + color_map = cm.get_cmap(cmap) + # color = np.array(color_map(s/(S-1))[:3]) * 255 # rgb + color = np.array(color_map((s+1)/max(1,float(S-1)))[:3]) * 255 # rgb + # color = color.astype(np.uint8) + # color = (color[0], color[1], color[2]) + # print('color', color) + # import ipdb; ipdb.set_trace() + + cv2.circle(rgbs[s], (int(traj[s,0]), int(traj[s,1])), linewidth+1, color, -1) + # vis_color = int(np.squeeze(vis[s])*255) + # vis_color = (vis_color,vis_color,vis_color) + # cv2.circle(rgbs[s], (int(traj[s,0]), int(traj[s,1])), linewidth+1, vis_color, -1) + + return rgbs + + def summ_traj_as_crops(self, name, trajs_e, rgbs, frame_id=None, only_return=False, show_circ=False, trajs_g=None, is_g=False): + B, S, N, D = trajs_e.shape + assert(N==1) + assert(D==2) + + rgbs_vis = [] + n = 0 + pad_amount = 100 + trajs_e_py = trajs_e[0].detach().cpu().numpy() + # trajs_e_py = np.clip(trajs_e_py, min=pad_amount/2, max=pad_amoun + trajs_e_py = trajs_e_py + pad_amount + + if trajs_g is not None: + trajs_g_py = trajs_g[0].detach().cpu().numpy() + trajs_g_py = trajs_g_py + pad_amount + + for s in range(S): + rgb = rgbs[0,s].detach().cpu().numpy() + # print('orig rgb', rgb.shape) + rgb = np.transpose(rgb,(1,2,0)) # H, W, 3 + + rgb = np.pad(rgb, ((pad_amount,pad_amount),(pad_amount,pad_amount),(0,0))) + # print('pad rgb', rgb.shape) + H, W, C = rgb.shape + + if trajs_g is not None: + xy_g = trajs_g_py[s,n] + xy_g[0] = np.clip(xy_g[0], pad_amount, W-pad_amount) + xy_g[1] = np.clip(xy_g[1], pad_amount, H-pad_amount) + rgb = self.draw_circs_on_image_py(rgb, xy_g.reshape(1,2), colors=[(0,255,0)], linewidth=2, radius=3) + + xy_e = trajs_e_py[s,n] + xy_e[0] = np.clip(xy_e[0], pad_amount, W-pad_amount) + xy_e[1] = np.clip(xy_e[1], pad_amount, H-pad_amount) + + if show_circ: + if is_g: + rgb = self.draw_circs_on_image_py(rgb, xy_e.reshape(1,2), colors=[(0,255,0)], linewidth=2, radius=3) + else: + rgb = self.draw_circs_on_image_py(rgb, xy_e.reshape(1,2), colors=[(255,0,255)], linewidth=2, radius=3) + + + xmin = int(xy_e[0])-pad_amount//2 + xmax = int(xy_e[0])+pad_amount//2 + ymin = int(xy_e[1])-pad_amount//2 + ymax = int(xy_e[1])+pad_amount//2 + + rgb_ = rgb[ymin:ymax, xmin:xmax] + + H_, W_ = rgb_.shape[:2] + # if np.any(rgb_.shape==0): + # input() + if H_==0 or W_==0: + import ipdb; ipdb.set_trace() + + rgb_ = rgb_.transpose(2,0,1) + rgb_ = torch.from_numpy(rgb_) + + rgbs_vis.append(rgb_) + + # nrow = 
int(np.sqrt(S)*(16.0/9)/2.0) + nrow = int(np.sqrt(S)*1.5) + grid_img = torchvision.utils.make_grid(torch.stack(rgbs_vis, dim=0), nrow=nrow).unsqueeze(0) + # print('grid_img', grid_img.shape) + return self.summ_rgb(name, grid_img.byte(), frame_id=frame_id, only_return=only_return) + + def summ_occ(self, name, occ, reduce_axes=[3], bev=False, fro=False, pro=False, frame_id=None, only_return=False): + if self.save_this: + B, C, D, H, W = list(occ.shape) + if bev: + reduce_axes = [3] + elif fro: + reduce_axes = [2] + elif pro: + reduce_axes = [4] + for reduce_axis in reduce_axes: + height = convert_occ_to_height(occ, reduce_axis=reduce_axis) + if reduce_axis == reduce_axes[-1]: + return self.summ_oned(name=('%s_ax%d' % (name, reduce_axis)), im=height, norm=False, frame_id=frame_id, only_return=only_return) + else: + self.summ_oned(name=('%s_ax%d' % (name, reduce_axis)), im=height, norm=False, frame_id=frame_id, only_return=only_return) + +def erode2d(im, times=1, device='cuda'): + weights2d = torch.ones(1, 1, 3, 3, device=device) + for time in range(times): + im = 1.0 - F.conv2d(1.0 - im, weights2d, padding=1).clamp(0, 1) + return im + +def dilate2d(im, times=1, device='cuda', mode='square'): + weights2d = torch.ones(1, 1, 3, 3, device=device) + if mode=='cross': + weights2d[:,:,0,0] = 0.0 + weights2d[:,:,0,2] = 0.0 + weights2d[:,:,2,0] = 0.0 + weights2d[:,:,2,2] = 0.0 + for time in range(times): + im = F.conv2d(im, weights2d, padding=1).clamp(0, 1) + return im + + diff --git a/dynamic_predictor/dust3r/datasets/utils/misc.py b/dynamic_predictor/dust3r/datasets/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..adc31966644a4639152770f8067d74a24f36b8a2 --- /dev/null +++ b/dynamic_predictor/dust3r/datasets/utils/misc.py @@ -0,0 +1,166 @@ +import torch +import numpy as np +import math +from prettytable import PrettyTable + +def count_parameters(model): + table = PrettyTable(["Modules", "Parameters"]) + total_params = 0 + for name, parameter in model.named_parameters(): + if not parameter.requires_grad: + continue + param = parameter.numel() + if param > 100000: + table.add_row([name, param]) + total_params+=param + print(table) + print('total params: %.2f M' % (total_params/1000000.0)) + return total_params + +def posemb_sincos_2d_xy(xy, C, temperature=10000, dtype=torch.float32, cat_coords=False): + device = xy.device + dtype = xy.dtype + B, S, D = xy.shape + assert(D==2) + x = xy[:,:,0] + y = xy[:,:,1] + assert (C % 4) == 0, 'feature dimension must be multiple of 4 for sincos emb' + omega = torch.arange(C // 4, device=device) / (C // 4 - 1) + omega = 1. 
/ (temperature ** omega) + + y = y.flatten()[:, None] * omega[None, :] + x = x.flatten()[:, None] * omega[None, :] + pe = torch.cat((x.sin(), x.cos(), y.sin(), y.cos()), dim=1) + pe = pe.reshape(B,S,C).type(dtype) + if cat_coords: + pe = torch.cat([pe, xy], dim=2) # B,N,C+2 + return pe + +class SimplePool(): + def __init__(self, pool_size, version='pt'): + self.pool_size = pool_size + self.version = version + self.items = [] + + if not (version=='pt' or version=='np'): + print('version = %s; please choose pt or np') + assert(False) # please choose pt or np + + def __len__(self): + return len(self.items) + + def mean(self, min_size=1): + if min_size=='half': + pool_size_thresh = self.pool_size/2 + else: + pool_size_thresh = min_size + + if self.version=='np': + if len(self.items) >= pool_size_thresh: + return np.sum(self.items)/float(len(self.items)) + else: + return np.nan + if self.version=='pt': + if len(self.items) >= pool_size_thresh: + return torch.sum(self.items)/float(len(self.items)) + else: + return torch.from_numpy(np.nan) + + def sample(self, with_replacement=True): + idx = np.random.randint(len(self.items)) + if with_replacement: + return self.items[idx] + else: + return self.items.pop(idx) + + def fetch(self, num=None): + if self.version=='pt': + item_array = torch.stack(self.items) + elif self.version=='np': + item_array = np.stack(self.items) + if num is not None: + # there better be some items + assert(len(self.items) >= num) + + # if there are not that many elements just return however many there are + if len(self.items) < num: + return item_array + else: + idxs = np.random.randint(len(self.items), size=num) + return item_array[idxs] + else: + return item_array + + def is_full(self): + full = len(self.items)==self.pool_size + return full + + def empty(self): + self.items = [] + + def update(self, items): + for item in items: + if len(self.items) < self.pool_size: + # the pool is not full, so let's add this in + self.items.append(item) + else: + # the pool is full + # pop from the front + self.items.pop(0) + # add to the back + self.items.append(item) + return self.items + +def farthest_point_sample(xyz, npoint, include_ends=False, deterministic=False): + """ + Input: + xyz: pointcloud data, [B, N, C], where C is probably 3 + npoint: number of samples + Return: + inds: sampled pointcloud index, [B, npoint] + """ + device = xyz.device + B, N, C = xyz.shape + xyz = xyz.float() + inds = torch.zeros(B, npoint, dtype=torch.long).to(device) + distance = torch.ones(B, N).to(device) * 1e10 + if deterministic: + farthest = torch.randint(0, 1, (B,), dtype=torch.long).to(device) + else: + farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device) + batch_indices = torch.arange(B, dtype=torch.long).to(device) + for i in range(npoint): + if include_ends: + if i==0: + farthest = 0 + elif i==1: + farthest = N-1 + inds[:, i] = farthest + centroid = xyz[batch_indices, farthest, :].view(B, 1, C) + dist = torch.sum((xyz - centroid) ** 2, -1) + mask = dist < distance + distance[mask] = dist[mask] + farthest = torch.max(distance, -1)[1] + + if npoint > N: + # if we need more samples, make them random + distance += torch.randn_like(distance) + return inds + +def farthest_point_sample_py(xyz, npoint): + N,C = xyz.shape + inds = np.zeros(npoint, dtype=np.int32) + distance = np.ones(N) * 1e10 + farthest = np.random.randint(0, N, dtype=np.int32) + for i in range(npoint): + inds[i] = farthest + centroid = xyz[farthest, :].reshape(1,C) + dist = np.sum((xyz - centroid) ** 2, -1) + mask = dist < 
distance + distance[mask] = dist[mask] + farthest = np.argmax(distance, -1) + if npoint > N: + # if we need more samples, make them random + distance += np.random.randn(*distance.shape) + return inds + diff --git a/dynamic_predictor/dust3r/datasets/utils/samp.py b/dynamic_predictor/dust3r/datasets/utils/samp.py new file mode 100644 index 0000000000000000000000000000000000000000..3632c9c1164638aec4c1caf3de2bfdbcb4ee6126 --- /dev/null +++ b/dynamic_predictor/dust3r/datasets/utils/samp.py @@ -0,0 +1,152 @@ +import torch +import utils.basic +import torch.nn.functional as F + +def bilinear_sample2d(im, x, y, return_inbounds=False): + # x and y are each B, N + # output is B, C, N + B, C, H, W = list(im.shape) + N = list(x.shape)[1] + + x = x.float() + y = y.float() + H_f = torch.tensor(H, dtype=torch.float32) + W_f = torch.tensor(W, dtype=torch.float32) + + # inbound_mask = (x>-0.5).float()*(y>-0.5).float()*(x -0.5).byte() & (x < float(W_f - 0.5)).byte() + y_valid = (y > -0.5).byte() & (y < float(H_f - 0.5)).byte() + inbounds = (x_valid & y_valid).float() + inbounds = inbounds.reshape(B, N) # something seems wrong here for B>1; i'm getting an error here (or downstream if i put -1) + return output, inbounds + + return output # B, C, N + +def paste_crop_on_canvas(crop, box2d_unnorm, H, W, fast=True, mask=None, canvas=None): + # this is the inverse of crop_and_resize_box2d + B, C, Y, X = list(crop.shape) + B2, D = list(box2d_unnorm.shape) + assert(B == B2) + assert(D == 4) + + # here, we want to place the crop into a bigger image, + # at the location specified by the box2d. + + if canvas is None: + canvas = torch.zeros((B, C, H, W), device=crop.device) + else: + B2, C2, H2, W2 = canvas.shape + assert(B==B2) + assert(C==C2) + assert(H==H2) + assert(W==W2) + + # box2d_unnorm = utils.geom.unnormalize_box2d(box2d, H, W) + + if fast: + ymin = box2d_unnorm[:, 0].long() + xmin = box2d_unnorm[:, 1].long() + ymax = box2d_unnorm[:, 2].long() + xmax = box2d_unnorm[:, 3].long() + w = (xmax - xmin).float() + h = (ymax - ymin).float() + + grids = utils.basic.gridcloud2d(B, H, W) + grids_flat = grids.reshape(B, -1, 2) + # grids_flat[:, :, 0] = (grids_flat[:, :, 0] - xmin.float().unsqueeze(1)) / w.unsqueeze(1) * X + # grids_flat[:, :, 1] = (grids_flat[:, :, 1] - ymin.float().unsqueeze(1)) / h.unsqueeze(1) * Y + + # for each pixel in the main image, + # grids_flat tells us where to sample in the crop image + + # print('grids_flat', grids_flat.shape) + # print('crop', crop.shape) + + grids_flat[:, :, 0] = (grids_flat[:, :, 0] - xmin.float().unsqueeze(1)) / w.unsqueeze(1) * 2.0 - 1.0 + grids_flat[:, :, 1] = (grids_flat[:, :, 1] - ymin.float().unsqueeze(1)) / h.unsqueeze(1) * 2.0 - 1.0 + + grid = grids_flat.reshape(B,H,W,2) + + canvas = F.grid_sample(crop, grid, align_corners=False) + # print('canvas', canvas.shape) + + # if mask is None: + # crop_resamp, inb = bilinear_sample2d(crop, grids_flat[:, :, 0], grids_flat[:, :, 1], return_inbounds=True) + # crop_resamp = crop_resamp.reshape(B, C, H, W) + # inb = inb.reshape(B, 1, H, W) + # canvas = canvas * (1 - inb) + crop_resamp * inb + # else: + # full_resamp = bilinear_sample2d(torch.cat([crop, mask], dim=1), grids_flat[:, :, 0], grids_flat[:, :, 1]) + # full_resamp = full_resamp.reshape(B, C+1, H, W) + # crop_resamp = full_resamp[:,:3] + # mask_resamp = full_resamp[:,3:4] + # canvas = canvas * (1 - mask_resamp) + crop_resamp * mask_resamp + else: + for b in range(B): + ymin = box2d_unnorm[b, 0].long() + xmin = box2d_unnorm[b, 1].long() + ymax = box2d_unnorm[b, 
2].long() + xmax = box2d_unnorm[b, 3].long() + + crop_b = F.interpolate(crop[b:b + 1], (ymax - ymin, xmax - xmin)).squeeze(0) + + # print('canvas[b,:,...', canvas[b,:,ymin:ymax,xmin:xmax].shape) + # print('crop_b', crop_b.shape) + + canvas[b, :, ymin:ymax, xmin:xmax] = crop_b + return canvas diff --git a/dynamic_predictor/dust3r/datasets/utils/transforms.py b/dynamic_predictor/dust3r/datasets/utils/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..eb34f2f01d3f8f829ba71a7e03e181bf18f72c25 --- /dev/null +++ b/dynamic_predictor/dust3r/datasets/utils/transforms.py @@ -0,0 +1,11 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). +# +# -------------------------------------------------------- +# DUST3R default transforms +# -------------------------------------------------------- +import torchvision.transforms as tvf +from dust3r.utils.image import ImgNorm + +# define the standard image transforms +ColorJitter = tvf.Compose([tvf.ColorJitter(0.5, 0.5, 0.5, 0.1), ImgNorm]) diff --git a/dynamic_predictor/dust3r/datasets/utils/vox.py b/dynamic_predictor/dust3r/datasets/utils/vox.py new file mode 100644 index 0000000000000000000000000000000000000000..203097b8736eabc2158950a11f4600b7848f119e --- /dev/null +++ b/dynamic_predictor/dust3r/datasets/utils/vox.py @@ -0,0 +1,500 @@ +import numpy as np +import torch +import torch.nn.functional as F + +import utils.geom + +class Vox_util(object): + def __init__(self, Z, Y, X, scene_centroid, bounds, pad=None, assert_cube=False): + self.XMIN, self.XMAX, self.YMIN, self.YMAX, self.ZMIN, self.ZMAX = bounds + B, D = list(scene_centroid.shape) + self.Z, self.Y, self.X = Z, Y, X + + scene_centroid = scene_centroid.detach().cpu().numpy() + x_centroid, y_centroid, z_centroid = scene_centroid[0] + self.XMIN += x_centroid + self.XMAX += x_centroid + self.YMIN += y_centroid + self.YMAX += y_centroid + self.ZMIN += z_centroid + self.ZMAX += z_centroid + + self.default_vox_size_X = (self.XMAX-self.XMIN)/float(X) + self.default_vox_size_Y = (self.YMAX-self.YMIN)/float(Y) + self.default_vox_size_Z = (self.ZMAX-self.ZMIN)/float(Z) + + if pad: + Z_pad, Y_pad, X_pad = pad + self.ZMIN -= self.default_vox_size_Z * Z_pad + self.ZMAX += self.default_vox_size_Z * Z_pad + self.YMIN -= self.default_vox_size_Y * Y_pad + self.YMAX += self.default_vox_size_Y * Y_pad + self.XMIN -= self.default_vox_size_X * X_pad + self.XMAX += self.default_vox_size_X * X_pad + + if assert_cube: + # we assume cube voxels + if (not np.isclose(self.default_vox_size_X, self.default_vox_size_Y)) or (not np.isclose(self.default_vox_size_X, self.default_vox_size_Z)): + print('Z, Y, X', Z, Y, X) + print('bounds for this iter:', + 'X = %.2f to %.2f' % (self.XMIN, self.XMAX), + 'Y = %.2f to %.2f' % (self.YMIN, self.YMAX), + 'Z = %.2f to %.2f' % (self.ZMIN, self.ZMAX), + ) + print('self.default_vox_size_X', self.default_vox_size_X) + print('self.default_vox_size_Y', self.default_vox_size_Y) + print('self.default_vox_size_Z', self.default_vox_size_Z) + assert(np.isclose(self.default_vox_size_X, self.default_vox_size_Y)) + assert(np.isclose(self.default_vox_size_X, self.default_vox_size_Z)) + + def Ref2Mem(self, xyz, Z, Y, X, assert_cube=False): + # xyz is B x N x 3, in ref coordinates + # transforms ref coordinates into mem coordinates + B, N, C = list(xyz.shape) + device = xyz.device + assert(C==3) + mem_T_ref = self.get_mem_T_ref(B, Z, Y, X, assert_cube=assert_cube, device=device) + xyz = 
utils.geom.apply_4x4(mem_T_ref, xyz) + return xyz + + def Mem2Ref(self, xyz_mem, Z, Y, X, assert_cube=False): + # xyz is B x N x 3, in mem coordinates + # transforms mem coordinates into ref coordinates + B, N, C = list(xyz_mem.shape) + ref_T_mem = self.get_ref_T_mem(B, Z, Y, X, assert_cube=assert_cube, device=xyz_mem.device) + xyz_ref = utils.geom.apply_4x4(ref_T_mem, xyz_mem) + return xyz_ref + + def get_mem_T_ref(self, B, Z, Y, X, assert_cube=False, device='cuda'): + vox_size_X = (self.XMAX-self.XMIN)/float(X) + vox_size_Y = (self.YMAX-self.YMIN)/float(Y) + vox_size_Z = (self.ZMAX-self.ZMIN)/float(Z) + + if assert_cube: + if (not np.isclose(vox_size_X, vox_size_Y)) or (not np.isclose(vox_size_X, vox_size_Z)): + print('Z, Y, X', Z, Y, X) + print('bounds for this iter:', + 'X = %.2f to %.2f' % (self.XMIN, self.XMAX), + 'Y = %.2f to %.2f' % (self.YMIN, self.YMAX), + 'Z = %.2f to %.2f' % (self.ZMIN, self.ZMAX), + ) + print('vox_size_X', vox_size_X) + print('vox_size_Y', vox_size_Y) + print('vox_size_Z', vox_size_Z) + assert(np.isclose(vox_size_X, vox_size_Y)) + assert(np.isclose(vox_size_X, vox_size_Z)) + + # translation + # (this makes the left edge of the leftmost voxel correspond to XMIN) + center_T_ref = utils.geom.eye_4x4(B, device=device) + center_T_ref[:,0,3] = -self.XMIN-vox_size_X/2.0 + center_T_ref[:,1,3] = -self.YMIN-vox_size_Y/2.0 + center_T_ref[:,2,3] = -self.ZMIN-vox_size_Z/2.0 + + # scaling + # (this makes the right edge of the rightmost voxel correspond to XMAX) + mem_T_center = utils.geom.eye_4x4(B, device=device) + mem_T_center[:,0,0] = 1./vox_size_X + mem_T_center[:,1,1] = 1./vox_size_Y + mem_T_center[:,2,2] = 1./vox_size_Z + mem_T_ref = utils.geom.matmul2(mem_T_center, center_T_ref) + + return mem_T_ref + + def get_ref_T_mem(self, B, Z, Y, X, assert_cube=False, device='cuda'): + mem_T_ref = self.get_mem_T_ref(B, Z, Y, X, assert_cube=assert_cube, device=device) + # note safe_inverse is inapplicable here, + # since the transform is nonrigid + ref_T_mem = mem_T_ref.inverse() + return ref_T_mem + + def get_inbounds(self, xyz, Z, Y, X, already_mem=False, padding=0.0, assert_cube=False): + # xyz is B x N x 3 + # padding should be 0 unless you are trying to account for some later cropping + if not already_mem: + xyz = self.Ref2Mem(xyz, Z, Y, X, assert_cube=assert_cube) + + x = xyz[:,:,0] + y = xyz[:,:,1] + z = xyz[:,:,2] + + x_valid = ((x-padding)>-0.5).byte() & ((x+padding)-0.5).byte() & ((y+padding)-0.5).byte() & ((z+padding) 0: + # only take points that are already near centers + xyz_round = torch.round(xyz) # B, N, 3 + dist = torch.norm(xyz_round - xyz, dim=2) + mask[dist > clean_eps] = 0 + + # set the invalid guys to zero + # we then need to zero out 0,0,0 + # (this method seems a bit clumsy) + x = x*mask + y = y*mask + z = z*mask + + x = torch.round(x) + y = torch.round(y) + z = torch.round(z) + x = torch.clamp(x, 0, X-1).int() + y = torch.clamp(y, 0, Y-1).int() + z = torch.clamp(z, 0, Z-1).int() + + x = x.view(B*N) + y = y.view(B*N) + z = z.view(B*N) + + dim3 = X + dim2 = X * Y + dim1 = X * Y * Z + + base = torch.arange(0, B, dtype=torch.int32, device=xyz.device)*dim1 + base = torch.reshape(base, [B, 1]).repeat([1, N]).view(B*N) + + vox_inds = base + z * dim2 + y * dim3 + x + voxels = torch.zeros(B*Z*Y*X, device=xyz.device).float() + voxels[vox_inds.long()] = 1.0 + # zero out the singularity + voxels[base.long()] = 0.0 + voxels = voxels.reshape(B, 1, Z, Y, X) + # B x 1 x Z x Y x X + return voxels + + def get_feat_occupancy(self, xyz, feat, Z, Y, X, clean_eps=0, 
xyz_zero=None): + # xyz is B x N x 3 and in mem coords + # feat is B x N x D + # we want to fill a voxel tensor with 1's at these inds + B, N, C = list(xyz.shape) + B2, N2, D2 = list(feat.shape) + assert(C==3) + assert(B==B2) + assert(N==N2) + + # these papers say simple 1/0 occupancy is ok: + # http://openaccess.thecvf.com/content_cvpr_2018/papers/Yang_PIXOR_Real-Time_3d_CVPR_2018_paper.pdf + # http://openaccess.thecvf.com/content_cvpr_2018/papers/Luo_Fast_and_Furious_CVPR_2018_paper.pdf + # cont fusion says they do 8-neighbor interp + # voxelnet does occupancy but with a bit of randomness in terms of the reflectance value i think + + inbounds = self.get_inbounds(xyz, Z, Y, X, already_mem=True) + x, y, z = xyz[:,:,0], xyz[:,:,1], xyz[:,:,2] + mask = torch.zeros_like(x) + mask[inbounds] = 1.0 + + if xyz_zero is not None: + # only take points that are beyond a thresh of zero + dist = torch.norm(xyz_zero-xyz, dim=2) + mask[dist < 0.1] = 0 + + if clean_eps > 0: + # only take points that are already near centers + xyz_round = torch.round(xyz) # B, N, 3 + dist = torch.norm(xyz_round - xyz, dim=2) + mask[dist > clean_eps] = 0 + + # set the invalid guys to zero + # we then need to zero out 0,0,0 + # (this method seems a bit clumsy) + x = x*mask # B, N + y = y*mask + z = z*mask + feat = feat*mask.unsqueeze(-1) # B, N, D + + x = torch.round(x) + y = torch.round(y) + z = torch.round(z) + x = torch.clamp(x, 0, X-1).int() + y = torch.clamp(y, 0, Y-1).int() + z = torch.clamp(z, 0, Z-1).int() + + # permute point orders + perm = torch.randperm(N) + x = x[:, perm] + y = y[:, perm] + z = z[:, perm] + feat = feat[:, perm] + + x = x.view(B*N) + y = y.view(B*N) + z = z.view(B*N) + feat = feat.view(B*N, -1) + + dim3 = X + dim2 = X * Y + dim1 = X * Y * Z + + base = torch.arange(0, B, dtype=torch.int32, device=xyz.device)*dim1 + base = torch.reshape(base, [B, 1]).repeat([1, N]).view(B*N) + + vox_inds = base + z * dim2 + y * dim3 + x + feat_voxels = torch.zeros((B*Z*Y*X, D2), device=xyz.device).float() + feat_voxels[vox_inds.long()] = feat + # zero out the singularity + feat_voxels[base.long()] = 0.0 + feat_voxels = feat_voxels.reshape(B, Z, Y, X, D2).permute(0, 4, 1, 2, 3) + # B x C x Z x Y x X + return feat_voxels + + def unproject_image_to_mem(self, rgb_camB, pixB_T_camA, camB_T_camA, Z, Y, X, assert_cube=False, xyz_camA=None): + # rgb_camB is B x C x H x W + # pixB_T_camA is B x 4 x 4 + + # rgb lives in B pixel coords + # we want everything in A memory coords + + # this puts each C-dim pixel in the rgb_camB + # along a ray in the voxelgrid + B, C, H, W = list(rgb_camB.shape) + + if xyz_camA is None: + xyz_memA = utils.basic.gridcloud3d(B, Z, Y, X, norm=False, device=pixB_T_camA.device) + xyz_camA = self.Mem2Ref(xyz_memA, Z, Y, X, assert_cube=assert_cube) + + xyz_camB = utils.geom.apply_4x4(camB_T_camA, xyz_camA) + z = xyz_camB[:,:,2] + + xyz_pixB = utils.geom.apply_4x4(pixB_T_camA, xyz_camA) + normalizer = torch.unsqueeze(xyz_pixB[:,:,2], 2) + EPS=1e-6 + # z = xyz_pixB[:,:,2] + xy_pixB = xyz_pixB[:,:,:2]/torch.clamp(normalizer, min=EPS) + # this is B x N x 2 + # this is the (floating point) pixel coordinate of each voxel + x, y = xy_pixB[:,:,0], xy_pixB[:,:,1] + # these are B x N + + x_valid = (x>-0.5).bool() & (x-0.5).bool() & (y0.0).bool() + valid_mem = (x_valid & y_valid & z_valid).reshape(B, 1, Z, Y, X).float() + + if (0): + # handwritten version + values = torch.zeros([B, C, Z*Y*X], dtype=torch.float32) + for b in list(range(B)): + values[b] = utils.samp.bilinear_sample_single(rgb_camB[b], x_pixB[b], 
y_pixB[b]) + else: + # native pytorch version + y_pixB, x_pixB = utils.basic.normalize_grid2d(y, x, H, W) + # since we want a 3d output, we need 5d tensors + z_pixB = torch.zeros_like(x) + xyz_pixB = torch.stack([x_pixB, y_pixB, z_pixB], axis=2) + rgb_camB = rgb_camB.unsqueeze(2) + xyz_pixB = torch.reshape(xyz_pixB, [B, Z, Y, X, 3]) + values = F.grid_sample(rgb_camB, xyz_pixB, align_corners=False) + + values = torch.reshape(values, (B, C, Z, Y, X)) + values = values * valid_mem + return values + + def warp_tiled_to_mem(self, rgb_tileB, pixB_T_camA, camB_T_camA, Z, Y, X, DMIN, DMAX, assert_cube=False): + # rgb_tileB is B,C,D,H,W + # pixB_T_camA is B,4,4 + # camB_T_camA is B,4,4 + + # rgb_tileB lives in B pixel coords but it has been tiled across the Z dimension + # we want everything in A memory coords + + # this resamples the so that each C-dim pixel in rgb_tilB + # is put into its correct place in the voxelgrid + # (using the pinhole camera model) + + B, C, D, H, W = list(rgb_tileB.shape) + + xyz_memA = utils.basic.gridcloud3d(B, Z, Y, X, norm=False, device=pixB_T_camA.device) + + xyz_camA = self.Mem2Ref(xyz_memA, Z, Y, X, assert_cube=assert_cube) + + xyz_camB = utils.geom.apply_4x4(camB_T_camA, xyz_camA) + z_camB = xyz_camB[:,:,2] + + # rgb_tileB has depth=DMIN in tile 0, and depth=DMAX in tile D-1 + z_tileB = (D-1.0) * (z_camB-float(DMIN)) / float(DMAX-DMIN) + + xyz_pixB = utils.geom.apply_4x4(pixB_T_camA, xyz_camA) + normalizer = torch.unsqueeze(xyz_pixB[:,:,2], 2) + EPS=1e-6 + # z = xyz_pixB[:,:,2] + xy_pixB = xyz_pixB[:,:,:2]/torch.clamp(normalizer, min=EPS) + # this is B x N x 2 + # this is the (floating point) pixel coordinate of each voxel + x, y = xy_pixB[:,:,0], xy_pixB[:,:,1] + # these are B x N + + x_valid = (x>-0.5).bool() & (x-0.5).bool() & (y0.0).bool() + valid_mem = (x_valid & y_valid & z_valid).reshape(B, 1, Z, Y, X).float() + + z_tileB, y_pixB, x_pixB = utils.basic.normalize_grid3d(z_tileB, y, x, D, H, W) + xyz_pixB = torch.stack([x_pixB, y_pixB, z_tileB], axis=2) + xyz_pixB = torch.reshape(xyz_pixB, [B, Z, Y, X, 3]) + values = F.grid_sample(rgb_tileB, xyz_pixB, align_corners=False) + + values = torch.reshape(values, (B, C, Z, Y, X)) + values = values * valid_mem + return values + + + def apply_mem_T_ref_to_lrtlist(self, lrtlist_cam, Z, Y, X, assert_cube=False): + # lrtlist is B x N x 19, in cam coordinates + # transforms them into mem coordinates, including a scale change for the lengths + B, N, C = list(lrtlist_cam.shape) + assert(C==19) + mem_T_cam = self.get_mem_T_ref(B, Z, Y, X, assert_cube=assert_cube, device=lrtlist_cam.device) + + def xyz2circles(self, xyz, radius, Z, Y, X, soft=True, already_mem=True, also_offset=False, grid=None): + # xyz is B x N x 3 + # radius is B x N or broadcastably so + # output is B x N x Z x Y x X + B, N, D = list(xyz.shape) + assert(D==3) + if not already_mem: + xyz = self.Ref2Mem(xyz, Z, Y, X) + + if grid is None: + grid_z, grid_y, grid_x = utils.basic.meshgrid3d(B, Z, Y, X, stack=False, norm=False, device=xyz.device) + # note the default stack is on -1 + grid = torch.stack([grid_x, grid_y, grid_z], dim=1) + # this is B x 3 x Z x Y x X + + xyz = xyz.reshape(B, N, 3, 1, 1, 1) + grid = grid.reshape(B, 1, 3, Z, Y, X) + # this is B x N x Z x Y x X + + # round the xyzs, so that at least one value matches the grid perfectly, + # and we get a value of 1 there (since exp(0)==1) + xyz = xyz.round() + + if torch.is_tensor(radius): + radius = radius.clamp(min=0.01) + + if soft: + off = grid - xyz # B,N,3,Z,Y,X + # interpret radius as sigma + 
+            dist_grid = torch.sum(off**2, dim=2, keepdim=False)
+            # this is B x N x Z x Y x X
+            if torch.is_tensor(radius):
+                radius = radius.reshape(B, N, 1, 1, 1)
+            mask = torch.exp(-dist_grid/(2*radius*radius))
+            # zero out near zero
+            mask[mask < 0.001] = 0.0
+            # h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
+            # h[h < np.finfo(h.dtype).eps * h.max()] = 0
+            # return h
+            if also_offset:
+                return mask, off
+            else:
+                return mask
+        else:
+            assert(False) # something is wrong with this. come back later to debug
+
+            dist_grid = torch.norm(grid - xyz, dim=2, keepdim=False)
+            # this is 0 at/near the xyz, and increases by 1 for each voxel away
+
+            radius = radius.reshape(B, N, 1, 1, 1)
+
+            within_radius_mask = (dist_grid < radius).float()
+            within_radius_mask = torch.sum(within_radius_mask, dim=1, keepdim=True).clamp(0, 1)
+            return within_radius_mask
+
+    def xyz2circles_bev(self, xyz, radius, Z, Y, X, already_mem=True, also_offset=False):
+        # xyz is B x N x 3
+        # radius is B x N or broadcastably so
+        # output is B x N x Z x Y x X
+        B, N, D = list(xyz.shape)
+        assert(D==3)
+        if not already_mem:
+            xyz = self.Ref2Mem(xyz, Z, Y, X)
+
+        xz = torch.stack([xyz[:,:,0], xyz[:,:,2]], dim=2)
+
+        grid_z, grid_x = utils.basic.meshgrid2d(B, Z, X, stack=False, norm=False, device=xyz.device)
+        # note the default stack is on -1
+        grid = torch.stack([grid_x, grid_z], dim=1)
+        # this is B x 2 x Z x X
+
+        xz = xz.reshape(B, N, 2, 1, 1)
+        grid = grid.reshape(B, 1, 2, Z, X)
+        # these are ready to broadcast to B x N x Z x X
+
+        # round the points, so that at least one value matches the grid perfectly,
+        # and we get a value of 1 there (since exp(0)==1)
+        xz = xz.round()
+
+        if torch.is_tensor(radius):
+            radius = radius.clamp(min=0.01)
+
+        off = grid - xz # B,N,2,Z,X
+        # interpret radius as sigma
+        dist_grid = torch.sum(off**2, dim=2, keepdim=False)
+        # this is B x N x Z x X
+        if torch.is_tensor(radius):
+            radius = radius.reshape(B, N, 1, 1, 1)
+        mask = torch.exp(-dist_grid/(2*radius*radius))
+        # zero out near zero
+        mask[mask < 0.001] = 0.0
+
+        # add a Y dim
+        mask = mask.unsqueeze(-2)
+        off = off.unsqueeze(-2)
+        # # B,N,2,Z,1,X
+
+        if also_offset:
+            return mask, off
+        else:
+            return mask
+
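Aside: xyz2circles and xyz2circles_bev above implement the same splatting trick: round each point onto the grid so that one cell matches exactly, interpret radius as a Gaussian sigma, write exp(-d^2 / (2*sigma^2)) into every cell, and zero out the far tail. A minimal self-contained 2D sketch of that kernel (the grid size, point location, and sigma below are illustrative values, not taken from the patch):

import torch

# splat one point at (x, z) = (5, 3) onto a 16x16 BEV grid with sigma = 2
Z, X, sigma = 16, 16, 2.0
pt = torch.tensor([5.0, 3.0]).round()  # round so the center cell gets exactly exp(0) == 1
grid_z, grid_x = torch.meshgrid(torch.arange(Z, dtype=torch.float32),
                                torch.arange(X, dtype=torch.float32), indexing='ij')
dist2 = (grid_x - pt[0]) ** 2 + (grid_z - pt[1]) ** 2
heat = torch.exp(-dist2 / (2 * sigma * sigma))
heat[heat < 0.001] = 0.0  # zero out near zero, as the methods above do
assert heat.max() == 1.0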
diff --git a/dynamic_predictor/dust3r/datasets/waymo.py b/dynamic_predictor/dust3r/datasets/waymo.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a782ff8de4c676067b1f3123706473776081a92
--- /dev/null
+++ b/dynamic_predictor/dust3r/datasets/waymo.py
@@ -0,0 +1,100 @@
+# Copyright (C) 2024-present Naver Corporation. All rights reserved.
+# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
+#
+# --------------------------------------------------------
+# Dataloader for the preprocessed Waymo
+# dataset at https://github.com/waymo-research/waymo-open-dataset
+# See datasets_preprocess/preprocess_waymo.py
+# --------------------------------------------------------
+import sys
+sys.path.append('.')
+import os
+import os.path as osp
+import numpy as np
+
+from dust3r.datasets.base.base_stereo_view_dataset import BaseStereoViewDataset
+from dust3r.utils.image import imread_cv2
+
+
+class Waymo (BaseStereoViewDataset):
+    """ Dataset of outdoor street scenes, loading two views per pair
+    """
+
+    def __init__(self, *args, ROOT, pairs_npz_name='waymo_pairs_video.npz', **kwargs):
+        self.ROOT = ROOT
+        self.pairs_npz_name = pairs_npz_name
+        super().__init__(*args, **kwargs)
+        self._load_data()
+
+    def _load_data(self):
+        with np.load(osp.join(self.ROOT, self.pairs_npz_name)) as data:
+            self.scenes = data['scenes']
+            self.frames = data['frames']
+            self.inv_frames = {frame: i for i, frame in enumerate(data['frames'])}
+            self.pairs = data['pairs']  # array of (scene_id, img1_id, img2_id)
+            assert self.pairs[:, 0].max() == len(self.scenes) - 1
+        print(f'Loaded {self.get_stats()}')
+
+    def __len__(self):
+        return len(self.pairs)
+
+    def get_stats(self):
+        return f'{len(self)} pairs from {len(self.scenes)} scenes'
+
+    def _get_views(self, pair_idx, resolution, rng):
+        seq, img1, img2 = self.pairs[pair_idx]
+        seq_path = osp.join(self.ROOT, self.scenes[seq])
+
+        views = []
+
+        for view_index in [img1, img2]:
+            impath = self.frames[view_index]
+            image = imread_cv2(osp.join(seq_path, impath + ".jpg"))
+            depthmap = imread_cv2(osp.join(seq_path, impath + ".exr"))
+            camera_params = np.load(osp.join(seq_path, impath + ".npz"))
+
+            intrinsics = np.float32(camera_params['intrinsics'])
+            camera_pose = np.float32(camera_params['cam2world'])
+
+            image, depthmap, intrinsics = self._crop_resize_if_necessary(
+                image, depthmap, intrinsics, resolution, rng, info=(seq_path, impath))
+
+            views.append(dict(
+                img=image,
+                depthmap=depthmap,
+                camera_pose=camera_pose,  # cam2world
+                camera_intrinsics=intrinsics,
+                dataset='Waymo',
+                label=osp.relpath(seq_path, self.ROOT),
+                instance=impath))
+
+        return views
+
+
+if __name__ == '__main__':
+    from dust3r.datasets.base.base_stereo_view_dataset import view_name
+    from dust3r.viz import SceneViz, auto_cam_size
+    from dust3r.utils.image import rgb
+
+    dataset = Waymo(split='train', ROOT="data/waymo_processed", resolution=512, aug_crop=16)
+    idxs = np.arange(0, len(dataset)-1, (len(dataset)-1)//10)
+    for idx in idxs:
+        views = dataset[idx]
+        assert len(views) == 2
+        print(idx, view_name(views[0]), view_name(views[1]))
+        viz = SceneViz()
+        poses = [views[view_idx]['camera_pose'] for view_idx in [0, 1]]
+        cam_size = max(auto_cam_size(poses), 0.001)
+        for view_idx in [0, 1]:
+            pts3d = views[view_idx]['pts3d']
+            valid_mask = views[view_idx]['valid_mask']
+            colors = rgb(views[view_idx]['img'])
+            viz.add_pointcloud(pts3d, colors, valid_mask)
+            viz.add_camera(pose_c2w=views[view_idx]['camera_pose'],
+                           focal=views[view_idx]['camera_intrinsics'][0, 0],
+                           color=(idx * 255, (1 - idx) * 255, 0),
+                           image=colors,
+                           cam_size=cam_size)
+        os.makedirs('./tmp/waymo', exist_ok=True)
+        path = f"./tmp/waymo/waymo_scene_{idx}.glb"
+        viz.save_glb(path)
diff --git a/dynamic_predictor/dust3r/datasets/wildrgbd.py b/dynamic_predictor/dust3r/datasets/wildrgbd.py
new file mode 100644
index 0000000000000000000000000000000000000000..c41dd0b78402bf8ff1e62c6a50de338aa916e0af
--- /dev/null
+++
b/dynamic_predictor/dust3r/datasets/wildrgbd.py @@ -0,0 +1,67 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). +# +# -------------------------------------------------------- +# Dataloader for preprocessed WildRGB-D +# dataset at https://github.com/wildrgbd/wildrgbd/ +# See datasets_preprocess/preprocess_wildrgbd.py +# -------------------------------------------------------- +import os.path as osp + +import cv2 +import numpy as np + +from dust3r.datasets.co3d import Co3d +from dust3r.utils.image import imread_cv2 + + +class WildRGBD(Co3d): + def __init__(self, mask_bg=True, *args, ROOT, **kwargs): + super().__init__(mask_bg, *args, ROOT=ROOT, **kwargs) + self.dataset_label = 'WildRGBD' + + def _get_metadatapath(self, obj, instance, view_idx): + return osp.join(self.ROOT, obj, instance, 'metadata', f'{view_idx:0>5d}.npz') + + def _get_impath(self, obj, instance, view_idx): + return osp.join(self.ROOT, obj, instance, 'rgb', f'{view_idx:0>5d}.jpg') + + def _get_depthpath(self, obj, instance, view_idx): + return osp.join(self.ROOT, obj, instance, 'depth', f'{view_idx:0>5d}.png') + + def _get_maskpath(self, obj, instance, view_idx): + return osp.join(self.ROOT, obj, instance, 'masks', f'{view_idx:0>5d}.png') + + def _read_depthmap(self, depthpath, input_metadata): + # We store depths in the depth scale of 1000. + # That is, when we load depth image and divide by 1000, we could get depth in meters. + depthmap = imread_cv2(depthpath, cv2.IMREAD_UNCHANGED) + depthmap = depthmap.astype(np.float32) / 1000.0 + return depthmap + + +if __name__ == "__main__": + from dust3r.datasets.base.base_stereo_view_dataset import view_name + from dust3r.viz import SceneViz, auto_cam_size + from dust3r.utils.image import rgb + + dataset = WildRGBD(split='train', ROOT="data/wildrgbd_processed", resolution=224, aug_crop=16) + + for idx in np.random.permutation(len(dataset)): + views = dataset[idx] + assert len(views) == 2 + print(view_name(views[0]), view_name(views[1])) + viz = SceneViz() + poses = [views[view_idx]['camera_pose'] for view_idx in [0, 1]] + cam_size = max(auto_cam_size(poses), 0.001) + for view_idx in [0, 1]: + pts3d = views[view_idx]['pts3d'] + valid_mask = views[view_idx]['valid_mask'] + colors = rgb(views[view_idx]['img']) + viz.add_pointcloud(pts3d, colors, valid_mask) + viz.add_camera(pose_c2w=views[view_idx]['camera_pose'], + focal=views[view_idx]['camera_intrinsics'][0, 0], + color=(idx * 255, (1 - idx) * 255, 0), + image=colors, + cam_size=cam_size) + viz.show() diff --git a/dynamic_predictor/dust3r/depth_eval.py b/dynamic_predictor/dust3r/depth_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..6ce1596dc403b0370958e2d32a3bd2d8333a6af4 --- /dev/null +++ b/dynamic_predictor/dust3r/depth_eval.py @@ -0,0 +1,337 @@ +import torch +import numpy as np +import cv2 +import glob +from pathlib import Path +from tqdm import tqdm +from dust3r.image_pairs import make_pairs +from dust3r.inference import inference +from dust3r.utils.image import load_images, rgb, enlarge_seg_masks +from copy import deepcopy +from scipy.optimize import minimize +import os +from collections import defaultdict +import dust3r.eval_metadata +from dust3r.eval_metadata import dataset_metadata + +def eval_mono_depth_estimation(args, model, device): + metadata = dataset_metadata.get(args.eval_dataset) + if metadata is None: + raise ValueError(f"Unknown dataset: {args.eval_dataset}") + + img_path = metadata.get('img_path') + if 
'img_path_func' in metadata: + img_path = metadata['img_path_func'](args) + + process_func = metadata.get('process_func') + if process_func is None: + raise ValueError(f"No processing function defined for dataset: {args.eval_dataset}") + + for filelist, save_dir in process_func(args, img_path): + Path(save_dir).mkdir(parents=True, exist_ok=True) + eval_mono_depth(args, model, device, filelist, save_dir=save_dir) + + +def eval_mono_depth(args, model, device, filelist, save_dir=None): + model.eval() + load_img_size = 512 + for file in tqdm(filelist): + # construct the "image pair" for the single image + file = [file] + imgs = load_images(file, size=load_img_size, verbose=False, crop= not args.no_crop) + imgs = [imgs[0], deepcopy(imgs[0])] + imgs[1]['idx'] = 1 + + pairs = make_pairs(imgs, symmetrize=True, prefilter=None) + output = inference(pairs, model, device, batch_size=1, verbose=False) + depth_map = output['pred1']['pts3d'][...,-1].mean(dim=0) + + if save_dir is not None: + #save the depth map to the save_dir as npy + np.save(f"{save_dir}/{file[0].split('/')[-1].replace('.png','depth.npy')}", depth_map.cpu().numpy()) + # also save the png + depth_map = (depth_map - depth_map.min()) / (depth_map.max() - depth_map.min()) + depth_map = (depth_map * 255).cpu().numpy().astype(np.uint8) + cv2.imwrite(f"{save_dir}/{file[0].split('/')[-1].replace('.png','depth.png')}", depth_map) + + + +## used for calculating the depth evaluation metrics + + +def group_by_directory(pathes, idx=-1): + """ + Groups the file paths based on the second-to-last directory in their paths. + + Parameters: + - pathes (list): List of file paths. + + Returns: + - dict: A dictionary where keys are the second-to-last directory names and values are lists of file paths. + """ + grouped_pathes = defaultdict(list) + + for path in pathes: + # Extract the second-to-last directory + dir_name = os.path.dirname(path).split('/')[idx] + grouped_pathes[dir_name].append(path) + + return grouped_pathes + + +def depth2disparity(depth, return_mask=False): + if isinstance(depth, torch.Tensor): + disparity = torch.zeros_like(depth) + elif isinstance(depth, np.ndarray): + disparity = np.zeros_like(depth) + non_negtive_mask = depth > 0 + disparity[non_negtive_mask] = 1.0 / depth[non_negtive_mask] + if return_mask: + return disparity, non_negtive_mask + else: + return disparity + +def absolute_error_loss(params, predicted_depth, ground_truth_depth): + s, t = params + + predicted_aligned = s * predicted_depth + t + + abs_error = np.abs(predicted_aligned - ground_truth_depth) + return np.sum(abs_error) + +def absolute_value_scaling(predicted_depth, ground_truth_depth, s=1, t=0): + predicted_depth_np = predicted_depth.cpu().numpy().reshape(-1) + ground_truth_depth_np = ground_truth_depth.cpu().numpy().reshape(-1) + + initial_params = [s, t] # s = 1, t = 0 + + result = minimize(absolute_error_loss, initial_params, args=(predicted_depth_np, ground_truth_depth_np)) + + s, t = result.x + return s, t + +def absolute_value_scaling2(predicted_depth, ground_truth_depth, s_init=1.0, t_init=0.0, lr=1e-4, max_iters=1000, tol=1e-6): + # Initialize s and t as torch tensors with requires_grad=True + s = torch.tensor([s_init], requires_grad=True, device=predicted_depth.device, dtype=predicted_depth.dtype) + t = torch.tensor([t_init], requires_grad=True, device=predicted_depth.device, dtype=predicted_depth.dtype) + + optimizer = torch.optim.Adam([s, t], lr=lr) + + prev_loss = None + + for i in range(max_iters): + optimizer.zero_grad() + + # Compute predicted 
aligned depth + predicted_aligned = s * predicted_depth + t + + # Compute absolute error + abs_error = torch.abs(predicted_aligned - ground_truth_depth) + + # Compute loss + loss = torch.sum(abs_error) + + # Backpropagate + loss.backward() + + # Update parameters + optimizer.step() + + # Check convergence + if prev_loss is not None and torch.abs(prev_loss - loss) < tol: + break + + prev_loss = loss.item() + + return s.detach().item(), t.detach().item() + +def depth_evaluation(predicted_depth_original, ground_truth_depth_original, max_depth=80, custom_mask=None, post_clip_min=None, post_clip_max=None, pre_clip_min=None, pre_clip_max=None, + align_with_lstsq=False, align_with_lad=False, align_with_lad2=False, lr=1e-4, max_iters=1000, use_gpu=False, align_with_scale=False, + disp_input=False): + """ + Evaluate the depth map using various metrics and return a depth error parity map, with an option for least squares alignment. + + Args: + predicted_depth (numpy.ndarray or torch.Tensor): The predicted depth map. + ground_truth_depth (numpy.ndarray or torch.Tensor): The ground truth depth map. + max_depth (float): The maximum depth value to consider. Default is 80 meters. + align_with_lstsq (bool): If True, perform least squares alignment of the predicted depth with ground truth. + + Returns: + dict: A dictionary containing the evaluation metrics. + torch.Tensor: The depth error parity map. + """ + + if isinstance(predicted_depth_original, np.ndarray): + predicted_depth_original = torch.from_numpy(predicted_depth_original) + if isinstance(ground_truth_depth_original, np.ndarray): + ground_truth_depth_original = torch.from_numpy(ground_truth_depth_original) + if custom_mask is not None and isinstance(custom_mask, np.ndarray): + custom_mask = torch.from_numpy(custom_mask) + + # if the dimension is 3, flatten to 2d along the batch dimension + if predicted_depth_original.dim() == 3: + _, h, w = predicted_depth_original.shape + predicted_depth_original = predicted_depth_original.view(-1, w) + ground_truth_depth_original = ground_truth_depth_original.view(-1, w) + if custom_mask is not None: + custom_mask = custom_mask.view(-1, w) + + # put to device + if use_gpu: + predicted_depth_original = predicted_depth_original.cuda() + ground_truth_depth_original = ground_truth_depth_original.cuda() + + # Filter out depths greater than max_depth + if max_depth is not None: + mask = (ground_truth_depth_original > 0) & (ground_truth_depth_original < max_depth) + else: + mask = (ground_truth_depth_original > 0) + + + predicted_depth = predicted_depth_original[mask] + ground_truth_depth = ground_truth_depth_original[mask] + + # Clip the depth values + if pre_clip_min is not None: + predicted_depth = torch.clamp(predicted_depth, min=pre_clip_min) + if pre_clip_max is not None: + predicted_depth = torch.clamp(predicted_depth, max=pre_clip_max) + + if disp_input: # align the pred to gt in the disparity space + real_gt = ground_truth_depth.clone() + ground_truth_depth = 1 / (ground_truth_depth + 1e-8) + + # various alignment methods + if align_with_lstsq: + # Convert to numpy for lstsq + predicted_depth_np = predicted_depth.cpu().numpy().reshape(-1, 1) + ground_truth_depth_np = ground_truth_depth.cpu().numpy().reshape(-1, 1) + + # Add a column of ones for the shift term + A = np.hstack([predicted_depth_np, np.ones_like(predicted_depth_np)]) + + # Solve for scale (s) and shift (t) using least squares + result = np.linalg.lstsq(A, ground_truth_depth_np, rcond=None) + s, t = result[0][0], result[0][1] + + # convert to 
torch tensor + s = torch.tensor(s, device=predicted_depth_original.device) + t = torch.tensor(t, device=predicted_depth_original.device) + + # Apply scale and shift + predicted_depth = s * predicted_depth + t + elif align_with_lad: + s, t = absolute_value_scaling(predicted_depth, ground_truth_depth, s=torch.median(ground_truth_depth) / torch.median(predicted_depth)) + predicted_depth = s * predicted_depth + t + elif align_with_lad2: + s_init = (torch.median(ground_truth_depth) / torch.median(predicted_depth)).item() + s, t = absolute_value_scaling2(predicted_depth, ground_truth_depth, s_init=s_init, lr=lr, max_iters=max_iters) + predicted_depth = s * predicted_depth + t + elif align_with_scale: + # Compute initial scale factor 's' using the closed-form solution (L2 norm) + dot_pred_gt = torch.nanmean(ground_truth_depth) + dot_pred_pred = torch.nanmean(predicted_depth) + s = dot_pred_gt / dot_pred_pred + + # Iterative reweighted least squares using the Weiszfeld method + for _ in range(10): + # Compute residuals between scaled predictions and ground truth + residuals = s * predicted_depth - ground_truth_depth + abs_residuals = residuals.abs() + 1e-8 # Add small constant to avoid division by zero + + # Compute weights inversely proportional to the residuals + weights = 1.0 / abs_residuals + + # Update 's' using weighted sums + weighted_dot_pred_gt = torch.sum(weights * predicted_depth * ground_truth_depth) + weighted_dot_pred_pred = torch.sum(weights * predicted_depth ** 2) + s = weighted_dot_pred_gt / weighted_dot_pred_pred + + # Optionally clip 's' to prevent extreme scaling + s = s.clamp(min=1e-3) + + # Detach 's' if you want to stop gradients from flowing through it + s = s.detach() + + # Apply the scale factor to the predicted depth + predicted_depth = s * predicted_depth + + else: + # Align the predicted depth with the ground truth using median scaling + scale_factor = torch.median(ground_truth_depth) / torch.median(predicted_depth) + predicted_depth *= scale_factor + + if disp_input: + # convert back to depth + ground_truth_depth = real_gt + predicted_depth = depth2disparity(predicted_depth) + + # Clip the predicted depth values + if post_clip_min is not None: + predicted_depth = torch.clamp(predicted_depth, min=post_clip_min) + if post_clip_max is not None: + predicted_depth = torch.clamp(predicted_depth, max=post_clip_max) + + if custom_mask is not None: + assert custom_mask.shape == ground_truth_depth_original.shape + mask_within_mask = custom_mask.cpu()[mask] + predicted_depth = predicted_depth[mask_within_mask] + ground_truth_depth = ground_truth_depth[mask_within_mask] + + # Calculate the metrics + abs_rel = torch.mean(torch.abs(predicted_depth - ground_truth_depth) / ground_truth_depth).item() + sq_rel = torch.mean(((predicted_depth - ground_truth_depth) ** 2) / ground_truth_depth).item() + + # Correct RMSE calculation + rmse = torch.sqrt(torch.mean((predicted_depth - ground_truth_depth) ** 2)).item() + + # Clip the depth values to avoid log(0) + predicted_depth = torch.clamp(predicted_depth, min=1e-5) + log_rmse = torch.sqrt(torch.mean((torch.log(predicted_depth) - torch.log(ground_truth_depth)) ** 2)).item() + + # Calculate the accuracy thresholds + max_ratio = torch.maximum(predicted_depth / ground_truth_depth, ground_truth_depth / predicted_depth) + threshold_1 = torch.mean((max_ratio < 1.25).float()).item() + threshold_2 = torch.mean((max_ratio < 1.25 ** 2).float()).item() + threshold_3 = torch.mean((max_ratio < 1.25 ** 3).float()).item() + + # Compute the depth error 
parity map + if align_with_lstsq or align_with_lad or align_with_lad2: + predicted_depth_original = predicted_depth_original * s + t + if disp_input: predicted_depth_original = depth2disparity(predicted_depth_original) + depth_error_parity_map = torch.abs(predicted_depth_original - ground_truth_depth_original) / ground_truth_depth_original + elif align_with_scale: + predicted_depth_original = predicted_depth_original * s + if disp_input: predicted_depth_original = depth2disparity(predicted_depth_original) + depth_error_parity_map = torch.abs(predicted_depth_original - ground_truth_depth_original) / ground_truth_depth_original + else: + predicted_depth_original = predicted_depth_original * scale_factor + if disp_input: predicted_depth_original = depth2disparity(predicted_depth_original) + depth_error_parity_map = torch.abs(predicted_depth_original - ground_truth_depth_original) / ground_truth_depth_original + + # Reshape the depth_error_parity_map back to the original image size + depth_error_parity_map_full = torch.zeros_like(ground_truth_depth_original) + depth_error_parity_map_full = torch.where(mask, depth_error_parity_map, depth_error_parity_map_full) + + predict_depth_map_full = predicted_depth_original + + gt_depth_map_full = torch.zeros_like(ground_truth_depth_original) + gt_depth_map_full = torch.where(mask, ground_truth_depth_original, gt_depth_map_full) + + num_valid_pixels = torch.sum(mask).item() if custom_mask is None else torch.sum(mask_within_mask).item() + if num_valid_pixels == 0: + abs_rel, sq_rel, rmse, log_rmse, threshold_1, threshold_2, threshold_3 = 0, 0, 0, 0, 0, 0, 0 + + results = { + 'Abs Rel': abs_rel, + 'Sq Rel': sq_rel, + 'RMSE': rmse, + 'Log RMSE': log_rmse, + 'δ < 1.25': threshold_1, + 'δ < 1.25^2': threshold_2, + 'δ < 1.25^3': threshold_3, + 'valid_pixels': num_valid_pixels + } + + return results, depth_error_parity_map_full, predict_depth_map_full, gt_depth_map_full diff --git a/dynamic_predictor/dust3r/eval_metadata.py b/dynamic_predictor/dust3r/eval_metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..e7205d4d0eaeb098a73c46a6d833f2b6a27712d0 --- /dev/null +++ b/dynamic_predictor/dust3r/eval_metadata.py @@ -0,0 +1,132 @@ +import os +import glob +from tqdm import tqdm + +# Define the merged dataset metadata dictionary +dataset_metadata = { + 'davis': { + 'img_path': "data/davis/DAVIS/JPEGImages/480p", + 'mask_path': "data/davis/DAVIS/masked_images/480p", + 'dir_path_func': lambda img_path, seq: os.path.join(img_path, seq), + 'gt_traj_func': lambda img_path, anno_path, seq: None, + 'traj_format': None, + 'seq_list': ["blackswan", "camel", "car-shadow", "dog", "horsejump-high", "motocross-jump", "parkour", "soapbox"], + 'full_seq': False, + 'mask_path_seq_func': lambda mask_path, seq: os.path.join(mask_path, seq), + 'skip_condition': None, + 'process_func': None, # Not used in mono depth estimation + }, + 'kitti': { + 'img_path': "data/kitti/depth_selection/val_selection_cropped/image_gathered", # Default path + 'mask_path': None, + 'dir_path_func': lambda img_path, seq: os.path.join(img_path, seq), + 'gt_traj_func': lambda img_path, anno_path, seq: None, + 'traj_format': None, + 'seq_list': None, + 'full_seq': True, + 'mask_path_seq_func': lambda mask_path, seq: None, + 'skip_condition': None, + 'process_func': lambda args, img_path: process_kitti(args, img_path), + }, + 'bonn': { + 'img_path': "data/bonn/rgbd_bonn_dataset", + 'mask_path': None, + 'dir_path_func': lambda img_path, seq: os.path.join(img_path, f'rgbd_bonn_{seq}', 
'rgb_110'), + 'gt_traj_func': lambda img_path, anno_path, seq: os.path.join(img_path, f'rgbd_bonn_{seq}', 'groundtruth_110.txt'), + 'traj_format': 'tum', + 'seq_list': ["balloon2", "crowd2", "crowd3", "person_tracking2", "synchronous"], + 'full_seq': False, + 'mask_path_seq_func': lambda mask_path, seq: None, + 'skip_condition': None, + 'process_func': lambda args, img_path: process_bonn(args, img_path), + }, + 'nyu': { + 'img_path': "data/nyu-v2/val/nyu_images", + 'mask_path': None, + 'process_func': lambda args, img_path: process_nyu(args, img_path), + }, + 'scannet': { + 'img_path': "data/scannetv2", + 'mask_path': None, + 'dir_path_func': lambda img_path, seq: os.path.join(img_path, seq, 'color_90'), + 'gt_traj_func': lambda img_path, anno_path, seq: os.path.join(img_path, seq, 'pose_90.txt'), + 'traj_format': 'replica', + 'seq_list': None, + 'full_seq': True, + 'mask_path_seq_func': lambda mask_path, seq: None, + 'skip_condition': lambda save_dir, seq: os.path.exists(os.path.join(save_dir, seq)), + 'process_func': lambda args, img_path: process_scannet(args, img_path), + }, + 'tum': { + 'img_path': "data/tum", + 'mask_path': None, + 'dir_path_func': lambda img_path, seq: os.path.join(img_path, seq, 'rgb_90'), + 'gt_traj_func': lambda img_path, anno_path, seq: os.path.join(img_path, seq, 'groundtruth_90.txt'), + 'traj_format': 'tum', + 'seq_list': None, + 'full_seq': True, + 'mask_path_seq_func': lambda mask_path, seq: None, + 'skip_condition': None, + 'process_func': None, + }, + 'sintel': { + 'img_path': "data/sintel/training/final", + 'anno_path': "data/sintel/training/camdata_left", + 'mask_path': None, + 'dir_path_func': lambda img_path, seq: os.path.join(img_path, seq), + 'gt_traj_func': lambda img_path, anno_path, seq: os.path.join(anno_path, seq), + 'traj_format': None, + 'seq_list': ["alley_2", "ambush_4", "ambush_5", "ambush_6", "cave_2", "cave_4", "market_2", + "market_5", "market_6", "shaman_3", "sleeping_1", "sleeping_2", "temple_2", "temple_3"], + 'full_seq': False, + 'mask_path_seq_func': lambda mask_path, seq: None, + 'skip_condition': None, + 'process_func': lambda args, img_path: process_sintel(args, img_path), + }, +} + +# Define processing functions for each dataset +def process_kitti(args, img_path): + for dir in tqdm(sorted(glob.glob(f'{img_path}/*'))): + filelist = sorted(glob.glob(f'{dir}/*.png')) + save_dir = f'{args.output_dir}/{os.path.basename(dir)}' + yield filelist, save_dir + +def process_bonn(args, img_path): + if args.full_seq: + for dir in tqdm(sorted(glob.glob(f'{img_path}/*/'))): + filelist = sorted(glob.glob(f'{dir}/rgb/*.png')) + save_dir = f'{args.output_dir}/{os.path.basename(os.path.dirname(dir))}' + yield filelist, save_dir + else: + seq_list = ["balloon2", "crowd2", "crowd3", "person_tracking2", "synchronous"] if args.seq_list is None else args.seq_list + for seq in tqdm(seq_list): + filelist = sorted(glob.glob(f'{img_path}/rgbd_bonn_{seq}/rgb_110/*.png')) + save_dir = f'{args.output_dir}/{seq}' + yield filelist, save_dir + +def process_nyu(args, img_path): + filelist = sorted(glob.glob(f'{img_path}/*.png')) + save_dir = f'{args.output_dir}' + yield filelist, save_dir + +def process_scannet(args, img_path): + seq_list = sorted(glob.glob(f'{img_path}/*')) + for seq in tqdm(seq_list): + filelist = sorted(glob.glob(f'{seq}/color_90/*.jpg')) + save_dir = f'{args.output_dir}/{os.path.basename(seq)}' + yield filelist, save_dir + +def process_sintel(args, img_path): + if args.full_seq: + for dir in tqdm(sorted(glob.glob(f'{img_path}/*/'))): + 
filelist = sorted(glob.glob(f'{dir}/*.png'))
+            save_dir = f'{args.output_dir}/{os.path.basename(os.path.dirname(dir))}'
+            yield filelist, save_dir
+    else:
+        seq_list = ["alley_2", "ambush_4", "ambush_5", "ambush_6", "cave_2", "cave_4", "market_2",
+                    "market_5", "market_6", "shaman_3", "sleeping_1", "sleeping_2", "temple_2", "temple_3"]
+        for seq in tqdm(seq_list):
+            filelist = sorted(glob.glob(f'{img_path}/{seq}/*.png'))
+            save_dir = f'{args.output_dir}/{seq}'
+            yield filelist, save_dir
\ No newline at end of file
diff --git a/dynamic_predictor/dust3r/heads/__init__.py b/dynamic_predictor/dust3r/heads/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed716e3adba60ee2c6ef2764597b16bf5189d239
--- /dev/null
+++ b/dynamic_predictor/dust3r/heads/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (C) 2024-present Naver Corporation. All rights reserved.
+# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
+#
+# --------------------------------------------------------
+# head factory
+# --------------------------------------------------------
+from .linear_head import LinearPts3d
+from .dpt_head import create_dpt_head
+
+
+def head_factory(head_type, output_mode, net, has_conf=False, has_mask=False):
+    """ build a prediction head for the decoder
+    """
+    if head_type == 'linear' and output_mode == 'pts3d':
+        return LinearPts3d(net, has_conf)  # LinearPts3d takes no mask flag (see linear_head.py)
+    elif head_type == 'dpt' and output_mode == 'pts3d':
+        return create_dpt_head(net, has_conf=has_conf, has_mask=has_mask)
+    else:
+        raise NotImplementedError(f"unexpected {head_type=} and {output_mode=}")
diff --git a/dynamic_predictor/dust3r/heads/dpt_head.py b/dynamic_predictor/dust3r/heads/dpt_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf42670dcd84fb3cac02d022121861e723296995
--- /dev/null
+++ b/dynamic_predictor/dust3r/heads/dpt_head.py
@@ -0,0 +1,377 @@
+# Copyright (C) 2024-present Naver Corporation. All rights reserved.
+# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
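(A minimal sketch of how head_factory above gets used. The SimpleNamespace stand-in
and its attribute values are hypothetical, not part of this diff; they only mirror
the attributes create_dpt_head reads. With dec_depth=12 the DPT hooks land at
decoder layers [0, 6, 9, 12].)

import types

inf = float('inf')
# Hypothetical stand-in for the real network, carrying just what the factory reads.
net = types.SimpleNamespace(
    dec_depth=12, enc_embed_dim=1024, dec_embed_dim=768,
    depth_mode=('exp', -inf, inf), conf_mode=('exp', 1, inf),
    mask_mode=('sigmoid', 0, 1),
)
l2 = net.dec_depth
print([0, l2 * 2 // 4, l2 * 3 // 4, l2])        # hook indices: [0, 6, 9, 12]
# head = head_factory('dpt', 'pts3d', net, has_conf=True)       # pts3d + conf head
# mask_head = head_factory('dpt', 'pts3d', net, has_mask=True)  # dynamic-mask head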
+# +# -------------------------------------------------------- +# dpt head implementation for DUST3R +# Downstream heads assume inputs of size B x N x C (where N is the number of tokens) ; +# or if it takes as input the output at every layer, the attribute return_all_layers should be set to True +# the forward function also takes as input a dictionnary img_info with key "height" and "width" +# for PixelwiseTask, the output will be of dimension B x num_channels x H x W +# -------------------------------------------------------- +from einops import rearrange +from typing import List +import torch +import torch.nn as nn +from dust3r.heads.postprocess import postprocess, mask_postprocess +import dust3r.utils.path_to_croco # noqa: F401 +from models.dpt_block import DPTOutputAdapter # noqa + + +class DPTOutputAdapter_mask(DPTOutputAdapter): + + def init(self, dim_tokens_enc=768): + super().init(dim_tokens_enc) + # these are duplicated weights + del self.act_1_postprocess + del self.act_2_postprocess + del self.act_3_postprocess + del self.act_4_postprocess + + def forward(self, layers: List[torch.Tensor], image_size=None): + # assert self.dim_tokens_enc is not None, 'Need to call init(dim_tokens_enc) function first' + # # H, W = input_info['image_size'] + # image_size = self.image_size if image_size is None else image_size + # H, W = image_size + # # Number of patches in height and width + # N_H = H // (self.stride_level * self.P_H) + # N_W = W // (self.stride_level * self.P_W) + + # # Hook decoder onto 4 layers from specified ViT layers + # layers = [encoder_tokens[hook] for hook in self.hooks] + # # [torch.Size([2, 576, 1024]), torch.Size([2, 576, 768]), torch.Size([2, 576, 768]), torch.Size([2, 576, 768])] + + # # Extract only task-relevant tokens and ignore global tokens. 
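+        # NOTE: in this _mask variant the token-adaptation, spatial-reshape and
+        # act_postprocess steps stay commented out -- forward() expects `layers`
+        # that are already spatial feature maps (act_postprocess outputs) and
+        # resumes at the scratch.layer_rn projection below.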
+ # layers = [self.adapt_tokens(l) for l in layers] + # # [torch.Size([2, 576, 1024]), torch.Size([2, 576, 768]), torch.Size([2, 576, 768]), torch.Size([2, 576, 768])] + + # # Reshape tokens to spatial representation + # layers = [rearrange(l, 'b (nh nw) c -> b c nh nw', nh=N_H, nw=N_W) for l in layers] + # # [torch.Size([2, 96, 72, 128]), torch.Size([2, 192, 36, 64]), torch.Size([2, 384, 18, 32]), torch.Size([2, 768, 9, 16])] + + # feat_rearranged = layers.clone() + + # layers = [self.act_postprocess[idx](l) for idx, l in enumerate(layers)] + # [torch.Size([2, 256, 72, 128]), torch.Size([2, 256, 36, 64]), torch.Size([2, 256, 18, 32]), torch.Size([2, 256, 9, 16])] + + # Project layers to chosen feature dim + layers = [self.scratch.layer_rn[idx](l) for idx, l in enumerate(layers)] + # [torch.Size([2, 256, 72, 128]), torch.Size([2, 256, 36, 64]), torch.Size([2, 256, 18, 32]), torch.Size([2, 256, 9, 16])] + + + # Fuse layers using refinement stages + path_4 = self.scratch.refinenet4(layers[3])[:, :, :layers[2].shape[2], :layers[2].shape[3]] + # torch.Size([2, 256, 18, 32]) + + path_3 = self.scratch.refinenet3(path_4, layers[2]) + # torch.Size([2, 256, 36, 64]) + + path_2 = self.scratch.refinenet2(path_3, layers[1]) + path_1 = self.scratch.refinenet1(path_2, layers[0]) + + # Output head + out = self.head(path_1) + + return out + +class DPTOutputAdapter_mask_full(DPTOutputAdapter): + + def init(self, dim_tokens_enc=768): + super().init(dim_tokens_enc) + # these are duplicated weights + del self.act_1_postprocess + del self.act_2_postprocess + del self.act_3_postprocess + del self.act_4_postprocess + + def forward(self, encoder_tokens: List[torch.Tensor], image_size=None): + assert self.dim_tokens_enc is not None, 'Need to call init(dim_tokens_enc) function first' + # H, W = input_info['image_size'] + image_size = self.image_size if image_size is None else image_size + H, W = image_size + # Number of patches in height and width + N_H = H // (self.stride_level * self.P_H) + N_W = W // (self.stride_level * self.P_W) + + # Hook decoder onto 4 layers from specified ViT layers + layers = [encoder_tokens[hook] for hook in self.hooks] + + # Extract only task-relevant tokens and ignore global tokens. 
+ # [torch.Size([2, 768, 1024]), torch.Size([2, 768, 768]), torch.Size([2, 768, 768]), torch.Size([2, 768, 768])] + layers = [self.adapt_tokens(l) for l in layers] + + # Reshape tokens to spatial representation [torch.Size([2, 768, 1024]), torch.Size([2, 768, 768]), torch.Size([2, 768, 768]), torch.Size([2, 768, 768])] + layers = [rearrange(l, 'b (nh nw) c -> b c nh nw', nh=N_H, nw=N_W) for l in layers] + # [torch.Size([2, 1024, 24, 32]), torch.Size([2, 768, 24, 32]), torch.Size([2, 768, 24, 32]), torch.Size([2, 768, 24, 32])] + + + layers = [self.act_postprocess[idx](l) for idx, l in enumerate(layers)] + # [torch.Size([2, 96, 96, 128]), torch.Size([2, 192, 48, 64]), torch.Size([2, 384, 24, 32]), torch.Size([2, 768, 12, 16])] + + # Project layers to chosen feature dim + layers = [self.scratch.layer_rn[idx](l) for idx, l in enumerate(layers)] + # [torch.Size([2, 256, 96, 128]), torch.Size([2, 256, 48, 64]), torch.Size([2, 256, 24, 32]), torch.Size([2, 256, 12, 16])] + + + # Fuse layers using refinement stages + path_4 = self.scratch.refinenet4(layers[3])[:, :, :layers[2].shape[2], :layers[2].shape[3]] + # torch.Size([2, 256, 18, 32]) + + path_3 = self.scratch.refinenet3(path_4, layers[2]) + # torch.Size([2, 256, 36, 64]) + + path_2 = self.scratch.refinenet2(path_3, layers[1]) + path_1 = self.scratch.refinenet1(path_2, layers[0]) + + # Output head + out = self.head(path_1) + + return out + +class DPTOutputAdapter_fix(DPTOutputAdapter): + """ + Adapt croco's DPTOutputAdapter implementation for dust3r: + remove duplicated weigths, and fix forward for dust3r + """ + + def init(self, dim_tokens_enc=768): + super().init(dim_tokens_enc) + # these are duplicated weights + del self.act_1_postprocess + del self.act_2_postprocess + del self.act_3_postprocess + del self.act_4_postprocess + + def forward(self, encoder_tokens: List[torch.Tensor], image_size=None): + assert self.dim_tokens_enc is not None, 'Need to call init(dim_tokens_enc) function first' + # H, W = input_info['image_size'] + image_size = self.image_size if image_size is None else image_size + H, W = image_size + # Number of patches in height and width + N_H = H // (self.stride_level * self.P_H) + N_W = W // (self.stride_level * self.P_W) + + # Hook decoder onto 4 layers from specified ViT layers + layers = [encoder_tokens[hook] for hook in self.hooks] + + # Extract only task-relevant tokens and ignore global tokens. 
+ # [torch.Size([2, 768, 1024]), torch.Size([2, 768, 768]), torch.Size([2, 768, 768]), torch.Size([2, 768, 768])] + layers = [self.adapt_tokens(l) for l in layers] + + # Reshape tokens to spatial representation [torch.Size([2, 768, 1024]), torch.Size([2, 768, 768]), torch.Size([2, 768, 768]), torch.Size([2, 768, 768])] + layers = [rearrange(l, 'b (nh nw) c -> b c nh nw', nh=N_H, nw=N_W) for l in layers] + # [torch.Size([2, 1024, 24, 32]), torch.Size([2, 768, 24, 32]), torch.Size([2, 768, 24, 32]), torch.Size([2, 768, 24, 32])] + + + layers = [self.act_postprocess[idx](l) for idx, l in enumerate(layers)] + # [torch.Size([2, 96, 96, 128]), torch.Size([2, 192, 48, 64]), torch.Size([2, 384, 24, 32]), torch.Size([2, 768, 12, 16])] + + # Project layers to chosen feature dim + layers = [self.scratch.layer_rn[idx](l) for idx, l in enumerate(layers)] + # [torch.Size([2, 256, 96, 128]), torch.Size([2, 256, 48, 64]), torch.Size([2, 256, 24, 32]), torch.Size([2, 256, 12, 16])] + + + # Fuse layers using refinement stages + path_4 = self.scratch.refinenet4(layers[3])[:, :, :layers[2].shape[2], :layers[2].shape[3]] + # torch.Size([2, 256, 18, 32]) + + path_3 = self.scratch.refinenet3(path_4, layers[2]) + # torch.Size([2, 256, 36, 64]) + + path_2 = self.scratch.refinenet2(path_3, layers[1]) + path_1 = self.scratch.refinenet1(path_2, layers[0]) + + # Output head + out = self.head(path_1) + + return out + + +class PixelwiseTaskWithDPT(nn.Module): + """ DPT module for dust3r, can return 3D points + confidence for all pixels""" + + def __init__(self, *, n_cls_token=0, hooks_idx=None, dim_tokens=None, + output_width_ratio=1, num_channels=1, postprocess=None, depth_mode=None, conf_mode=None, mask_mode=None, **kwargs): + super(PixelwiseTaskWithDPT, self).__init__() + self.return_all_layers = True # backbone needs to return all layers + self.postprocess = postprocess + self.depth_mode = depth_mode + self.conf_mode = conf_mode + self.mask_mode = mask_mode + + assert n_cls_token == 0, "Not implemented" + dpt_args = dict(output_width_ratio=output_width_ratio, + num_channels=num_channels, + **kwargs) + if hooks_idx is not None: + dpt_args.update(hooks=hooks_idx) + self.dpt = DPTOutputAdapter_fix(**dpt_args) + dpt_init_args = {} if dim_tokens is None else {'dim_tokens_enc': dim_tokens} + self.dpt.init(**dpt_init_args) + + def forward(self, x, img_info): + out = self.dpt(x, image_size=(img_info[0], img_info[1])) + if self.postprocess: + out = self.postprocess(out, self.depth_mode, self.conf_mode, self.mask_mode) + return out + +class PixelwiseTaskWithDPT_Mask_bce(nn.Module): + """ DPT module for dust3r, can return 3D points + confidence for all pixels""" + + def __init__(self, *, n_cls_token=0, hooks_idx=None, dim_tokens=None, + output_width_ratio=1, num_channels=1, postprocess=None, depth_mode=None, conf_mode=None, mask_mode=None, **kwargs): + super(PixelwiseTaskWithDPT_Mask_bce, self).__init__() + self.return_all_layers = True # backbone needs to return all layers + self.postprocess = postprocess + self.depth_mode = depth_mode + self.conf_mode = conf_mode + self.mask_mode = ('sigmoid', 0, 1) + + assert n_cls_token == 0, "Not implemented" + dpt_args = dict(output_width_ratio=output_width_ratio, + num_channels=num_channels, + **kwargs) + if hooks_idx is not None: + dpt_args.update(hooks=hooks_idx) + self.dpt = DPTOutputAdapter_mask(**dpt_args) + dpt_init_args = {} if dim_tokens is None else {'dim_tokens_enc': dim_tokens} + self.dpt.init(**dpt_init_args) + + def forward(self, x, img_info): + out = self.dpt(x, 
image_size=(img_info[0], img_info[1])) + if self.postprocess: + out = self.postprocess(out, self.depth_mode, self.conf_mode, self.mask_mode) + return out + +class PixelwiseTaskWithDPT_Mask(nn.Module): + """ DPT module for dust3r, can return 3D points + confidence for all pixels""" + + def __init__(self, *, n_cls_token=0, hooks_idx=None, dim_tokens=None, + output_width_ratio=1, num_channels=1, postprocess=None, depth_mode=None, conf_mode=None, mask_mode=None, **kwargs): + super(PixelwiseTaskWithDPT_Mask, self).__init__() + self.return_all_layers = True # backbone needs to return all layers + self.postprocess = postprocess + self.depth_mode = depth_mode + self.conf_mode = conf_mode + self.mask_mode = mask_mode + + assert n_cls_token == 0, "Not implemented" + dpt_args = dict(output_width_ratio=output_width_ratio, + num_channels=num_channels, + **kwargs) + if hooks_idx is not None: + dpt_args.update(hooks=hooks_idx) + self.dpt = DPTOutputAdapter_mask(**dpt_args) + dpt_init_args = {} if dim_tokens is None else {'dim_tokens_enc': dim_tokens} + self.dpt.init(**dpt_init_args) + + def forward(self, x, img_info): + out = self.dpt(x, image_size=(img_info[0], img_info[1])) + + return out + + +class PixelwiseTaskWithDPT_Mask_full(nn.Module): + """ DPT module for dust3r, can return 3D points + confidence for all pixels""" + + def __init__(self, *, n_cls_token=0, hooks_idx=None, dim_tokens=None, + output_width_ratio=1, num_channels=1, postprocess=None, depth_mode=None, conf_mode=None, mask_mode=None, **kwargs): + super(PixelwiseTaskWithDPT_Mask_full, self).__init__() + self.return_all_layers = True # backbone needs to return all layers + self.postprocess = postprocess + self.depth_mode = depth_mode + self.conf_mode = conf_mode + self.mask_mode = mask_mode + + assert n_cls_token == 0, "Not implemented" + dpt_args = dict(output_width_ratio=output_width_ratio, + num_channels=num_channels, + **kwargs) + if hooks_idx is not None: + dpt_args.update(hooks=hooks_idx) + self.dpt = DPTOutputAdapter_mask_full(**dpt_args) + dpt_init_args = {} if dim_tokens is None else {'dim_tokens_enc': dim_tokens} + self.dpt.init(**dpt_init_args) + + def forward(self, x, img_info): + out = self.dpt(x, image_size=(img_info[0], img_info[1])) + + return out + +def create_dpt_head(net, has_conf=False, has_mask=False, num_channels=1): + """ + return PixelwiseTaskWithDPT for given net params + """ + if has_mask: + assert net.dec_depth > 9 + l2 = net.dec_depth + feature_dim = 256 + last_dim = feature_dim//2 + ed = net.enc_embed_dim + dd = net.dec_embed_dim + + return PixelwiseTaskWithDPT(num_channels=num_channels, + feature_dim=feature_dim, + last_dim=last_dim, + hooks_idx=[0, l2*2//4, l2*3//4, l2], + dim_tokens=[ed, dd, dd, dd], + postprocess=mask_postprocess, + mask_mode=net.mask_mode, + head_type='semseg') + else: + assert net.dec_depth > 9 + l2 = net.dec_depth + feature_dim = 256 + last_dim = feature_dim//2 + out_nchan = 3 + ed = net.enc_embed_dim + dd = net.dec_embed_dim + return PixelwiseTaskWithDPT(num_channels=out_nchan + has_conf, + feature_dim=feature_dim, + last_dim=last_dim, + hooks_idx=[0, l2*2//4, l2*3//4, l2], + dim_tokens=[ed, dd, dd, dd], + postprocess=postprocess, + depth_mode=net.depth_mode, + conf_mode=net.conf_mode, + head_type='regression') + + +def create_dpt_head_mask(net): + """ + return PixelwiseTaskWithDPT for given net params + """ + assert net.dec_depth > 9 + l2 = net.dec_depth + feature_dim = 256 + last_dim = feature_dim//2 + ed = net.enc_embed_dim + dd = net.dec_embed_dim + + return 
PixelwiseTaskWithDPT_Mask(num_channels=2, + feature_dim=feature_dim, + last_dim=last_dim, + hooks_idx=[0, l2*2//4, l2*3//4, l2], + dim_tokens=[ed, dd, dd, dd], + postprocess=None, + mask_mode=None, + head_type='semseg') + +def create_dpt_head_mask_full(net): + """ + return PixelwiseTaskWithDPT for given net params + """ + assert net.dec_depth > 9 + l2 = net.dec_depth + feature_dim = 256 + last_dim = feature_dim//2 + ed = net.enc_embed_dim + dd = net.dec_embed_dim + + return PixelwiseTaskWithDPT_Mask_full(num_channels=2, + feature_dim=feature_dim, + last_dim=last_dim, + hooks_idx=[0, l2*2//4, l2*3//4, l2], + dim_tokens=[ed, dd, dd, dd], + postprocess=None, + mask_mode=None, + head_type='semseg') \ No newline at end of file diff --git a/dynamic_predictor/dust3r/heads/linear_head.py b/dynamic_predictor/dust3r/heads/linear_head.py new file mode 100644 index 0000000000000000000000000000000000000000..6b697f29eaa6f43fad0a3e27a8d9b8f1a602a833 --- /dev/null +++ b/dynamic_predictor/dust3r/heads/linear_head.py @@ -0,0 +1,41 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). +# +# -------------------------------------------------------- +# linear head implementation for DUST3R +# -------------------------------------------------------- +import torch.nn as nn +import torch.nn.functional as F +from dust3r.heads.postprocess import postprocess + + +class LinearPts3d (nn.Module): + """ + Linear head for dust3r + Each token outputs: - 16x16 3D points (+ confidence) + """ + + def __init__(self, net, has_conf=False): + super().__init__() + self.patch_size = net.patch_embed.patch_size[0] + self.depth_mode = net.depth_mode + self.conf_mode = net.conf_mode + self.has_conf = has_conf + + self.proj = nn.Linear(net.dec_embed_dim, (3 + has_conf)*self.patch_size**2) + + def setup(self, croconet): + pass + + def forward(self, decout, img_shape): + H, W = img_shape + tokens = decout[-1] + B, S, D = tokens.shape + + # extract 3D points + feat = self.proj(tokens) # B,S,D + feat = feat.transpose(-1, -2).view(B, -1, H//self.patch_size, W//self.patch_size) + feat = F.pixel_shuffle(feat, self.patch_size) # B,3,H,W + + # permute + norm depth + return postprocess(feat, self.depth_mode, self.conf_mode) diff --git a/dynamic_predictor/dust3r/heads/postprocess.py b/dynamic_predictor/dust3r/heads/postprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..9b72a3b806e513f0668839505c96f2716f6d3098 --- /dev/null +++ b/dynamic_predictor/dust3r/heads/postprocess.py @@ -0,0 +1,67 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). 
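(A small standalone sketch of the reshaping LinearPts3d above performs; all shapes
here are illustrative, not from the diff. Each decoder token is projected to a
16x16 patch carrying (3 + has_conf) channels, and F.pixel_shuffle scatters those
patches back onto the pixel grid.)

import torch
import torch.nn.functional as F

B, H, W, P, C = 2, 224, 224, 16, 4      # C = 3 point coords + 1 confidence
S = (H // P) * (W // P)                 # number of tokens per image
feat = torch.randn(B, S, C * P * P)     # output of the linear projection
feat = feat.transpose(-1, -2).view(B, C * P * P, H // P, W // P)
out = F.pixel_shuffle(feat, P)          # (B, C*P*P, H/P, W/P) -> (B, C, H, W)
assert out.shape == (B, C, H, W)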
+# +# -------------------------------------------------------- +# post process function for all heads: extract 3D points/confidence from output +# -------------------------------------------------------- +import torch + + +def postprocess(out, depth_mode, conf_mode, mask_mode): + """ + extract 3D points/confidence from prediction head output + """ + fmap = out.permute(0, 2, 3, 1) # B,H,W,3 + res = dict(pts3d=reg_dense_depth(fmap[:, :, :, 0:3], mode=depth_mode)) + + if conf_mode is not None: + res['conf'] = reg_dense_conf(fmap[:, :, :, 3], mode=conf_mode) + + return res + +def mask_postprocess(out, depth_mode, conf_mode, mask_mode): + """ + extract 3D mask from prediction head output + """ + fmap = out.permute(0, 2, 3, 1) # B,H,W,3 + res = dict(mask=reg_dense_conf(fmap[:, :, :, 0], mode=mask_mode)) + + return res + +def reg_dense_depth(xyz, mode): + """ + extract 3D points from prediction head output + """ + mode, vmin, vmax = mode + + no_bounds = (vmin == -float('inf')) and (vmax == float('inf')) + assert no_bounds + + if mode == 'linear': + if no_bounds: + return xyz # [-inf, +inf] + return xyz.clip(min=vmin, max=vmax) + + # distance to origin + d = xyz.norm(dim=-1, keepdim=True) + xyz = xyz / d.clip(min=1e-8) + + if mode == 'square': + return xyz * d.square() + + if mode == 'exp': + return xyz * torch.expm1(d) + + raise ValueError(f'bad {mode=}') + + +def reg_dense_conf(x, mode): + """ + extract confidence from prediction head output + """ + mode, vmin, vmax = mode + if mode == 'exp': + return vmin + x.exp().clip(max=vmax-vmin) + if mode == 'sigmoid': + return (vmax - vmin) * torch.sigmoid(x) + vmin + raise ValueError(f'bad {mode=}') diff --git a/dynamic_predictor/dust3r/image_pairs.py b/dynamic_predictor/dust3r/image_pairs.py new file mode 100644 index 0000000000000000000000000000000000000000..2b2f4fbe22e9c4a0f33801ecde4711341f134f65 --- /dev/null +++ b/dynamic_predictor/dust3r/image_pairs.py @@ -0,0 +1,112 @@ +# -------------------------------------------------------- +# utilities needed to load image pairs +# -------------------------------------------------------- +import numpy as np +import torch + + +def make_pairs(imgs, scene_graph='complete', prefilter=None, symmetrize=True, force_symmetrize=False): + pairs = [] + if scene_graph == 'complete': # complete graph + for i in range(len(imgs)): + for j in range(i): + pairs.append((imgs[i], imgs[j])) + elif scene_graph.startswith('swin'): + iscyclic = not scene_graph.endswith('noncyclic') + try: + winsize = int(scene_graph.split('-')[1]) + except Exception as e: + winsize = 3 + pairsid = set() + if scene_graph.startswith('swinstride'): + stride = 2 + elif scene_graph.startswith('swin2stride'): + stride = 3 + else: + stride = 1 + if scene_graph.startswith('swinskip_start'): + start = 2 + else: + start = 1 + for i in range(len(imgs)): + for j in range(start, stride*winsize + start, stride): + idx = (i + j) + if iscyclic: + idx = idx % len(imgs) # explicit loop closure + if idx >= len(imgs): + continue + pairsid.add((i, idx) if i < idx else (idx, i)) + for i, j in pairsid: + pairs.append((imgs[i], imgs[j])) + elif scene_graph.startswith('logwin'): + iscyclic = not scene_graph.endswith('noncyclic') + try: + winsize = int(scene_graph.split('-')[1]) + except Exception as e: + winsize = 3 + offsets = [2**i for i in range(winsize)] + pairsid = set() + for i in range(len(imgs)): + ixs_l = [i - off for off in offsets] + ixs_r = [i + off for off in offsets] + for j in ixs_l + ixs_r: + if iscyclic: + j = j % len(imgs) # Explicit loop closure + if j 
< 0 or j >= len(imgs) or j == i: + continue + pairsid.add((i, j) if i < j else (j, i)) + for i, j in pairsid: + pairs.append((imgs[i], imgs[j])) + elif scene_graph.startswith('oneref'): + refid = int(scene_graph.split('-')[1]) if '-' in scene_graph else 0 + for j in range(len(imgs)): + if j != refid: + pairs.append((imgs[refid], imgs[j])) + + if (symmetrize and not scene_graph.startswith('oneref') and not scene_graph.startswith('swin-1')) or len(imgs) == 2 or force_symmetrize: + pairs += [(img2, img1) for img1, img2 in pairs] + + # now, remove edges + if isinstance(prefilter, str) and prefilter.startswith('seq'): + pairs = filter_pairs_seq(pairs, int(prefilter[3:])) + + if isinstance(prefilter, str) and prefilter.startswith('cyc'): + pairs = filter_pairs_seq(pairs, int(prefilter[3:]), cyclic=True) + + return pairs + + +def sel(x, kept): + if isinstance(x, dict): + return {k: sel(v, kept) for k, v in x.items()} + if isinstance(x, (torch.Tensor, np.ndarray)): + return x[kept] + if isinstance(x, (tuple, list)): + return type(x)([x[k] for k in kept]) + + +def _filter_edges_seq(edges, seq_dis_thr, cyclic=False): + # number of images + n = max(max(e) for e in edges) + 1 + + kept = [] + for e, (i, j) in enumerate(edges): + dis = abs(i - j) + if cyclic: + dis = min(dis, abs(i + n - j), abs(i - n - j)) + if dis <= seq_dis_thr: + kept.append(e) + return kept + + +def filter_pairs_seq(pairs, seq_dis_thr, cyclic=False): + edges = [(img1['idx'], img2['idx']) for img1, img2 in pairs] + kept = _filter_edges_seq(edges, seq_dis_thr, cyclic=cyclic) + return [pairs[i] for i in kept] + + +def filter_edges_seq(view1, view2, pred1, pred2, seq_dis_thr, cyclic=False): + edges = [(int(i), int(j)) for i, j in zip(view1['idx'], view2['idx'])] + kept = _filter_edges_seq(edges, seq_dis_thr, cyclic=cyclic) + print(f'>> Filtering edges more than {seq_dis_thr} frames apart: kept {len(kept)}/{len(edges)} edges') + return sel(view1, kept), sel(view2, kept), sel(pred1, kept), sel(pred2, kept) diff --git a/dynamic_predictor/dust3r/inference.py b/dynamic_predictor/dust3r/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..9a6967485c37ade82653d541a7dfc37c87185716 --- /dev/null +++ b/dynamic_predictor/dust3r/inference.py @@ -0,0 +1,252 @@ +# -------------------------------------------------------- +# utilities needed for the inference +# -------------------------------------------------------- +import tqdm +import torch +from dust3r.utils.device import to_cpu, collate_with_cat +from dust3r.utils.misc import invalid_to_nans +from dust3r.utils.geometry import depthmap_to_pts3d, geotrf +from dust3r.viz import SceneViz, auto_cam_size +from dust3r.utils.image import rgb + +import numpy as np +import torch +from PIL import Image + +def _interleave_imgs(img1, img2): + res = {} + for key, value1 in img1.items(): + value2 = img2[key] + if isinstance(value1, torch.Tensor) and value1.ndim == value2.ndim: + value = torch.stack((value1, value2), dim=1).flatten(0, 1) + else: + value = [x for pair in zip(value1, value2) for x in pair] + res[key] = value + return res + + +def make_batch_symmetric(batch): + view1, view2 = batch + view1, view2 = (_interleave_imgs(view1, view2), _interleave_imgs(view2, view1)) + return view1, view2 + + +def mask_to_color(mask): + colors = np.zeros((*mask.shape, 3)) + colors[:,:,0] = mask.cpu().detach() # Green channel weighted by mmask + return colors + +def visualize_results_mmask(view1, view2, pred1, pred2, save_dir='./tmp', save_name=None, visualize_type='gt'): + # visualize_type: 
'gt' or 'pred' + viz1 = SceneViz() + viz2 = SceneViz() + viz = [viz1, viz2] + views = [view1, view2] + poses = [views[view_idx]['camera_pose'][0] for view_idx in [0, 1]] + cam_size = max(auto_cam_size(poses), 0.5) + if visualize_type == 'pred': + cam_size *= 0.1 + views[0]['pts3d'] = geotrf(poses[0], pred1['pts3d']) # convert from X_camera1 to X_world + views[1]['pts3d'] = geotrf(poses[0], pred2['pts3d_in_other_view']) + mmask = [pred1['dynamic_mask'], pred2['dynamic_mask']] + else: + mmask = [view1['dynamic_mask'], view2['dynamic_mask']] + + images = [] + save_paths = [] + for view_idx in [0, 1]: + pts3d = views[view_idx]['pts3d'][0] + valid_mask = views[view_idx]['valid_mask'][0] + colors = rgb(views[view_idx]['img'][0]) + + alpha = 0.5 # You can adjust the alpha value as needed + mmask_color = mask_to_color(mmask[view_idx][0]) + colors = alpha * colors + (1 - alpha) * mmask_color + images.append(colors) + # viz[view_idx].add_pointcloud(pts3d, colors, valid_mask) + # # viz.add_camera(pose_c2w=views[view_idx]['camera_pose'][0], + # # focal=views[view_idx]['camera_intrinsics'][0, 0], + # # color=(255, 0, 0), + # # image=colors, + # # cam_size=cam_size) + save_name = f'{views[0]["dataset"][0]}_{views[0]["label"][0]}_{views[0]["instance"][0]}_{views[1]["instance"][0]}_{visualize_type}_{view_idx}' + # save_path = save_dir+'/'+save_name+'_mmask.glb' + # # print(f'Saving visualization to {save_path}') + # viz[view_idx].save_glb(save_path) + # save_paths.append(save_path) + + # Save the RGB image multiplied by 255 to a file + rgb_image = (colors * 255).astype(np.uint8) + img = Image.fromarray(rgb_image) + img.save(save_dir+'/'+save_name+'_mmask.png') + + return images[0], images[1] + +def visualize_results(view1, view2, pred1, pred2, save_dir='./tmp', save_name=None, visualize_type='gt'): + # visualize_type: 'gt' or 'pred' + viz1 = SceneViz() + viz2 = SceneViz() + viz = [viz1, viz2] + views = [view1, view2] + poses = [views[view_idx]['camera_pose'][0] for view_idx in [0, 1]] + cam_size = max(auto_cam_size(poses), 0.5) + if visualize_type == 'pred': + cam_size *= 0.1 + views[0]['pts3d'] = geotrf(poses[0], pred1['pts3d']) # convert from X_camera1 to X_world + views[1]['pts3d'] = geotrf(poses[0], pred2['pts3d_in_other_view']) + + save_paths = [] + images = [] + for view_idx in [0, 1]: + pts3d = views[view_idx]['pts3d'][0] + valid_mask = views[view_idx]['valid_mask'][0] + colors = rgb(views[view_idx]['img'][0]) + images.append(colors) + + + # viz[view_idx].add_pointcloud(pts3d, colors, valid_mask) + # viz[view_idx].add_camera(pose_c2w=views[view_idx]['camera_pose'][0], + # focal=views[view_idx]['camera_intrinsics'][0, 0], + # color=(255, 0, 0), + # image=colors, + # cam_size=cam_size) + if save_name is None: + save_name = f'{views[0]["dataset"][0]}_{views[0]["label"][0]}_{views[0]["instance"][0]}_{views[1]["instance"][0]}_{visualize_type}_{view_idx}' + # save_path = save_dir+'/'+save_name+'.glb' + # # print(f'Saving visualization to {save_path}') + # viz[view_idx].save_glb(save_path) + # save_paths.append(save_path) + + # Save the RGB image multiplied by 255 to a file + rgb_image = (colors * 255).astype(np.uint8) + img = Image.fromarray(rgb_image) + img.save(save_dir+'/'+save_name+'.png') + + + return images[0], images[1] + +def loss_of_one_batch(batch, model, criterion, device, symmetrize_batch=False, use_amp=False, ret=None): + view1, view2 = batch + ignore_keys = set(['depthmap', 'dataset', 'label', 'instance', 'idx', 'true_shape', 'rng']) + for view in batch: + for name in view.keys(): # 
pseudo_focal + if name in ignore_keys: + continue + view[name] = view[name].to(device, non_blocking=True) + + if symmetrize_batch: + view1, view2 = make_batch_symmetric(batch) + + with torch.amp.autocast(enabled=bool(use_amp), device_type="cuda"): + + # Export the model + + pred1, pred2 = model(view1, view2) + + # loss is supposed to be symmetric + with torch.amp.autocast(enabled=False, device_type="cuda"): + loss = criterion(view1, view2, pred1, pred2) if criterion is not None else None + + result = dict(view1=view1, view2=view2, pred1=pred1, pred2=pred2, loss=loss) + + return result[ret] if ret else result + + +@torch.no_grad() +def inference(pairs, model, device, batch_size=8, verbose=True): + if verbose: + print(f'>> Inference with model on {len(pairs)} image pairs') + result = [] + + # first, check if all images have the same size + multiple_shapes = not (check_if_same_size(pairs)) + if multiple_shapes: # force bs=1 + batch_size = 1 + + for i in tqdm.trange(0, len(pairs), batch_size, disable=not verbose): + + res = loss_of_one_batch(collate_with_cat(pairs[i:i+batch_size]), model, None, device) + + result.append(to_cpu(res)) + + result = collate_with_cat(result, lists=multiple_shapes) + + return result + + +def check_if_same_size(pairs): + shapes1 = [img1['img'].shape[-2:] for img1, img2 in pairs] + shapes2 = [img2['img'].shape[-2:] for img1, img2 in pairs] + return all(shapes1[0] == s for s in shapes1) and all(shapes2[0] == s for s in shapes2) + + +def get_pred_pts3d(gt, pred, use_pose=False): + if 'depth' in pred and 'pseudo_focal' in pred: + try: + pp = gt['camera_intrinsics'][..., :2, 2] + except KeyError: + pp = None + pts3d = depthmap_to_pts3d(**pred, pp=pp) + + elif 'pts3d' in pred: + # pts3d from my camera + pts3d = pred['pts3d'] + + elif 'pts3d_in_other_view' in pred: + # pts3d from the other camera, already transformed + assert use_pose is True + return pred['pts3d_in_other_view'] # return! 
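+    # NOTE: the early return above is deliberate -- 'pts3d_in_other_view' is
+    # already expressed in view1's frame, so the camera_pose transform below
+    # must not be applied to it a second time; the 'depth'/'pts3d' branches
+    # predict in the local camera frame and may still be mapped via geotrf.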
+ + if use_pose: + camera_pose = pred.get('camera_pose') + assert camera_pose is not None + pts3d = geotrf(camera_pose, pts3d) + + return pts3d + + +def find_opt_scaling(gt_pts1, gt_pts2, pr_pts1, pr_pts2=None, fit_mode='weiszfeld_stop_grad', valid1=None, valid2=None): + assert gt_pts1.ndim == pr_pts1.ndim == 4 + assert gt_pts1.shape == pr_pts1.shape + if gt_pts2 is not None: + assert gt_pts2.ndim == pr_pts2.ndim == 4 + assert gt_pts2.shape == pr_pts2.shape + + # concat the pointcloud + nan_gt_pts1 = invalid_to_nans(gt_pts1, valid1).flatten(1, 2) + nan_gt_pts2 = invalid_to_nans(gt_pts2, valid2).flatten(1, 2) if gt_pts2 is not None else None + + pr_pts1 = invalid_to_nans(pr_pts1, valid1).flatten(1, 2) + pr_pts2 = invalid_to_nans(pr_pts2, valid2).flatten(1, 2) if pr_pts2 is not None else None + + all_gt = torch.cat((nan_gt_pts1, nan_gt_pts2), dim=1) if gt_pts2 is not None else nan_gt_pts1 + all_pr = torch.cat((pr_pts1, pr_pts2), dim=1) if pr_pts2 is not None else pr_pts1 + + dot_gt_pr = (all_pr * all_gt).sum(dim=-1) + dot_gt_gt = all_gt.square().sum(dim=-1) + + if fit_mode.startswith('avg'): + # scaling = (all_pr / all_gt).view(B, -1).mean(dim=1) + scaling = dot_gt_pr.nanmean(dim=1) / dot_gt_gt.nanmean(dim=1) + elif fit_mode.startswith('median'): + scaling = (dot_gt_pr / dot_gt_gt).nanmedian(dim=1).values + elif fit_mode.startswith('weiszfeld'): + # init scaling with l2 closed form + scaling = dot_gt_pr.nanmean(dim=1) / dot_gt_gt.nanmean(dim=1) + # iterative re-weighted least-squares + for iter in range(10): + # re-weighting by inverse of distance + dis = (all_pr - scaling.view(-1, 1, 1) * all_gt).norm(dim=-1) + # print(dis.nanmean(-1)) + w = dis.clip_(min=1e-8).reciprocal() + # update the scaling with the new weights + scaling = (w * dot_gt_pr).nanmean(dim=1) / (w * dot_gt_gt).nanmean(dim=1) + else: + raise ValueError(f'bad {fit_mode=}') + + if fit_mode.endswith('stop_grad'): + scaling = scaling.detach() + + scaling = scaling.clip(min=1e-3) + # assert scaling.isfinite().all(), bb() + return scaling diff --git a/dynamic_predictor/dust3r/losses.py b/dynamic_predictor/dust3r/losses.py new file mode 100644 index 0000000000000000000000000000000000000000..a20ccf1778ca146fda117cea9ef03ba946642734 --- /dev/null +++ b/dynamic_predictor/dust3r/losses.py @@ -0,0 +1,459 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). 
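(A tiny numeric sketch of the Weiszfeld-style scaling in find_opt_scaling above.
The synthetic tensors are illustrative, and the nanmean-based NaN handling is
omitted since this toy data has no invalid points: start from the closed-form L2
scale, then iteratively re-weight by inverse residual distance for a robust,
L1-like estimate.)

import torch

torch.manual_seed(0)
gt = torch.randn(1, 1000, 3)                       # (B, N, 3) reference points
pr = 2.5 * gt + 0.01 * torch.randn_like(gt)        # predictions at ~2.5x scale

dot_gt_pr = (pr * gt).sum(dim=-1)
dot_gt_gt = gt.square().sum(dim=-1)
s = dot_gt_pr.mean(dim=1) / dot_gt_gt.mean(dim=1)  # closed-form L2 init
for _ in range(10):                                # IRLS refinement
    dis = (pr - s.view(-1, 1, 1) * gt).norm(dim=-1)
    w = dis.clip(min=1e-8).reciprocal()            # weight = 1 / residual
    s = (w * dot_gt_pr).mean(dim=1) / (w * dot_gt_gt).mean(dim=1)
print(s)  # ~ tensor([2.5])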
+# +# -------------------------------------------------------- +# Implementation of DUSt3R training losses +# -------------------------------------------------------- +from copy import copy, deepcopy +import torch +import torch.nn as nn + +from dust3r.inference import get_pred_pts3d, find_opt_scaling +from dust3r.utils.geometry import inv, geotrf, normalize_pointcloud +from dust3r.utils.geometry import get_joint_pointcloud_depth, get_joint_pointcloud_center_scale + + +def Sum(*losses_and_masks): + loss, mask = losses_and_masks[0] + if loss.ndim > 0: + # we are actually returning the loss for every pixels + return losses_and_masks + else: + # we are returning the global loss + for loss2, mask2 in losses_and_masks[1:]: + loss = loss + loss2 + return loss + + +class BaseCriterion(nn.Module): + def __init__(self, reduction='mean'): + super().__init__() + self.reduction = reduction + + +class LLoss (BaseCriterion): + """ L-norm loss + """ + + def forward(self, a, b): + assert a.shape == b.shape and a.ndim >= 2 and 1 <= a.shape[-1] <= 3, f'Bad shape = {a.shape}' + dist = self.distance(a, b) + assert dist.ndim == a.ndim - 1 # one dimension less + if self.reduction == 'none': + return dist + if self.reduction == 'sum': + return dist.sum() + if self.reduction == 'mean': + return dist.mean() if dist.numel() > 0 else dist.new_zeros(()) + raise ValueError(f'bad {self.reduction=} mode') + + def distance(self, a, b): + raise NotImplementedError() + + +class L21Loss (LLoss): + """ Euclidean distance between 3d points """ + + def distance(self, a, b): + return torch.norm(a - b, dim=-1) # normalized L2 distance + + +L21 = L21Loss() + + +class Criterion (nn.Module): + def __init__(self, criterion=None): + super().__init__() + assert isinstance(criterion, BaseCriterion), f'{criterion} is not a proper criterion!' 
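+        # NOTE: copy() gives each wrapper its own criterion instance, so a
+        # reduction-mode change on one wrapper (e.g. via with_reduction) cannot
+        # leak into the shared module-level L21 object.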
+ self.criterion = copy(criterion) + + def get_name(self): + return f'{type(self).__name__}({self.criterion})' + + def with_reduction(self, mode='none'): + res = loss = deepcopy(self) + while loss is not None: + assert isinstance(loss, Criterion) + loss.criterion.reduction = mode # make it return the loss for each sample + loss = loss._loss2 # we assume loss is a Multiloss + return res + + +class MultiLoss (nn.Module): + """ Easily combinable losses (also keep track of individual loss values): + loss = MyLoss1() + 0.1*MyLoss2() + Usage: + Inherit from this class and override get_name() and compute_loss() + """ + + def __init__(self): + super().__init__() + self._alpha = 1 + self._loss2 = None + + def compute_loss(self, *args, **kwargs): + raise NotImplementedError() + + def get_name(self): + raise NotImplementedError() + + def __mul__(self, alpha): + assert isinstance(alpha, (int, float)) + res = copy(self) + res._alpha = alpha + return res + __rmul__ = __mul__ # same + + def __add__(self, loss2): + assert isinstance(loss2, MultiLoss) + res = cur = copy(self) + # find the end of the chain + while cur._loss2 is not None: + cur = cur._loss2 + cur._loss2 = loss2 + return res + + def __repr__(self): + name = self.get_name() + if self._alpha != 1: + name = f'{self._alpha:g}*{name}' + if self._loss2: + name = f'{name} + {self._loss2}' + return name + + def forward(self, *args, **kwargs): + loss = self.compute_loss(*args, **kwargs) + if isinstance(loss, tuple): + loss, details = loss + elif loss.ndim == 0: + details = {self.get_name(): float(loss)} + else: + details = {} + loss = loss * self._alpha + + if self._loss2: + loss2, details2 = self._loss2(*args, **kwargs) + loss = loss + loss2 + details |= details2 + + return loss, details + + +class Regr3D (Criterion, MultiLoss): + """ Ensure that all 3D points are correct. + Asymmetric loss: view1 is supposed to be the anchor. + + P1 = RT1 @ D1, point clouds at world coord_frame + P2 = RT2 @ D2 + loss1 = (I @ pred_D1) - (RT1^-1 @ RT1 @ D1), I is identical matrix + loss2 = (RT21 @ pred_D2) - (RT1^-1 @ P2) + = (RT21 @ pred_D2) - (RT1^-1 @ RT2 @ D2) + """ + + def __init__(self, criterion, norm_mode='avg_dis', gt_scale=False): + super().__init__(criterion) + self.norm_mode = norm_mode + self.gt_scale = gt_scale + + def get_all_pts3d(self, gt1, gt2, pred1, pred2, dist_clip=None): + # everything is normalized w.r.t. 
camera of view1 + in_camera1 = inv(gt1['camera_pose']) + gt_pts1 = geotrf(in_camera1, gt1['pts3d']) # B,H,W,3 + gt_pts2 = geotrf(in_camera1, gt2['pts3d']) # B,H,W,3 + + valid1 = gt1['valid_mask'].clone() + valid2 = gt2['valid_mask'].clone() + + if dist_clip is not None: + # points that are too far-away == invalid + dis1 = gt_pts1.norm(dim=-1) # (B, H, W) + dis2 = gt_pts2.norm(dim=-1) # (B, H, W) + valid1 = valid1 & (dis1 <= dist_clip) + valid2 = valid2 & (dis2 <= dist_clip) + + pr_pts1 = get_pred_pts3d(gt1, pred1, use_pose=False) + pr_pts2 = get_pred_pts3d(gt2, pred2, use_pose=True) + + # normalize 3d points + if self.norm_mode: + pr_pts1, pr_pts2 = normalize_pointcloud(pr_pts1, pr_pts2, self.norm_mode, valid1, valid2) + if self.norm_mode and not self.gt_scale: + gt_pts1, gt_pts2 = normalize_pointcloud(gt_pts1, gt_pts2, self.norm_mode, valid1, valid2) + + return gt_pts1, gt_pts2, pr_pts1, pr_pts2, valid1, valid2, {} + + def compute_loss(self, gt1, gt2, pred1, pred2, **kw): + gt_pts1, gt_pts2, pred_pts1, pred_pts2, mask1, mask2, monitoring = \ + self.get_all_pts3d(gt1, gt2, pred1, pred2, **kw) + # loss on img1 side + l1 = self.criterion(pred_pts1[mask1], gt_pts1[mask1]) + # loss on gt2 side + l2 = self.criterion(pred_pts2[mask2], gt_pts2[mask2]) + self_name = type(self).__name__ + details = {self_name + '_pts3d_1': float(l1.mean()), self_name + '_pts3d_2': float(l2.mean())} + return Sum((l1, mask1), (l2, mask2)), (details | monitoring) + +class Regr3D_MMask (Criterion, MultiLoss): + """ Ensure that all 3D points are correct. + Asymmetric loss: view1 is supposed to be the anchor. + + P1 = RT1 @ D1, point clouds at world coord_frame + P2 = RT2 @ D2 + loss1 = (I @ pred_D1) - (RT1^-1 @ RT1 @ D1), I is identical matrix + loss2 = (RT21 @ pred_D2) - (RT1^-1 @ P2) + = (RT21 @ pred_D2) - (RT1^-1 @ RT2 @ D2) + """ + + def __init__(self, criterion, norm_mode='avg_dis', gt_scale=False): + super().__init__(criterion) + self.norm_mode = norm_mode + self.gt_scale = gt_scale + self.mask_loss = nn.BCELoss() + + def get_all_pts3d(self, gt1, gt2, pred1, pred2, dist_clip=None): + # everything is normalized w.r.t. 
camera of view1 + in_camera1 = inv(gt1['camera_pose']) + gt_pts1 = geotrf(in_camera1, gt1['pts3d']) # B,H,W,3 + gt_pts2 = geotrf(in_camera1, gt2['pts3d']) # B,H,W,3 + + valid1 = gt1['valid_mask'].clone() + valid2 = gt2['valid_mask'].clone() + + if dist_clip is not None: + # points that are too far-away == invalid + dis1 = gt_pts1.norm(dim=-1) # (B, H, W) + dis2 = gt_pts2.norm(dim=-1) # (B, H, W) + valid1 = valid1 & (dis1 <= dist_clip) + valid2 = valid2 & (dis2 <= dist_clip) + + pr_pts1 = get_pred_pts3d(gt1, pred1, use_pose=False) + pr_pts2 = get_pred_pts3d(gt2, pred2, use_pose=True) + + # normalize 3d points + if self.norm_mode: + pr_pts1, pr_pts2 = normalize_pointcloud(pr_pts1, pr_pts2, self.norm_mode, valid1, valid2) + if self.norm_mode and not self.gt_scale: + gt_pts1, gt_pts2 = normalize_pointcloud(gt_pts1, gt_pts2, self.norm_mode, valid1, valid2) + + gt_mmask_pts1, gt_mmask_pts2 = gt1['dynamic_mask'], gt2['dynamic_mask'] + mmask_pts1, mmask_pts2 = pred1['dynamic_mask'], pred2['dynamic_mask'] + # try: + # assert torch.all((gt_mmask_pts1 >= 0) & (gt_mmask_pts1 <= 1)), "All elements in gt_mmask_pts1 must be in the range 0-1" + # except AssertionError: + # print(f"gt_mmask_pts1 min: {gt_mmask_pts1.min()}, max: {gt_mmask_pts1.max()}") + # raise + + # try: + # assert torch.all((gt_mmask_pts2 >= 0) & (gt_mmask_pts2 <= 1)), "All elements in gt_mmask_pts2 must be in the range 0-1" + # except AssertionError: + # print(f"gt_mmask_pts2 min: {gt_mmask_pts2.min()}, max: {gt_mmask_pts2.max()}") + # raise + + # try: + # assert torch.all((mmask_pts1 >= 0) & (mmask_pts1 <= 1)), "All elements in mmask_pts1 must be in the range 0-1" + # except AssertionError: + # print(f"mmask_pts1 min: {mmask_pts1.min()}, max: {mmask_pts1.max()}") + # raise + + # try: + # assert torch.all((mmask_pts2 >= 0) & (mmask_pts2 <= 1)), "All elements in mmask_pts2 must be in the range 0-1" + # except AssertionError: + # print(f"mmask_pts2 min: {mmask_pts2.min()}, max: {mmask_pts2.max()}") + # raise + + + + return gt_pts1, gt_pts2, pr_pts1, pr_pts2, gt_mmask_pts1, gt_mmask_pts2, mmask_pts1, mmask_pts2, valid1, valid2, {} + + def compute_loss(self, gt1, gt2, pred1, pred2, **kw): + self.mask_loss.reduction = self.criterion.reduction + gt_pts1, gt_pts2, pred_pts1, pred_pts2, gt_mmask_pts1, gt_mmask_pts2, mmask_pts1, mmask_pts2, mask1, mask2, monitoring = \ + self.get_all_pts3d(gt1, gt2, pred1, pred2, **kw) + + # Check the valid mask (whether the element is nan of gt_mmask_pts1) + mask1 = mask1 & ~torch.isnan(gt_mmask_pts1) + mask2 = mask2 & ~torch.isnan(gt_mmask_pts2) + + # loss on img1 side + l1 = self.criterion(pred_pts1[mask1], gt_pts1[mask1]) + mmask_l1 = self.mask_loss(mmask_pts1[mask1], gt_mmask_pts1[mask1].float()) + + # loss on gt2 side + l2 = self.criterion(pred_pts2[mask2], gt_pts2[mask2]) + mmask_l2 = self.mask_loss(mmask_pts2[mask2], gt_mmask_pts2[mask2].float()) + + self_name = type(self).__name__ + details = {self_name + '_pts3d_1': float(l1.mean()), self_name + '_pts3d_2': float(l2.mean()), + self_name + '_mmask_1': float(mmask_l1.mean()), self_name + '_mmask_2': float(mmask_l2.mean())} + return Sum((l1, mask1), (l2, mask2), (mmask_l1, mask1), (mmask_l2, mask2)), (details | monitoring) + +class ConfLoss (MultiLoss): + """ Weighted regression by learned confidence. + Assuming the input pixel_loss is a pixel-level regression loss. 
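+
+    Concretely (matching compute_loss below): each per-pixel loss x becomes
+    x * conf - alpha * log(conf), which is minimized at conf = alpha / x, so
+    high confidence only pays off where the regression loss is already low.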
+ + Principle: + high-confidence means high conf = 0.1 ==> conf_loss = x / 10 + alpha*log(10) + low confidence means low conf = 10 ==> conf_loss = x * 10 - alpha*log(10) + + alpha: hyperparameter + """ + + def __init__(self, pixel_loss, alpha=1): + super().__init__() + assert alpha > 0 + self.alpha = alpha + self.pixel_loss = pixel_loss.with_reduction('none') + + def get_name(self): + return f'ConfLoss({self.pixel_loss})' + + def get_conf_log(self, x): + return x, torch.log(x) + + def compute_loss(self, gt1, gt2, pred1, pred2, **kw): + # compute per-pixel loss + ((loss1, msk1), (loss2, msk2), (mmask_l1, mask1), (mmask_l2, mask2)), details = self.pixel_loss(gt1, gt2, pred1, pred2, **kw) + if loss1.numel() == 0: + print('NO VALID POINTS in img1', force=True) + if loss2.numel() == 0: + print('NO VALID POINTS in img2', force=True) + + # weight by confidence + conf1, log_conf1 = self.get_conf_log(pred1['conf'][msk1]) + conf2, log_conf2 = self.get_conf_log(pred2['conf'][msk2]) + conf_loss1 = loss1 * conf1 - self.alpha * log_conf1 + conf_loss2 = loss2 * conf2 - self.alpha * log_conf2 + + conf_mmask_l1 = mmask_l1 * conf1 - self.alpha * log_conf1 + conf_mmask_l2 = mmask_l2 * conf2 - self.alpha * log_conf2 + + # average + nan protection (in case of no valid pixels at all) + conf_loss1 = conf_loss1.mean() if conf_loss1.numel() > 0 else 0 + conf_loss2 = conf_loss2.mean() if conf_loss2.numel() > 0 else 0 + + # average + nan protection (in case of no valid pixels at all) + conf_mmask_l1 = conf_mmask_l1.mean() if conf_mmask_l1.numel() > 0 else 0 + conf_mmask_l2 = conf_mmask_l2.mean() if conf_mmask_l2.numel() > 0 else 0 + + return conf_loss1 + conf_loss2 + conf_mmask_l1 + conf_mmask_l2, dict(conf_loss_1=float(conf_loss1), conf_loss2=float(conf_loss2),conf_mmask_loss_1=float(conf_mmask_l1), conf_mmask_loss_2=float(conf_mmask_l2), **details) + + +class Regr3D_ShiftInv (Regr3D): + """ Same than Regr3D but invariant to depth shift. + """ + + def get_all_pts3d(self, gt1, gt2, pred1, pred2): + # compute unnormalized points + gt_pts1, gt_pts2, pred_pts1, pred_pts2, mask1, mask2, monitoring = \ + super().get_all_pts3d(gt1, gt2, pred1, pred2) + + # compute median depth + gt_z1, gt_z2 = gt_pts1[..., 2], gt_pts2[..., 2] + pred_z1, pred_z2 = pred_pts1[..., 2], pred_pts2[..., 2] + gt_shift_z = get_joint_pointcloud_depth(gt_z1, gt_z2, mask1, mask2)[:, None, None] + pred_shift_z = get_joint_pointcloud_depth(pred_z1, pred_z2, mask1, mask2)[:, None, None] + + # subtract the median depth + gt_z1 -= gt_shift_z + gt_z2 -= gt_shift_z + pred_z1 -= pred_shift_z + pred_z2 -= pred_shift_z + + # monitoring = dict(monitoring, gt_shift_z=gt_shift_z.mean().detach(), pred_shift_z=pred_shift_z.mean().detach()) + return gt_pts1, gt_pts2, pred_pts1, pred_pts2, mask1, mask2, monitoring + + +class Regr3D_ScaleInv (Regr3D): + """ Same than Regr3D but invariant to depth shift. 
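+    (Note: despite the wording carried over from Regr3D_ShiftInv, this subclass
+    normalizes the global point-cloud *scale* -- see get_all_pts3d below.)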
+ if gt_scale == True: enforce the prediction to take the same scale than GT + """ + + def get_all_pts3d(self, gt1, gt2, pred1, pred2): + # compute depth-normalized points + gt_pts1, gt_pts2, pred_pts1, pred_pts2, mask1, mask2, monitoring = super().get_all_pts3d(gt1, gt2, pred1, pred2) + + # measure scene scale + _, gt_scale = get_joint_pointcloud_center_scale(gt_pts1, gt_pts2, mask1, mask2) + _, pred_scale = get_joint_pointcloud_center_scale(pred_pts1, pred_pts2, mask1, mask2) + + # prevent predictions to be in a ridiculous range + pred_scale = pred_scale.clip(min=1e-3, max=1e3) + + # subtract the median depth + if self.gt_scale: + pred_pts1 *= gt_scale / pred_scale + pred_pts2 *= gt_scale / pred_scale + # monitoring = dict(monitoring, pred_scale=(pred_scale/gt_scale).mean()) + else: + gt_pts1 /= gt_scale + gt_pts2 /= gt_scale + pred_pts1 /= pred_scale + pred_pts2 /= pred_scale + # monitoring = dict(monitoring, gt_scale=gt_scale.mean(), pred_scale=pred_scale.mean().detach()) + + return gt_pts1, gt_pts2, pred_pts1, pred_pts2, mask1, mask2, monitoring + + +class Regr3D_ScaleShiftInv (Regr3D_ScaleInv, Regr3D_ShiftInv): + # calls Regr3D_ShiftInv first, then Regr3D_ScaleInv + pass + + +class Regr3D_ShiftInv_MMask (Regr3D_MMask): + """ Same than Regr3D but invariant to depth shift. + """ + + def get_all_pts3d(self, gt1, gt2, pred1, pred2): + # compute unnormalized points + gt_pts1, gt_pts2, pred_pts1, pred_pts2, gt_mmask_pts1, gt_mmask_pts2, mmask_pts1, mmask_pts2, mask1, mask2, monitoring = \ + super().get_all_pts3d(gt1, gt2, pred1, pred2) + + # compute median depth + gt_z1, gt_z2 = gt_pts1[..., 2], gt_pts2[..., 2] + pred_z1, pred_z2 = pred_pts1[..., 2], pred_pts2[..., 2] + gt_shift_z = get_joint_pointcloud_depth(gt_z1, gt_z2, mask1, mask2)[:, None, None] + pred_shift_z = get_joint_pointcloud_depth(pred_z1, pred_z2, mask1, mask2)[:, None, None] + + # subtract the median depth + gt_z1 -= gt_shift_z + gt_z2 -= gt_shift_z + pred_z1 -= pred_shift_z + pred_z2 -= pred_shift_z + + # monitoring = dict(monitoring, gt_shift_z=gt_shift_z.mean().detach(), pred_shift_z=pred_shift_z.mean().detach()) + return gt_pts1, gt_pts2, pred_pts1, pred_pts2, gt_mmask_pts1, gt_mmask_pts2, mmask_pts1, mmask_pts2, mask1, mask2, monitoring + + +class Regr3D_ScaleInv_MMask (Regr3D_MMask): + """ Same than Regr3D but invariant to depth shift. 
+ if gt_scale == True: enforce the prediction to take the same scale than GT + """ + + def get_all_pts3d(self, gt1, gt2, pred1, pred2): + # compute depth-normalized points + gt_pts1, gt_pts2, pred_pts1, pred_pts2, gt_mmask_pts1, gt_mmask_pts2, mmask_pts1, mmask_pts2, mask1, mask2, monitoring = super().get_all_pts3d(gt1, gt2, pred1, pred2) + + # measure scene scale + _, gt_scale = get_joint_pointcloud_center_scale(gt_pts1, gt_pts2, mask1, mask2) + _, pred_scale = get_joint_pointcloud_center_scale(pred_pts1, pred_pts2, mask1, mask2) + + # prevent predictions to be in a ridiculous range + pred_scale = pred_scale.clip(min=1e-3, max=1e3) + + # subtract the median depth + if self.gt_scale: + pred_pts1 *= gt_scale / pred_scale + pred_pts2 *= gt_scale / pred_scale + # monitoring = dict(monitoring, pred_scale=(pred_scale/gt_scale).mean()) + else: + gt_pts1 /= gt_scale + gt_pts2 /= gt_scale + pred_pts1 /= pred_scale + pred_pts2 /= pred_scale + # monitoring = dict(monitoring, gt_scale=gt_scale.mean(), pred_scale=pred_scale.mean().detach()) + + return gt_pts1, gt_pts2, pred_pts1, pred_pts2, gt_mmask_pts1, gt_mmask_pts2, mmask_pts1, mmask_pts2, mask1, mask2, monitoring + +class Regr3D_ScaleShiftInv_MMask (Regr3D_ScaleInv_MMask, Regr3D_ShiftInv_MMask): + # calls Regr3D_ShiftInv first, then Regr3D_ScaleInv + pass diff --git a/dynamic_predictor/dust3r/model.py b/dynamic_predictor/dust3r/model.py new file mode 100644 index 0000000000000000000000000000000000000000..826be67d6a5d5f3fce621b7abef5a3718548ca70 --- /dev/null +++ b/dynamic_predictor/dust3r/model.py @@ -0,0 +1,228 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). +# +# -------------------------------------------------------- +# DUSt3R model class +# -------------------------------------------------------- +from copy import deepcopy +import torch +import os +from packaging import version +import huggingface_hub + +from .utils.misc import fill_default_args, freeze_all_params, is_symmetrized, interleave, transpose_to_landscape +from .heads import head_factory +from dust3r.patch_embed import get_patch_embed, ManyAR_PatchEmbed +from third_party.raft import load_RAFT +import dust3r.utils.path_to_croco # noqa: F401 +from models.croco import CroCoNet # noqa +from einops import rearrange, repeat, reduce +from einops.layers.torch import Rearrange, Reduce +inf = float('inf') + +hf_version_number = huggingface_hub.__version__ +assert version.parse(hf_version_number) >= version.parse("0.22.0"), "Outdated huggingface_hub version, please reinstall requirements.txt" + +def load_model(model_path, device, verbose=True): + if verbose: + print('... loading model from', model_path) + ckpt = torch.load(model_path, map_location='cpu') + args = ckpt['args'].model.replace("ManyAR_PatchEmbed", "PatchEmbedDust3R") + if 'landscape_only' not in args: + args = args[:-1] + ', landscape_only=False)' + else: + args = args.replace(" ", "").replace('landscape_only=True', 'landscape_only=False') + assert "landscape_only=False" in args + if verbose: + print(f"instantiating : {args}") + net = eval(args) + s = net.load_state_dict(ckpt['model'], strict=False) + if verbose: + print(s) + return net.to(device) + + +class AsymmetricCroCo3DStereo ( + CroCoNet, + huggingface_hub.PyTorchModelHubMixin, + library_name="das3r", + repo_url="https://github.com/kai422/DAS3R", + tags=["image-to-3d"], +): + """ Two siamese encoders, followed by two decoders. 
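+    Compared to stock DUSt3R, this variant also builds two dynamic-mask DPT
+    heads (downstream_head_dynamic_mask1/2 in set_downstream_head); their
+    sigmoid outputs are returned as 'dynamic_mask' alongside pts3d and conf.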
+ The goal is to output 3d points directly, both images in view1's frame + (hence the asymmetry). + """ + + def __init__(self, + output_mode='pts3d', + head_type='dpt', + depth_mode=('exp', -inf, inf), + conf_mode=('exp', 1, inf), + mask_mode=('sigmoid', 0, 1), + freeze='none', + landscape_only=True, + patch_embed_cls='PatchEmbedDust3R', # PatchEmbedDust3R or ManyAR_PatchEmbed + vn_layer = True, + **croco_kwargs): + self.patch_embed_cls = patch_embed_cls + self.croco_args = fill_default_args(croco_kwargs, super().__init__) + super().__init__(**croco_kwargs) + + # dust3r specific initialization + self.dec_blocks2 = deepcopy(self.dec_blocks) + self.set_downstream_head(output_mode, head_type, landscape_only, depth_mode, conf_mode, mask_mode, **croco_kwargs) + self.set_freeze(freeze) + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, **kw): + if os.path.isfile(pretrained_model_name_or_path): + return load_model(pretrained_model_name_or_path, device='cpu') + else: + return super(AsymmetricCroCo3DStereo, cls).from_pretrained(pretrained_model_name_or_path, **kw) + + def _set_patch_embed(self, img_size=224, patch_size=16, enc_embed_dim=768): + self.patch_embed = get_patch_embed(self.patch_embed_cls, img_size, patch_size, enc_embed_dim) + + def load_state_dict(self, ckpt, **kw): + # duplicate all weights for the second decoder if not present + new_ckpt = dict(ckpt) + if not any(k.startswith('dec_blocks2') for k in ckpt): + for key, value in ckpt.items(): + if key.startswith('dec_blocks'): + new_ckpt[key.replace('dec_blocks', 'dec_blocks2')] = value + return super().load_state_dict(new_ckpt, **kw) + + def set_freeze(self, freeze): # this is for use by downstream models + self.freeze = freeze + to_be_frozen = { + 'none': [], + 'mask': [self.mask_token], + 'encoder': [self.mask_token, self.patch_embed, self.enc_blocks], + 'encoder_and_decoder': [self.mask_token, self.patch_embed, self.enc_blocks, self.dec_blocks, self.dec_blocks2], + 'encoder_and_3d_predictor': [self.mask_token, self.patch_embed, self.enc_blocks, self.dec_blocks, self.dec_blocks2, self.enc_norm, self.decoder_embed, self. 
dec_norm, self.downstream_head1, self.downstream_head2] + } + freeze_all_params(to_be_frozen[freeze]) + print(f'Freezing {freeze} parameters') + + def _set_prediction_head(self, *args, **kwargs): + """ No prediction head """ + return + + def set_downstream_head(self, output_mode, head_type, landscape_only, depth_mode, conf_mode, mask_mode, patch_size, img_size, + **kw): + if type(img_size) is int: + img_size = (img_size, img_size) + assert img_size[0] % patch_size == 0 and img_size[1] % patch_size == 0, \ + f'{img_size=} must be multiple of {patch_size=}' + self.output_mode = output_mode + self.head_type = head_type + self.depth_mode = depth_mode + self.conf_mode = conf_mode + self.mask_mode = mask_mode + # allocate heads + self.downstream_head_dynamic_mask1 = head_factory(head_type, output_mode, self, has_mask=bool(mask_mode)) + self.downstream_head_dynamic_mask2 = head_factory(head_type, output_mode, self, has_mask=bool(mask_mode)) + + self.downstream_head1 = head_factory(head_type, output_mode, self, has_conf=bool(conf_mode)) + self.downstream_head2 = head_factory(head_type, output_mode, self, has_conf=bool(conf_mode)) + + # magic wrapper + self.head1 = transpose_to_landscape(self.downstream_head1, activate=landscape_only) + self.head2 = transpose_to_landscape(self.downstream_head2, activate=landscape_only) + self.head_dynamic_mask1 = transpose_to_landscape(self.downstream_head_dynamic_mask1, activate=landscape_only) + self.head_dynamic_mask2 = transpose_to_landscape(self.downstream_head_dynamic_mask2, activate=landscape_only) + + def _encode_image(self, image, true_shape): + # embed the image into patches (x has size B x Npatches x C) + x, pos = self.patch_embed(image, true_shape=true_shape) + # x (B, 576, 1024) pos (B, 576, 2); patch_size=16 + B,N,C = x.size() + posvis = pos + # add positional embedding without cls token + assert self.enc_pos_embed is None + # TODO: where to add mask for the patches + # now apply the transformer encoder and normalization + for blk in self.enc_blocks: + x = blk(x, posvis) + + x = self.enc_norm(x) + return x, pos, None + + def _encode_image_pairs(self, img1, img2, true_shape1, true_shape2): + if img1.shape[-2:] == img2.shape[-2:]: + out, pos, _ = self._encode_image(torch.cat((img1, img2), dim=0), + torch.cat((true_shape1, true_shape2), dim=0)) + out, out2 = out.chunk(2, dim=0) + pos, pos2 = pos.chunk(2, dim=0) + else: + out, pos, _ = self._encode_image(img1, true_shape1) + out2, pos2, _ = self._encode_image(img2, true_shape2) + return out, out2, pos, pos2 + + def _encode_symmetrized(self, view1, view2): + img1 = view1['img'] + img2 = view2['img'] + B = img1.shape[0] + + # Recover true_shape when available, otherwise assume that the img shape is the true one + shape1 = view1.get('true_shape', torch.tensor(img1.shape[-2:])[None].repeat(B, 1)) + shape2 = view2.get('true_shape', torch.tensor(img2.shape[-2:])[None].repeat(B, 1)) + + # warning! maybe the images have different portrait/landscape orientations + if is_symmetrized(view1, view2): + # computing half of forward pass!' 
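+            # NOTE: the symmetrized batch interleaves pairs as (a,b),(b,a), so
+            # encoding every second sample ([::2]) covers each unique image
+            # once; interleave() then restores the full batch order.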
+ feat1, feat2, pos1, pos2 = self._encode_image_pairs(img1[::2], img2[::2], shape1[::2], shape2[::2]) + feat1, feat2 = interleave(feat1, feat2) + pos1, pos2 = interleave(pos1, pos2) + else: + feat1, feat2, pos1, pos2 = self._encode_image_pairs(img1, img2, shape1, shape2) + + return (shape1, shape2), (feat1, feat2), (pos1, pos2) + + def _decoder(self, f1, pos1, f2, pos2): + final_output = [(f1, f2)] # before projection + original_D = f1.shape[-1] + + # project to decoder dim + f1 = self.decoder_embed(f1) + f2 = self.decoder_embed(f2) # Linear(in_features=1024, out_features=768, bias=True) + + final_output.append((f1, f2)) + for blk1, blk2 in zip(self.dec_blocks, self.dec_blocks2): + # img1 side + f1, _ = blk1(*final_output[-1][::+1], pos1, pos2) + # img2 side + f2, _ = blk2(*final_output[-1][::-1], pos2, pos1) + # store the result + final_output.append((f1, f2)) + + # normalize last output + del final_output[1] # duplicate with final_output[0] + final_output[-1] = tuple(map(self.dec_norm, final_output[-1])) + return zip(*final_output) + + def _downstream_head(self, head_id, decout, img_shape): + B, S, D = decout[-1].shape + # img_shape = tuple(map(int, img_shape)) + head = getattr(self, f'head{head_id}') + return head(decout, img_shape) + + def forward(self, view1, view2): + # encode the two images --> B,S,D + with torch.no_grad(): + (shape1, shape2), (feat1, feat2), (pos1, pos2) = self._encode_symmetrized(view1, view2) + # combine all ref images into object-centric representation + dec1, dec2 = self._decoder(feat1, pos1, feat2, pos2) + res1 = self._downstream_head(1, [tok.float() for tok in dec1], shape1) + res2 = self._downstream_head(2, [tok.float() for tok in dec2], shape2) + + with torch.amp.autocast(enabled=False, device_type="cuda"): + mask_1 = self._downstream_head('_dynamic_mask1', [tok.float() for tok in dec1], shape1) + mask_2 = self._downstream_head('_dynamic_mask2', [tok.float() for tok in dec2], shape2) + + res2['pts3d_in_other_view'] = res2.pop('pts3d') # predict view2's pts3d in view1's frame + res1['dynamic_mask'] = mask_1['mask'] + res2['dynamic_mask'] = mask_2['mask'] + + return res1, res2 diff --git a/dynamic_predictor/dust3r/optim_factory.py b/dynamic_predictor/dust3r/optim_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..9b9c16e0e0fda3fd03c3def61abc1f354f75c584 --- /dev/null +++ b/dynamic_predictor/dust3r/optim_factory.py @@ -0,0 +1,14 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). +# +# -------------------------------------------------------- +# optimization functions +# -------------------------------------------------------- + + +def adjust_learning_rate_by_lr(optimizer, lr): + for param_group in optimizer.param_groups: + if "lr_scale" in param_group: + param_group["lr"] = lr * param_group["lr_scale"] + else: + param_group["lr"] = lr diff --git a/dynamic_predictor/dust3r/patch_embed.py b/dynamic_predictor/dust3r/patch_embed.py new file mode 100644 index 0000000000000000000000000000000000000000..e8d6c5db714e7ef705378a0dfa4e7f9882d0d08c --- /dev/null +++ b/dynamic_predictor/dust3r/patch_embed.py @@ -0,0 +1,70 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). 
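+#
+# Usage sketch (all names from this file): get_patch_embed('ManyAR_PatchEmbed',
+# img_size=512, patch_size=16, enc_embed_dim=1024) builds the embedder that
+# supports mixed portrait/landscape batches via a per-image true_shape tensor.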
+# +# -------------------------------------------------------- +# PatchEmbed implementation for DUST3R, +# in particular ManyAR_PatchEmbed that Handle images with non-square aspect ratio +# -------------------------------------------------------- +import torch +import dust3r.utils.path_to_croco # noqa: F401 +from models.blocks import PatchEmbed # noqa + + +def get_patch_embed(patch_embed_cls, img_size, patch_size, enc_embed_dim): + assert patch_embed_cls in ['PatchEmbedDust3R', 'ManyAR_PatchEmbed'] + patch_embed = eval(patch_embed_cls)(img_size, patch_size, 3, enc_embed_dim) + return patch_embed + + +class PatchEmbedDust3R(PatchEmbed): + def forward(self, x, **kw): + B, C, H, W = x.shape + assert H % self.patch_size[0] == 0, f"Input image height ({H}) is not a multiple of patch size ({self.patch_size[0]})." + assert W % self.patch_size[1] == 0, f"Input image width ({W}) is not a multiple of patch size ({self.patch_size[1]})." + x = self.proj(x) + pos = self.position_getter(B, x.size(2), x.size(3), x.device) + if self.flatten: + x = x.flatten(2).transpose(1, 2) # BCHW -> BNC + x = self.norm(x) + return x, pos + + +class ManyAR_PatchEmbed (PatchEmbed): + """ Handle images with non-square aspect ratio. + All images in the same batch have the same aspect ratio. + true_shape = [(height, width) ...] indicates the actual shape of each image. + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True, init='xavier'): + self.embed_dim = embed_dim + super().__init__(img_size, patch_size, in_chans, embed_dim, norm_layer, flatten, init) + + def forward(self, img, true_shape): + B, C, H, W = img.shape + assert W >= H, f'img should be in landscape mode, but got {W=} {H=}' + assert H % self.patch_size[0] == 0, f"Input image height ({H}) is not a multiple of patch size ({self.patch_size[0]})." + assert W % self.patch_size[1] == 0, f"Input image width ({W}) is not a multiple of patch size ({self.patch_size[1]})." 
+ assert true_shape.shape == (B, 2), f"true_shape has the wrong shape={true_shape.shape}" + + # size expressed in tokens + W //= self.patch_size[0] + H //= self.patch_size[1] + n_tokens = H * W + + height, width = true_shape.T + is_landscape = (width >= height) + is_portrait = ~is_landscape + + # allocate result + x = img.new_zeros((B, n_tokens, self.embed_dim)) + pos = img.new_zeros((B, n_tokens, 2), dtype=torch.int64) + + # linear projection, transposed if necessary + x[is_landscape] = self.proj(img[is_landscape]).permute(0, 2, 3, 1).flatten(1, 2).float() + x[is_portrait] = self.proj(img[is_portrait].swapaxes(-1, -2)).permute(0, 2, 3, 1).flatten(1, 2).float() + + pos[is_landscape] = self.position_getter(1, H, W, pos.device) + pos[is_portrait] = self.position_getter(1, W, H, pos.device) + + x = self.norm(x) + return x, pos diff --git a/dynamic_predictor/dust3r/pose_eval.py b/dynamic_predictor/dust3r/pose_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..9c97f397fa0243ccd460c370f63ac3241dc2c756 --- /dev/null +++ b/dynamic_predictor/dust3r/pose_eval.py @@ -0,0 +1,330 @@ +import os +import math +import cv2 +import numpy as np +import torch +from dust3r.utils.vo_eval import load_traj, eval_metrics, plot_trajectory, save_trajectory_tum_format, process_directory, calculate_averages +import croco.utils.misc as misc +import torch.distributed as dist +from tqdm import tqdm +from dust3r.cloud_opt import global_aligner, GlobalAlignerMode +from dust3r.utils.image import load_images, rgb, enlarge_seg_masks +from dust3r.image_pairs import make_pairs +from dust3r.inference import inference +# from dust3r.demo import get_3D_model_from_scene +import dust3r.eval_metadata +from dust3r.eval_metadata import dataset_metadata + + +def eval_pose_estimation(args, model, device, save_dir=None): + metadata = dataset_metadata.get(args.eval_dataset, dataset_metadata['sintel']) + img_path = metadata['img_path'] + mask_path = metadata['mask_path'] + + ate_mean, rpe_trans_mean, rpe_rot_mean, outfile_list, bug = eval_pose_estimation_dist( + args, model, device, save_dir=save_dir, img_path=img_path, mask_path=mask_path + ) + return ate_mean, rpe_trans_mean, rpe_rot_mean, outfile_list, bug + + + + + +def eval_pose_estimation_dist(args, model, device, img_path, save_dir=None, mask_path=None): + + metadata = dataset_metadata.get(args.eval_dataset, dataset_metadata['sintel']) + anno_path = metadata.get('anno_path', None) + + silent = args.silent + seq_list = args.seq_list + if seq_list is None: + if metadata.get('full_seq', False): + args.full_seq = True + else: + seq_list = metadata.get('seq_list', []) + if args.full_seq: + seq_list = os.listdir(img_path) + seq_list = [seq for seq in seq_list if os.path.isdir(os.path.join(img_path, seq))] + seq_list = sorted(seq_list) + + if save_dir is None: + save_dir = args.output_dir + + # Split seq_list across processes + if misc.is_dist_avail_and_initialized(): + rank = dist.get_rank() + world_size = dist.get_world_size() + else: + rank = 0 + world_size = 1 + + total_seqs = len(seq_list) + seqs_per_proc = (total_seqs + world_size - 1) // world_size # Ceiling division + + start_idx = rank * seqs_per_proc + end_idx = min(start_idx + seqs_per_proc, total_seqs) + + seq_list = seq_list[start_idx:end_idx] + + ate_list = [] + rpe_trans_list = [] + rpe_rot_list = [] + outfile_list = [] + load_img_size = 512 + + error_log_path = f'{save_dir}/_error_log_{rank}.txt' # Unique log file per process + bug = False + + for seq in tqdm(seq_list): + try: + dir_path = 
metadata['dir_path_func'](img_path, seq) + + # Handle skip_condition + skip_condition = metadata.get('skip_condition', None) + if skip_condition is not None and skip_condition(save_dir, seq): + continue + + mask_path_seq_func = metadata.get('mask_path_seq_func', lambda mask_path, seq: None) + mask_path_seq = mask_path_seq_func(mask_path, seq) + + filelist = [os.path.join(dir_path, name) for name in os.listdir(dir_path)] + filelist.sort() + if args.evaluate_davis: + filelist = filelist[:50] + filelist = filelist[::args.pose_eval_stride] + max_winsize = max(1, math.ceil((len(filelist)-1)/2)) + scene_graph_type = args.scene_graph_type + if int(scene_graph_type.split('-')[1]) > max_winsize: + scene_graph_type = f'{args.scene_graph_type.split("-")[0]}-{max_winsize}' + if len(scene_graph_type.split("-")) > 2: + scene_graph_type += f'-{args.scene_graph_type.split("-")[2]}' + imgs = load_images( + filelist, size=load_img_size, verbose=False, + dynamic_mask_root=mask_path_seq, crop=not args.no_crop + ) + print(f'Processing {seq} with {len(imgs)} images') + if args.eval_dataset == 'davis' and len(imgs) > 95: + # use swinstride-4 + scene_graph_type = scene_graph_type.replace('5', '4') + pairs = make_pairs( + imgs, scene_graph=scene_graph_type, prefilter=None, symmetrize=True + ) + # + + output = inference(pairs, model, device, batch_size=1, verbose=not silent) + + torch.cuda.empty_cache() + + with torch.enable_grad(): + if len(imgs) > 2: + mode = GlobalAlignerMode.PointCloudOptimizer + scene = global_aligner( + output, device=device, mode=mode, verbose=not silent, + shared_focal=not args.not_shared_focal and not args.use_gt_focal, + flow_loss_weight=args.flow_loss_weight, flow_loss_fn=args.flow_loss_fn, + depth_regularize_weight=args.depth_regularize_weight, + num_total_iter=args.n_iter, temporal_smoothing_weight=args.temporal_smoothing_weight, + motion_mask_thre=args.motion_mask_thre, + flow_loss_start_epoch=args.flow_loss_start_epoch, flow_loss_thre=args.flow_loss_thre, translation_weight=args.translation_weight, + sintel_ckpt=args.eval_dataset == 'sintel', use_gt_mask = args.use_gt_mask, use_pred_mask = args.use_pred_mask, sam2_mask_refine=args.sam2_mask_refine, + empty_cache=len(filelist) > 72, pxl_thre=args.pxl_thresh, batchify=not args.not_batchify + ) + if args.use_gt_focal: + focal_path = os.path.join( + img_path.replace('final', 'camdata_left'), seq, 'focal.txt' + ) + focals = np.loadtxt(focal_path) + focals = focals[::args.pose_eval_stride] + original_img_size = cv2.imread(filelist[0]).shape[:2] + resized_img_size = tuple(imgs[0]['img'].shape[-2:]) + focals = focals * max( + (resized_img_size[0] / original_img_size[0]), + (resized_img_size[1] / original_img_size[1]) + ) + scene.preset_focal(focals, requires_grad=False) # TODO: requires_grad=False + lr = 0.01 + loss = scene.compute_global_alignment( + init='mst', niter=args.n_iter, schedule=args.pose_schedule, lr=lr, + ) + else: + mode = GlobalAlignerMode.PairViewer + scene = global_aligner(output, device=device, mode=mode, verbose=not silent) + + if args.save_pose_qualitative: + outfile = get_3D_model_from_scene( + outdir=save_dir, silent=silent, scene=scene, min_conf_thr=2, as_pointcloud=True, mask_sky=False, + clean_depth=True, transparent_cams=False, cam_size=0.01, save_name=seq + ) + else: + outfile = None + pred_traj = scene.get_tum_poses() + + os.makedirs(f'{save_dir}/{seq}', exist_ok=True) + scene.clean_pointcloud() + scene.save_tum_poses(f'{save_dir}/{seq}/pred_traj.txt') + scene.save_focals(f'{save_dir}/{seq}/pred_focal.txt') + 
scene.save_intrinsics(f'{save_dir}/{seq}/pred_intrinsics.txt') + scene.save_depth_maps(f'{save_dir}/{seq}') + scene.save_dynamic_masks(f'{save_dir}/{seq}') + scene.save_dyna_maps(f'{save_dir}/{seq}') + scene.save_conf_maps(f'{save_dir}/{seq}') + scene.save_init_conf_maps(f'{save_dir}/{seq}') + scene.save_rgb_imgs(f'{save_dir}/{seq}') + enlarge_seg_masks(f'{save_dir}/{seq}', kernel_size=5 if args.use_gt_mask else 3) + + gt_traj_file = metadata['gt_traj_func'](img_path, anno_path, seq) + traj_format = metadata.get('traj_format', None) + + if args.eval_dataset == 'sintel': + gt_traj = load_traj(gt_traj_file=gt_traj_file, stride=args.pose_eval_stride) + elif traj_format is not None: + gt_traj = load_traj(gt_traj_file=gt_traj_file, traj_format=traj_format) + else: + gt_traj = None + + if gt_traj is not None: + ate, rpe_trans, rpe_rot = eval_metrics( + pred_traj, gt_traj, seq=seq, filename=f'{save_dir}/{seq}_eval_metric.txt' + ) + plot_trajectory( + pred_traj, gt_traj, title=seq, filename=f'{save_dir}/{seq}.png' + ) + else: + ate, rpe_trans, rpe_rot = 0, 0, 0 + outfile = None + bug = True + + ate_list.append(ate) + rpe_trans_list.append(rpe_trans) + rpe_rot_list.append(rpe_rot) + outfile_list.append(outfile) + + # Write to error log after each sequence + with open(error_log_path, 'a') as f: + f.write(f'{args.eval_dataset}-{seq: <16} | ATE: {ate:.5f}, RPE trans: {rpe_trans:.5f}, RPE rot: {rpe_rot:.5f}\n') + f.write(f'{ate:.5f}\n') + f.write(f'{rpe_trans:.5f}\n') + f.write(f'{rpe_rot:.5f}\n') + + except Exception as e: + if 'out of memory' in str(e): + # Handle OOM + torch.cuda.empty_cache() # Clear the CUDA memory + with open(error_log_path, 'a') as f: + f.write(f'OOM error in sequence {seq}, skipping this sequence.\n') + print(f'OOM error in sequence {seq}, skipping...') + elif 'Degenerate covariance rank' in str(e) or 'Eigenvalues did not converge' in str(e): + # Handle Degenerate covariance rank exception and Eigenvalues did not converge exception + with open(error_log_path, 'a') as f: + f.write(f'Exception in sequence {seq}: {str(e)}\n') + print(f'Traj evaluation error in sequence {seq}, skipping.') + else: + raise e # Rethrow if it's not an expected exception + + # Aggregate results across all processes + if misc.is_dist_avail_and_initialized(): + torch.distributed.barrier() + + bug_tensor = torch.tensor(int(bug), device=device) + + bug = bool(bug_tensor.item()) + + # Handle outfile_list + outfile_list_all = [None for _ in range(world_size)] + + outfile_list_combined = [] + for sublist in outfile_list_all: + if sublist is not None: + outfile_list_combined.extend(sublist) + + results = process_directory(save_dir) + avg_ate, avg_rpe_trans, avg_rpe_rot = calculate_averages(results) + + # Write the averages to the error log (only on the main process) + if rank == 0: + with open(f'{save_dir}/_error_log.txt', 'a') as f: + # Copy the error log from each process to the main error log + for i in range(world_size): + with open(f'{save_dir}/_error_log_{i}.txt', 'r') as f_sub: + f.write(f_sub.read()) + f.write(f'Average ATE: {avg_ate:.5f}, Average RPE trans: {avg_rpe_trans:.5f}, Average RPE rot: {avg_rpe_rot:.5f}\n') + + return avg_ate, avg_rpe_trans, avg_rpe_rot, outfile_list_combined, bug + + +def pose_estimation_custom(args, model, device, save_dir=None): + load_img_size = 512 + dir_path = args.dir_path + silent = args.silent + + + filelist = [os.path.join(dir_path, name) for name in os.listdir(dir_path)] + filelist.sort() + filelist = filelist[::args.pose_eval_stride] + max_winsize = max(1, 
math.ceil((len(filelist)-1)/2)) + scene_graph_type = args.scene_graph_type + if int(scene_graph_type.split('-')[1]) > max_winsize: + scene_graph_type = f'{args.scene_graph_type.split("-")[0]}-{max_winsize}' + if len(scene_graph_type.split("-")) > 2: + scene_graph_type += f'-{args.scene_graph_type.split("-")[2]}' + imgs = load_images( + filelist, size=load_img_size, verbose=False, crop=not args.no_crop + ) + print(f'Processing {args.dir_path} with {len(imgs)} images') + if len(imgs) > 95: + # use swinstride-4 + scene_graph_type = scene_graph_type.replace('5', '4') + pairs = make_pairs( + imgs, scene_graph=scene_graph_type, prefilter=None, symmetrize=True + ) + + output = inference(pairs, model, device, batch_size=1, verbose=not silent) + + torch.cuda.empty_cache() + + with torch.enable_grad(): + if len(imgs) > 2: + mode = GlobalAlignerMode.PointCloudOptimizer + scene = global_aligner( + output, device=device, mode=mode, verbose=not silent, + shared_focal=not args.not_shared_focal and not args.use_gt_focal, + flow_loss_weight=args.flow_loss_weight, flow_loss_fn=args.flow_loss_fn, + depth_regularize_weight=args.depth_regularize_weight, + num_total_iter=args.n_iter, temporal_smoothing_weight=args.temporal_smoothing_weight, + motion_mask_thre=args.motion_mask_thre, + flow_loss_start_epoch=args.flow_loss_start_epoch, flow_loss_thre=args.flow_loss_thre, translation_weight=args.translation_weight, + sintel_ckpt=args.eval_dataset == 'sintel', use_gt_mask = args.use_gt_mask, use_pred_mask = args.use_pred_mask, sam2_mask_refine=args.sam2_mask_refine, + empty_cache=len(filelist) > 72, pxl_thre=args.pxl_thresh, batchify=not args.not_batchify + ) + if args.use_gt_focal: + focal_path = args.focal_path + focals = np.loadtxt(focal_path) + focals = focals[::args.pose_eval_stride] + original_img_size = cv2.imread(filelist[0]).shape[:2] + resized_img_size = tuple(imgs[0]['img'].shape[-2:]) + focals = focals * max( + (resized_img_size[0] / original_img_size[0]), + (resized_img_size[1] / original_img_size[1]) + ) + scene.preset_focal(focals, requires_grad=False) # TODO: requires_grad=False + lr = 0.01 + loss = scene.compute_global_alignment( + init='mst', niter=args.n_iter, schedule=args.pose_schedule, lr=lr, + ) + else: + mode = GlobalAlignerMode.PairViewer + scene = global_aligner(output, device=device, mode=mode, verbose=not silent) + + + os.makedirs(f'{save_dir}', exist_ok=True) + scene.clean_pointcloud() + scene.save_tum_poses(f'{save_dir}/pred_traj.txt') + scene.save_focals(f'{save_dir}/pred_focal.txt') + scene.save_intrinsics(f'{save_dir}/pred_intrinsics.txt') + scene.save_depth_maps(f'{save_dir}') + scene.save_dynamic_masks(f'{save_dir}') + scene.save_dyna_maps(f'{save_dir}') + scene.save_conf_maps(f'{save_dir}') + scene.save_init_conf_maps(f'{save_dir}') + scene.save_rgb_imgs(f'{save_dir}') + # enlarge_seg_masks(f'{save_dir}', kernel_size=5 if args.use_gt_mask else 3) diff --git a/dynamic_predictor/dust3r/post_process.py b/dynamic_predictor/dust3r/post_process.py new file mode 100644 index 0000000000000000000000000000000000000000..550a9b41025ad003228ef16f97d045fc238746e4 --- /dev/null +++ b/dynamic_predictor/dust3r/post_process.py @@ -0,0 +1,60 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). 
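+#
+# estimate_focal_knowing_depth() below offers two estimators: 'median' takes the
+# median of the per-pixel focal votes f = u*z/x and f = v*z/y, while 'weiszfeld'
+# starts from the closed-form L2 solution and runs ten re-weighted iterations.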
+# +# -------------------------------------------------------- +# utilities for interpreting the DUST3R output +# -------------------------------------------------------- +import numpy as np +import torch +from dust3r.utils.geometry import xy_grid + + +def estimate_focal_knowing_depth(pts3d, pp, focal_mode='median', min_focal=0., max_focal=np.inf): + """ Reprojection method, for when the absolute depth is known: + 1) estimate the camera focal using a robust estimator + 2) reproject points onto true rays, minimizing a certain error + """ + B, H, W, THREE = pts3d.shape + assert THREE == 3 + + # centered pixel grid + pixels = xy_grid(W, H, device=pts3d.device).view(1, -1, 2) - pp.view(-1, 1, 2) # B,HW,2 + pts3d = pts3d.flatten(1, 2) # (B, HW, 3) + + if focal_mode == 'median': + with torch.no_grad(): + # direct estimation of focal + u, v = pixels.unbind(dim=-1) + x, y, z = pts3d.unbind(dim=-1) + fx_votes = (u * z) / x + fy_votes = (v * z) / y + + # assume square pixels, hence same focal for X and Y + f_votes = torch.cat((fx_votes.view(B, -1), fy_votes.view(B, -1)), dim=-1) + focal = torch.nanmedian(f_votes, dim=-1).values + + elif focal_mode == 'weiszfeld': + # init focal with l2 closed form + # we try to find focal = argmin Sum | pixel - focal * (x,y)/z| + xy_over_z = (pts3d[..., :2] / pts3d[..., 2:3]).nan_to_num(posinf=0, neginf=0) # homogeneous (x,y,1) + + dot_xy_px = (xy_over_z * pixels).sum(dim=-1) + dot_xy_xy = xy_over_z.square().sum(dim=-1) + + focal = dot_xy_px.mean(dim=1) / dot_xy_xy.mean(dim=1) + + # iterative re-weighted least-squares + for iter in range(10): + # re-weighting by inverse of distance + dis = (pixels - focal.view(-1, 1, 1) * xy_over_z).norm(dim=-1) + # print(dis.nanmean(-1)) + w = dis.clip(min=1e-8).reciprocal() + # update the scaling with the new weights + focal = (w * dot_xy_px).mean(dim=1) / (w * dot_xy_xy).mean(dim=1) + else: + raise ValueError(f'bad {focal_mode=}') + + focal_base = max(H, W) / (2 * np.tan(np.deg2rad(60) / 2)) # size / 1.1547005383792515 + focal = focal.clip(min=min_focal*focal_base, max=max_focal*focal_base) + # print(focal) + return focal diff --git a/dynamic_predictor/dust3r/training.py b/dynamic_predictor/dust3r/training.py new file mode 100644 index 0000000000000000000000000000000000000000..f4747e728d19ab55f45f0535bf76dd03ae6353d7 --- /dev/null +++ b/dynamic_predictor/dust3r/training.py @@ -0,0 +1,556 @@ +# -------------------------------------------------------- +# training code for DUSt3R +# -------------------------------------------------------- +import os +os.environ['OMP_NUM_THREADS'] = '4' # will affect the performance of pairwise prediction +import argparse +import datetime +import json +import numpy as np +import sys +import time +import math +import wandb +from collections import defaultdict +from pathlib import Path +from typing import Sized + +import torch +import torch.backends.cudnn as cudnn +from torch.utils.tensorboard import SummaryWriter +torch.backends.cuda.matmul.allow_tf32 = True # for gpu >= Ampere and pytorch >= 1.12 + +from dust3r.model import AsymmetricCroCo3DStereo, inf # noqa: F401, needed when loading the model +from dust3r.datasets import get_data_loader # noqa +from dust3r.losses import * # noqa: F401, needed when loading the model +from dust3r.inference import loss_of_one_batch, visualize_results, visualize_results_mmask # noqa + +from dust3r.pose_eval import eval_pose_estimation +from dust3r.depth_eval import eval_mono_depth_estimation + +# from demo import get_3D_model_from_scene +import 
dust3r.utils.path_to_croco # noqa: F401 +import croco.utils.misc as misc # noqa +from croco.utils.misc import NativeScalerWithGradNormCount as NativeScaler # noqa +import PIL.Image as Image +from dust3r.cloud_opt.motion_mask_from_raft import get_motion_mask_from_pairs + +def get_args_parser(): + parser = argparse.ArgumentParser('DUST3R training', add_help=False) + # model and criterion + parser.add_argument('--model', default="AsymmetricCroCo3DStereo(pos_embed='RoPE100', patch_embed_cls='ManyAR_PatchEmbed', \ + img_size=(512, 512), head_type='dpt', output_mode='pts3d', depth_mode=('exp', -inf, inf), conf_mode=('exp', 1, inf), \ + enc_embed_dim=1024, enc_depth=24, enc_num_heads=16, dec_embed_dim=768, dec_depth=12, dec_num_heads=12, freeze='encoder')", + type=str, help="string containing the model to build") + parser.add_argument('--pretrained', default=None, help='path of a starting checkpoint') + parser.add_argument('--train_criterion', default="ConfLoss(Regr3D(L21, norm_mode='avg_dis'), alpha=0.2)", + type=str, help="train criterion") + parser.add_argument('--test_criterion', default=None, type=str, help="test criterion") + + # dataset + parser.add_argument('--train_dataset', default='[None]', type=str, help="training set") + parser.add_argument('--test_dataset', default='[None]', type=str, help="testing set") + + # training + parser.add_argument('--seed', default=0, type=int, help="Random seed") + parser.add_argument('--batch_size', default=64, type=int, + help="Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus") + parser.add_argument('--test_batch_size', default=64, type=int, + help="Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus") + + parser.add_argument('--accum_iter', default=1, type=int, + help="Accumulate gradient iterations (for increasing the effective batch size under memory constraints)") + parser.add_argument('--epochs', default=800, type=int, help="Maximum number of epochs for the scheduler") + parser.add_argument('--weight_decay', type=float, default=0.05, help="weight decay (default: 0.05)") + parser.add_argument('--lr', type=float, default=None, metavar='LR', help='learning rate (absolute lr)') + parser.add_argument('--blr', type=float, default=1.5e-4, metavar='LR', + help='base learning rate: absolute_lr = base_lr * total_batch_size / 256') + parser.add_argument('--min_lr', type=float, default=0., metavar='LR', + help='lower lr bound for cyclic schedulers that hit 0') + parser.add_argument('--warmup_epochs', type=int, default=40, metavar='N', help='epochs to warmup LR') + parser.add_argument('--amp', type=int, default=0, + choices=[0, 1], help="Use Automatic Mixed Precision for pretraining") + parser.add_argument("--cudnn_benchmark", action='store_true', default=False, + help="set cudnn.benchmark = False") + parser.add_argument("--eval_only", action='store_true', default=False) + parser.add_argument("--fixed_eval_set", action='store_true', default=False) + parser.add_argument('--resume', default=None, type=str, help='path to latest checkpoint (default: none)') + + # others + parser.add_argument('--num_workers', default=8, type=int) + parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes') + parser.add_argument('--local_rank', default=-1, type=int) + parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') + parser.add_argument('--eval_freq', type=int, default=5, help='Test loss evaluation frequency') + 
parser.add_argument('--save_freq', default=1, type=int,
+                        help='frequency (number of epochs) at which to save checkpoint-last.pth')
+    parser.add_argument('--keep_freq', default=5, type=int,
+                        help='frequency (number of epochs) at which to save checkpoint-%d.pth')
+    parser.add_argument('--print_freq', default=20, type=int,
+                        help='frequency (number of iterations) at which to print info while training')
+    parser.add_argument('--wandb', action='store_true', default=False, help='use wandb for logging')
+    parser.add_argument('--num_save_visual', default=4, type=int, help='number of visualizations to save')
+
+    # switch mode for train / eval pose / eval depth
+    parser.add_argument('--mode', default='train', type=str, help='train / eval_pose / eval_depth')
+
+    # for pose eval
+    parser.add_argument('--threshold', default=0.5, type=float, help='threshold for the motion mask')
+    parser.add_argument('--pose_eval_freq', default=25, type=int, help='pose evaluation frequency')
+    parser.add_argument('--pose_eval_stride', default=1, type=int, help='stride for pose evaluation')
+    parser.add_argument('--scene_graph_type', default='swinstride-5-noncyclic', type=str, help='scene graph window size')
+    parser.add_argument('--save_best_pose', action='store_true', default=False, help='save best pose')
+    parser.add_argument('--n_iter', default=300, type=int, help='number of iterations for pose optimization')
+    parser.add_argument('--save_pose_qualitative', action='store_true', default=False, help='save qualitative pose results')
+    parser.add_argument('--temporal_smoothing_weight', default=0.01, type=float, help='temporal smoothing weight for pose optimization')
+    parser.add_argument('--not_shared_focal', action='store_true', default=False, help='do not share a single focal length across views during pose optimization')
+    parser.add_argument('--use_gt_focal', action='store_true', default=False, help='use ground truth focal length for pose optimization')
+    parser.add_argument('--pose_schedule', default='linear', type=str, help='pose optimization schedule')
+    parser.add_argument('--flow_loss_weight', default=0.01, type=float, help='flow loss weight for pose optimization')
+    parser.add_argument('--cananical_space_loss_weight', default=1, type=float, help='canonical-space loss weight for pose optimization')
+    parser.add_argument('--flow_loss_fn', default='smooth_l1', type=str, help='flow loss type for pose optimization')
+    parser.add_argument('--use_gt_mask', action='store_true', default=False, help='use gt mask for pose optimization, for sintel/davis')
+
+    parser.add_argument('--use_pred_mask', action='store_true', default=False, help='use the network-predicted mask for pose optimization')
+    parser.add_argument('--evaluate_davis', action='store_true', default=False, help='evaluate davis on the first 50 frames')
+    parser.add_argument('--not_batchify', action='store_true', default=False, help='use non-batchified mode for global optimization')
+    parser.add_argument('--dir_path', type=str, help='path to custom dataset for pose evaluation')
+
+    parser.add_argument('--motion_mask_thre', default=0.35, type=float, help='motion mask threshold for pose optimization')
+    parser.add_argument('--sam2_mask_refine', action='store_true', default=False, help='use SAM2 to refine the motion masks for pose optimization')
+    parser.add_argument('--flow_loss_start_epoch', default=0.1, type=float, help='start epoch for the flow loss')
+    parser.add_argument('--flow_loss_thre', default=20, type=float, help='threshold for the flow loss')
+    parser.add_argument('--pxl_thresh', default=50.0,
type=float, help='threshold for flow loss') + parser.add_argument('--depth_regularize_weight', default=0.0, type=float, help='depth regularization weight for pose optimization') + parser.add_argument('--translation_weight', default=1, type=float, help='translation weight for pose optimization') + parser.add_argument('--silent', action='store_true', default=False, help='silent mode for pose evaluation') + parser.add_argument('--full_seq', action='store_true', default=False, help='use full sequence for pose evaluation') + parser.add_argument('--seq_list', nargs='+', default=None, help='list of sequences for pose evaluation') + + + parser.add_argument('--eval_dataset', type=str, default='sintel', + choices=['davis', 'kitti', 'bonn', 'scannet', 'tum', 'nyu', 'sintel'], + help='choose dataset for pose evaluation') + + # for monocular depth eval + parser.add_argument('--no_crop', action='store_true', default=False, help='do not crop the image for monocular depth evaluation') + + # output dir + parser.add_argument('--output_dir', default='./results/tmp', type=str, help="path where to save the output") + return parser + +def load_model(args, device): + # model + print('Loading model: {:s}'.format(args.model)) + + model = eval(args.model) + + if args.pretrained and not args.resume: + if os.path.isfile(args.pretrained): + # load from pth file + print('Loading pretrained: ', args.pretrained) + ckpt = torch.load(args.pretrained, map_location=device, weights_only=False) + print(model.load_state_dict(ckpt['model'], strict=False)) + del ckpt # in case it occupies memory + + else: + # load from huggingface + print('Loading pretrained from huggingface: ', args.pretrained) + model = model.from_pretrained(args.pretrained) + + model.to(device) + model_without_ddp = model + + if args.distributed: + model = torch.nn.parallel.DistributedDataParallel( + model, device_ids=[args.gpu], find_unused_parameters=True, static_graph=True) + model_without_ddp = model.module + + return model, model_without_ddp + +def train(args): + misc.init_distributed_mode(args) + global_rank = misc.get_rank() + world_size = misc.get_world_size() + # if main process, init wandb + if args.wandb and misc.is_main_process(): + wandb.init(name=args.output_dir.split('/')[-1], + project='dust3r', + config=args, + sync_tensorboard=True, + dir=args.output_dir) + + print("output_dir: " + args.output_dir) + if args.output_dir: + Path(args.output_dir).mkdir(parents=True, exist_ok=True) + + # auto resume if not specified + if args.resume is None: + last_ckpt_fname = os.path.join(args.output_dir, f'checkpoint-last.pth') + if os.path.isfile(last_ckpt_fname) and (not args.eval_only): args.resume = last_ckpt_fname + + print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__)))) + print("{}".format(args).replace(', ', ',\n')) + + device = "cuda" if torch.cuda.is_available() else "cpu" + device = torch.device(device) + + # fix the seed + seed = args.seed + misc.get_rank() + torch.manual_seed(seed) + np.random.seed(seed) + + cudnn.benchmark = args.cudnn_benchmark + model, model_without_ddp = load_model(args, device) + + if not args.eval_only: + # training dataset and loader + print('Building train dataset {:s}'.format(args.train_dataset)) + # dataset and loader + data_loader_train = build_dataset(args.train_dataset, args.batch_size, args.num_workers, test=False) + print(f'>> Creating train criterion = {args.train_criterion}') + train_criterion = eval(args.train_criterion).to(device) + + print('Building test dataset 
{:s}'.format(args.test_dataset))
+    data_loader_test = {}
+    for dataset in args.test_dataset.split('+'):
+        testset = build_dataset(dataset, args.test_batch_size, args.num_workers, test=True)
+        name_testset = dataset.split('(')[0]
+        if getattr(testset.dataset.dataset, 'strides', None) is not None:
+            name_testset += f'_stride{testset.dataset.dataset.strides}'
+        data_loader_test[name_testset] = testset
+    print(f'>> Creating test criterion = {args.test_criterion or args.train_criterion}')
+    test_criterion = eval(args.test_criterion or args.train_criterion).to(device)
+
+    eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()
+    if args.lr is None:  # only base_lr is specified
+        args.lr = args.blr * eff_batch_size / 256
+    print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
+    print("actual lr: %.2e" % args.lr)
+    print("accumulate grad iterations: %d" % args.accum_iter)
+    print("effective batch size: %d" % eff_batch_size)
+
+    # following timm: set wd as 0 for bias and norm layers
+    param_groups = misc.get_parameter_groups(model_without_ddp, args.weight_decay)
+    optimizer = torch.optim.AdamW(param_groups, lr=args.lr, betas=(0.9, 0.95))
+    loss_scaler = NativeScaler()
+
+    def write_log_stats(epoch, train_stats, test_stats):
+        if misc.is_main_process():
+            if log_writer is not None:
+                log_writer.flush()
+            gathered_test_stats = {}
+            log_stats = dict(epoch=epoch, **{f'train_{k}': v for k, v in train_stats.items()})
+
+            for test_name, testset in data_loader_test.items():
+                if test_name not in test_stats:
+                    continue
+                if getattr(testset.dataset.dataset, 'strides', None) is not None:
+                    original_test_name = test_name.split('_stride')[0]
+                    if original_test_name not in gathered_test_stats.keys():
+                        gathered_test_stats[original_test_name] = []
+                    gathered_test_stats[original_test_name].append(test_stats[test_name])
+                log_stats.update({test_name + '_' + k: v for k, v in test_stats[test_name].items()})
+
+            if len(gathered_test_stats) > 0:
+                for original_test_name, stride_stats in gathered_test_stats.items():
+                    if len(stride_stats) > 1:
+                        stride_stats = {k: np.mean([x[k] for x in stride_stats]) for k in stride_stats[0]}
+                        log_stats.update({original_test_name + '_stride_mean_' + k: v for k, v in stride_stats.items()})
+                        if args.wandb:
+                            log_dict = {original_test_name + '_stride_mean_' + k: v for k, v in stride_stats.items()}
+                            log_dict.update({'epoch': epoch})
+                            wandb.log(log_dict)
+
+            with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
+                f.write(json.dumps(log_stats) + "\n")
+
+    def save_model(epoch, fname, best_so_far, best_pose_ate_sofar=None):
+        misc.save_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer,
+                        loss_scaler=loss_scaler, epoch=epoch, fname=fname, best_so_far=best_so_far, best_pose_ate_sofar=best_pose_ate_sofar)
+
+    best_so_far, best_pose_ate_sofar = misc.load_model(args=args, model_without_ddp=model_without_ddp,
+                                                       optimizer=optimizer, loss_scaler=loss_scaler)
+    if best_so_far is None:
+        best_so_far = float('inf')
+    if best_pose_ate_sofar is None:
+        best_pose_ate_sofar = float('inf')
+    if global_rank == 0 and args.output_dir is not None:
+        log_writer = SummaryWriter(log_dir=args.output_dir)
+    else:
+        log_writer = None
+
+    print(f"Start training for {args.epochs} epochs")
+    start_time = time.time()
+    train_stats = test_stats = {}
+    for epoch in range(args.start_epoch, args.epochs + 1):
+
+        # Test on multiple datasets
+        new_best = False
+        new_pose_best = False
+        already_saved = False
+        if (epoch > 
args.start_epoch and args.eval_freq > 0 and epoch % args.eval_freq == 0) or args.eval_only: + test_stats = {} + for test_name, testset in data_loader_test.items(): + print(f'Testing on {test_name}...') + stats = test_one_epoch(model, test_criterion, testset, + device, epoch, log_writer=log_writer, args=args, prefix=test_name) + test_stats[test_name] = stats + + # Save best of all + if stats['loss_med'] < best_so_far: + best_so_far = stats['loss_med'] + new_best = True + # Ensure that eval_pose_estimation is only run on the main process + if args.pose_eval_freq>0 and (epoch % args.pose_eval_freq==0 or args.eval_only): + ate_mean, rpe_trans_mean, rpe_rot_mean, outfile_list, bug = eval_pose_estimation(args, model, device, save_dir=f'{args.output_dir}/{epoch}') + print(f'ATE mean: {ate_mean}, RPE trans mean: {rpe_trans_mean}, RPE rot mean: {rpe_rot_mean}') + + # Optionally log the results to wandb + if args.wandb and misc.is_main_process(): + wandb_dict = { + 'epoch': epoch, + 'ATE mean': ate_mean, + 'RPE trans mean': rpe_trans_mean, + 'RPE rot mean': rpe_rot_mean, + } + if args.save_pose_qualitative: + for outfile in outfile_list: + wandb_dict[outfile.split('/')[-1]] = wandb.Object3D(open(outfile)) + + wandb.log(wandb_dict) + + if ate_mean < best_pose_ate_sofar and not bug: # if the pose estimation is better, and w/o any error + best_pose_ate_sofar = ate_mean + new_pose_best = True + + # Synchronize all processes to ensure eval_pose_estimation is completed + try: + torch.distributed.barrier() + except: + pass + + # Save more stuff + write_log_stats(epoch, train_stats, test_stats) + + if args.eval_only: + exit(0) + + if epoch > args.start_epoch: + if args.keep_freq and epoch % args.keep_freq == 0: + save_model(epoch - 1, str(epoch), best_so_far, best_pose_ate_sofar) + already_saved = True + if new_best: + save_model(epoch - 1, 'best', best_so_far, best_pose_ate_sofar) + already_saved = True + if new_pose_best and args.save_best_pose: + save_model(epoch - 1, 'best_pose', best_so_far, best_pose_ate_sofar) + already_saved = True + + # Save immediately the last checkpoint + if epoch > args.start_epoch: + save_model(epoch - 1, 'last', best_so_far, best_pose_ate_sofar) + + if epoch >= args.epochs: + break # exit after writing last test to disk + + # Train + train_stats = train_one_epoch( + model, train_criterion, data_loader_train, + optimizer, device, epoch, loss_scaler, + log_writer=log_writer, + args=args) + + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('Training time {}'.format(total_time_str)) + + save_final_model(args, args.epochs, model_without_ddp, best_so_far=best_so_far) + + +def save_final_model(args, epoch, model_without_ddp, best_so_far=None): + output_dir = Path(args.output_dir) + checkpoint_path = output_dir / 'checkpoint-final.pth' + to_save = { + 'args': args, + 'model': model_without_ddp if isinstance(model_without_ddp, dict) else model_without_ddp.cpu().state_dict(), + 'epoch': epoch + } + if best_so_far is not None: + to_save['best_so_far'] = best_so_far + print(f'>> Saving model to {checkpoint_path} ...') + misc.save_on_master(to_save, checkpoint_path) + + +def build_dataset(dataset, batch_size, num_workers, test=False): + split = ['Train', 'Test'][test] + print(f'Building {split} Data loader for dataset: ', dataset) + loader = get_data_loader(dataset, + batch_size=batch_size, + num_workers=num_workers, + pin_mem=True, + shuffle=not (test), + drop_last=not (test)) + + print(f"{split} dataset length: ", 
len(loader)) + return loader + + +def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, + data_loader: Sized, optimizer: torch.optim.Optimizer, + device: torch.device, epoch: int, loss_scaler, + args, + log_writer=None): + assert torch.backends.cuda.matmul.allow_tf32 == True + + model.train(True) + metric_logger = misc.MetricLogger(delimiter=" ") + metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}')) + header = 'Epoch: [{}]'.format(epoch) + accum_iter = args.accum_iter + + if log_writer is not None: + print('log_dir: {}'.format(log_writer.log_dir)) + + if hasattr(data_loader, 'dataset') and hasattr(data_loader.dataset, 'set_epoch'): + data_loader.dataset.set_epoch(epoch) + if hasattr(data_loader, 'sampler') and hasattr(data_loader.sampler, 'set_epoch'): + data_loader.sampler.set_epoch(epoch) + + optimizer.zero_grad() + + for data_iter_step, batch in enumerate(metric_logger.log_every(data_loader, args.print_freq, header)): + epoch_f = epoch + data_iter_step / len(data_loader) + + # we use a per iteration (instead of per epoch) lr scheduler + if data_iter_step % accum_iter == 0: + misc.adjust_learning_rate(optimizer, epoch_f, args) + + batch_result = loss_of_one_batch(batch, model, criterion, device, + symmetrize_batch=True, + use_amp=bool(args.amp)) + loss, loss_details = batch_result['loss'] # criterion returns two values + loss_value = float(loss) + + if (data_iter_step % max((len(data_loader) // args.num_save_visual), 1) == 0 or data_iter_step == 0) and misc.is_main_process() : + print(f'Saving visualizations for data_iter_step {data_iter_step}...') + save_dir = f'{args.output_dir}/{epoch}' + Path(save_dir).mkdir(parents=True, exist_ok=True) + view1, view2, pred1, pred2 = batch_result['view1'], batch_result['view2'], batch_result['pred1'], batch_result['pred2'] + gt_rgb_mmask1, gt_rgb_mmask2 = visualize_results_mmask(view1, view2, pred1, pred2, save_dir=save_dir, visualize_type='gt') + pred_rgb_mmask1, pred_rgb_mmask2 = visualize_results_mmask(view1, view2, pred1, pred2, save_dir=save_dir, visualize_type='pred') + if args.wandb: + wandb.log({ + 'epoch': epoch, + + 'train_gt_mmask_1': wandb.Image(gt_rgb_mmask1), + 'train_gt_mmask_2': wandb.Image(gt_rgb_mmask2), + + 'train_pred_mmask_1': wandb.Image(pred_rgb_mmask1), + 'train_pred_mmask_2': wandb.Image(pred_rgb_mmask2) + }) + + if not math.isfinite(loss_value): + print("Loss is {}, stopping training".format(loss_value), force=True) + sys.exit(1) + + loss /= accum_iter + loss_scaler(loss, optimizer, parameters=model.parameters(), + update_grad=(data_iter_step + 1) % accum_iter == 0) + if (data_iter_step + 1) % accum_iter == 0: + optimizer.zero_grad() + + del loss + del batch + + lr = optimizer.param_groups[0]["lr"] + metric_logger.update(epoch=epoch_f) + metric_logger.update(lr=lr) + metric_logger.update(loss=loss_value, **loss_details) + + if (data_iter_step + 1) % accum_iter == 0 and ((data_iter_step + 1) % (accum_iter * args.print_freq)) == 0: + loss_value_reduce = misc.all_reduce_mean(loss_value) # MUST BE EXECUTED BY ALL NODES + if log_writer is None: + continue + """ We use epoch_1000x as the x-axis in tensorboard. + This calibrates different curves when batch size changes. 
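+            For example, halfway through epoch 3 (epoch_f = 3.5) is logged at
+            epoch_1000x = 3500, independently of the batch size.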
+ """ + epoch_1000x = int(epoch_f * 1000) + log_writer.add_scalar('train_loss', loss_value_reduce, epoch_1000x) + log_writer.add_scalar('train_lr', lr, epoch_1000x) + log_writer.add_scalar('train_iter', epoch_1000x, epoch_1000x) + for name, val in loss_details.items(): + log_writer.add_scalar('train_' + name, val, epoch_1000x) + + # gather the stats from all processes + metric_logger.synchronize_between_processes() + print("Averaged stats:", metric_logger) + return {k: meter.global_avg for k, meter in metric_logger.meters.items()} + + +@torch.no_grad() +def test_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, + data_loader: Sized, device: torch.device, epoch: int, + args, log_writer=None, prefix='test'): + + model.eval() + metric_logger = misc.MetricLogger(delimiter=" ") + metric_logger.meters = defaultdict(lambda: misc.SmoothedValue(window_size=9**9)) + header = 'Test Epoch: [{}]'.format(epoch) + + if log_writer is not None: + print('log_dir: {}'.format(log_writer.log_dir)) + + if hasattr(data_loader, 'dataset') and hasattr(data_loader.dataset, 'set_epoch'): + data_loader.dataset.set_epoch(epoch) if not args.fixed_eval_set else data_loader.dataset.set_epoch(0) + if hasattr(data_loader, 'sampler') and hasattr(data_loader.sampler, 'set_epoch'): + data_loader.sampler.set_epoch(epoch) if not args.fixed_eval_set else data_loader.sampler.set_epoch(0) + + for idx, batch in enumerate(metric_logger.log_every(data_loader, args.print_freq, header)): + + batch_result = loss_of_one_batch(batch, model, criterion, device, + symmetrize_batch=True, + use_amp=bool(args.amp)) + loss_tuple = batch_result['loss'] + loss_value, loss_details = loss_tuple # criterion returns two values + metric_logger.update(loss=float(loss_value), **loss_details) + + if args.num_save_visual>0 and (idx % max((len(data_loader) // args.num_save_visual), 1) == 0) and misc.is_main_process() : # save visualizations + + save_dir = f'{args.output_dir}/{epoch}' + Path(save_dir).mkdir(parents=True, exist_ok=True) + view1, view2, pred1, pred2 = batch_result['view1'], batch_result['view2'], batch_result['pred1'], batch_result['pred2'] + + gt_rgb_mmask1, gt_rgb_mmask2 = visualize_results_mmask(view1, view2, pred1, pred2, save_dir=save_dir, visualize_type='gt') + pred_rgb_mmask1, pred_rgb_mmask2 = visualize_results_mmask(view1, view2, pred1, pred2, save_dir=save_dir, visualize_type='pred') + if args.wandb: + wandb.log({ + 'epoch': epoch, + f'{prefix}_test_gt_mmask_1': wandb.Image(gt_rgb_mmask1), + f'{prefix}_test_gt_mmask_2': wandb.Image(gt_rgb_mmask2), + + f'{prefix}_test_pred_mmask_1': wandb.Image(pred_rgb_mmask1), + f'{prefix}_test_pred_mmask_2': wandb.Image(pred_rgb_mmask2) + }) + + + + + # gather the stats from all processes + metric_logger.synchronize_between_processes() + print("Averaged stats:", metric_logger) + + aggs = [('avg', 'global_avg'), ('med', 'median')] + results = {f'{k}_{tag}': getattr(meter, attr) for k, meter in metric_logger.meters.items() for tag, attr in aggs} + + if log_writer is not None: + for name, val in results.items(): + log_writer.add_scalar(prefix + '_' + name, val, 1000 * epoch) + + return results diff --git a/dynamic_predictor/dust3r/utils/__init__.py b/dynamic_predictor/dust3r/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a32692113d830ddc4af4e6ed608f222fbe062e6e --- /dev/null +++ b/dynamic_predictor/dust3r/utils/__init__.py @@ -0,0 +1,2 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. 
+# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). diff --git a/dynamic_predictor/dust3r/utils/device.py b/dynamic_predictor/dust3r/utils/device.py new file mode 100644 index 0000000000000000000000000000000000000000..e3b6a74dac05a2e1ba3a2b2f0faa8cea08ece745 --- /dev/null +++ b/dynamic_predictor/dust3r/utils/device.py @@ -0,0 +1,76 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). +# +# -------------------------------------------------------- +# utilitary functions for DUSt3R +# -------------------------------------------------------- +import numpy as np +import torch + + +def todevice(batch, device, callback=None, non_blocking=False): + ''' Transfer some variables to another device (i.e. GPU, CPU:torch, CPU:numpy). + + batch: list, tuple, dict of tensors or other things + device: pytorch device or 'numpy' + callback: function that would be called on every sub-elements. + ''' + if callback: + batch = callback(batch) + + if isinstance(batch, dict): + return {k: todevice(v, device) for k, v in batch.items()} + + if isinstance(batch, (tuple, list)): + return type(batch)(todevice(x, device) for x in batch) + + x = batch + if device == 'numpy': + if isinstance(x, torch.Tensor): + x = x.detach().cpu().numpy() + elif x is not None: + if isinstance(x, np.ndarray): + x = torch.from_numpy(x) + if torch.is_tensor(x): + x = x.to(device, non_blocking=non_blocking) + return x + + +to_device = todevice # alias + + +def to_numpy(x): return todevice(x, 'numpy') +def to_cpu(x): return todevice(x, 'cpu') +def to_cuda(x): return todevice(x, 'cuda') + + +def collate_with_cat(whatever, lists=False): + if isinstance(whatever, dict): + return {k: collate_with_cat(vals, lists=lists) for k, vals in whatever.items()} + + elif isinstance(whatever, (tuple, list)): + if len(whatever) == 0: + return whatever + elem = whatever[0] + T = type(whatever) + + if elem is None: + return None + if isinstance(elem, (bool, float, int, str)): + return whatever + if isinstance(elem, tuple): + return T(collate_with_cat(x, lists=lists) for x in zip(*whatever)) + if isinstance(elem, dict): + return {k: collate_with_cat([e[k] for e in whatever], lists=lists) for k in elem} + + if isinstance(elem, torch.Tensor): + return listify(whatever) if lists else torch.cat(whatever) + if isinstance(elem, np.ndarray): + return listify(whatever) if lists else torch.cat([torch.from_numpy(x) for x in whatever]) + + # otherwise, we just chain lists + return sum(whatever, T()) + + +def listify(elems): + return [x for e in elems for x in e] diff --git a/dynamic_predictor/dust3r/utils/flow_vis.py b/dynamic_predictor/dust3r/utils/flow_vis.py new file mode 100644 index 0000000000000000000000000000000000000000..e3d97f274650633a339ddcdf7fda7284608af449 --- /dev/null +++ b/dynamic_predictor/dust3r/utils/flow_vis.py @@ -0,0 +1,184 @@ +import numpy as np +import cv2 +from functools import wraps +from matplotlib import pyplot as plt +import torch + +MAX_VALUES_BY_DTYPE = { + np.dtype('uint8'): 255, + np.dtype('uint16'): 65535, + np.dtype('uint32'): 4294967295, + np.dtype('float32'): 1.0, +} + +UNKNOWN_FLOW_THRESH = 1e7 +SMALLFLOW = 0.0 +LARGEFLOW = 1e8 + +def flow2rgb(flow_map, max_value): + if isinstance(flow_map,np.ndarray): + if flow_map.shape[2] == 2: + flow_map = flow_map.transpose(2,0, 1) + flow_map_np = flow_map + else: + if flow_map.shape[2] == 2: + # shape is HxWx2 + flow_map = flow_map.permute(2, 0, 1) + flow_map_np = flow_map.detach().cpu().numpy() + _, h, w = 
flow_map_np.shape + flow_map_np[:,(flow_map_np[0] == 0) & (flow_map_np[1] == 0)] = float('nan') + rgb_map = np.ones((3,h,w)).astype(np.float32) + if max_value is not None: + normalized_flow_map = flow_map_np / max_value + else: + normalized_flow_map = flow_map_np / (np.abs(flow_map_np).max()) + rgb_map[0] += normalized_flow_map[0] + rgb_map[1] -= 0.5*(normalized_flow_map[0] + normalized_flow_map[1]) + rgb_map[2] += normalized_flow_map[1] + return rgb_map.clip(0,1) + + +def flow_to_image(flow, maxrad=None): + """ + Convert flow into middlebury color code image + :param flow: optical flow map + :return: optical flow image in middlebury color + """ + h,w, _ = flow.shape + u = flow[:, :, 0] + v = flow[:, :, 1] + + maxu = -999. + maxv = -999. + minu = 999. + minv = 999. + + idxUnknow = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH) + u[idxUnknow] = 0 + v[idxUnknow] = 0 + + if maxrad is None: + rad = np.sqrt(u ** 2 + v ** 2) + maxrad = max(-1, np.max(rad)) + + #print("max flow: %.4f\nflow range:\nu = %.3f .. %.3f\nv = %.3f .. %.3f" % (maxrad, minu,maxu, minv, maxv)) + + u = u/(maxrad + np.finfo(float).eps) + v = v/(maxrad + np.finfo(float).eps) + + img = compute_color(u, v) + + idx = np.repeat(idxUnknow[:, :, np.newaxis], 3, axis=2) + img[idx] = 0 + valid = np.ones((h,w), np.uint8) + valid[np.logical_and(u == 0 , v == 0)] = 0 + return np.uint8(img)*np.expand_dims(valid, axis=2) + + +def show_flow(flow): + """ + visualize optical flow map using matplotlib + :param filename: optical flow file + :return: None + """ + img = flow_to_image(flow) + plt.imshow(img) + plt.show() + + return img + + +def compute_color(u, v): + """ + compute optical flow color map + :param u: optical flow horizontal map + :param v: optical flow vertical map + :return: optical flow in color code + """ + [h, w] = u.shape + img = np.zeros([h, w, 3]) + nanIdx = np.isnan(u) | np.isnan(v) + u[nanIdx] = 0 + v[nanIdx] = 0 + + colorwheel = make_color_wheel() + ncols = np.size(colorwheel, 0) + + rad = np.sqrt(u**2+v**2) + + a = np.arctan2(-v, -u) / np.pi + + fk = (a+1) / 2 * (ncols - 1) + 1 + + k0 = np.floor(fk).astype(int) + + k1 = k0 + 1 + k1[k1 == ncols+1] = 1 + f = fk - k0 + + for i in range(0, np.size(colorwheel,1)): + tmp = colorwheel[:, i] + col0 = tmp[k0-1] / 255 + col1 = tmp[k1-1] / 255 + col = (1-f) * col0 + f * col1 + + idx = rad <= 1 + col[idx] = 1-rad[idx]*(1-col[idx]) + notidx = np.logical_not(idx) + + col[notidx] *= 0.75 + img[:, :, i] = np.uint8(np.floor(255 * col*(1-nanIdx))) + + return img + + +def make_color_wheel(): + """ + Generate color wheel according Middlebury color code + :return: Color wheel + """ + RY = 15 + YG = 6 + GC = 4 + CB = 11 + BM = 13 + MR = 6 + + ncols = RY + YG + GC + CB + BM + MR + + colorwheel = np.zeros([ncols, 3]) + + col = 0 + + # RY + colorwheel[0:RY, 0] = 255 + colorwheel[0:RY, 1] = np.transpose(np.floor(255*np.arange(0, RY) / RY)) + col += RY + + # YG + colorwheel[col:col+YG, 0] = 255 - np.transpose(np.floor(255*np.arange(0, YG) / YG)) + colorwheel[col:col+YG, 1] = 255 + col += YG + + # GC + colorwheel[col:col+GC, 1] = 255 + colorwheel[col:col+GC, 2] = np.transpose(np.floor(255*np.arange(0, GC) / GC)) + col += GC + + # CB + colorwheel[col:col+CB, 1] = 255 - np.transpose(np.floor(255*np.arange(0, CB) / CB)) + colorwheel[col:col+CB, 2] = 255 + col += CB + + # BM + colorwheel[col:col+BM, 2] = 255 + colorwheel[col:col+BM, 0] = np.transpose(np.floor(255*np.arange(0, BM) / BM)) + col += + BM + + # MR + colorwheel[col:col+MR, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, 
MR) / MR)) + colorwheel[col:col+MR, 0] = 255 + + return colorwheel + + diff --git a/dynamic_predictor/dust3r/utils/geometry.py b/dynamic_predictor/dust3r/utils/geometry.py new file mode 100644 index 0000000000000000000000000000000000000000..7224728d98e8b992efb15dacb3123e6760adbd5b --- /dev/null +++ b/dynamic_predictor/dust3r/utils/geometry.py @@ -0,0 +1,370 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). +# +# -------------------------------------------------------- +# geometry utilitary functions +# -------------------------------------------------------- +import torch +import numpy as np +from scipy.spatial import cKDTree as KDTree + +from dust3r.utils.misc import invalid_to_zeros, invalid_to_nans +from dust3r.utils.device import to_numpy + + +def xy_grid(W, H, device=None, origin=(0, 0), unsqueeze=None, cat_dim=-1, homogeneous=False, **arange_kw): + """ Output a (H,W,2) array of int32 + with output[j,i,0] = i + origin[0] + output[j,i,1] = j + origin[1] + """ + if device is None: + # numpy + arange, meshgrid, stack, ones = np.arange, np.meshgrid, np.stack, np.ones + else: + # torch + arange = lambda *a, **kw: torch.arange(*a, device=device, **kw) + meshgrid, stack = torch.meshgrid, torch.stack + ones = lambda *a: torch.ones(*a, device=device) + + tw, th = [arange(o, o + s, **arange_kw) for s, o in zip((W, H), origin)] + grid = meshgrid(tw, th, indexing='xy') + if homogeneous: + grid = grid + (ones((H, W)),) + if unsqueeze is not None: + grid = (grid[0].unsqueeze(unsqueeze), grid[1].unsqueeze(unsqueeze)) + if cat_dim is not None: + grid = stack(grid, cat_dim) + return grid + + +def geotrf(Trf, pts, ncol=None, norm=False): + """ Apply a geometric transformation to a list of 3-D points. + + H: 3x3 or 4x4 projection matrix (typically a Homography) + p: numpy/torch/tuple of coordinates. Shape must be (...,2) or (...,3) + + ncol: int. number of columns of the result (2 or 3) + norm: float. if != 0, the resut is projected on the z=norm plane. + + Returns an array of projected 2d points. 
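+
+    Example: with Trf of shape (B,4,4) holding cam2world matrices and pts of
+    shape (B,H,W,3), geotrf(Trf, pts) applies R @ p + t to every point and
+    returns a (B,H,W,3) array expressed in world coordinates.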
+ """ + assert Trf.ndim >= 2 + if isinstance(Trf, np.ndarray): + pts = np.asarray(pts) + elif isinstance(Trf, torch.Tensor): + pts = torch.as_tensor(pts, dtype=Trf.dtype) + + # adapt shape if necessary + output_reshape = pts.shape[:-1] + ncol = ncol or pts.shape[-1] + + # optimized code + if (isinstance(Trf, torch.Tensor) and isinstance(pts, torch.Tensor) and + Trf.ndim == 3 and pts.ndim == 4): + d = pts.shape[3] + if Trf.shape[-1] == d: + pts = torch.einsum("bij, bhwj -> bhwi", Trf, pts) + elif Trf.shape[-1] == d + 1: + pts = torch.einsum("bij, bhwj -> bhwi", Trf[:, :d, :d], pts) + Trf[:, None, None, :d, d] + else: + raise ValueError(f'bad shape, not ending with 3 or 4, for {pts.shape=}') + else: + if Trf.ndim >= 3: + n = Trf.ndim - 2 + assert Trf.shape[:n] == pts.shape[:n], 'batch size does not match' + Trf = Trf.reshape(-1, Trf.shape[-2], Trf.shape[-1]) + + if pts.ndim > Trf.ndim: + # Trf == (B,d,d) & pts == (B,H,W,d) --> (B, H*W, d) + pts = pts.reshape(Trf.shape[0], -1, pts.shape[-1]) + elif pts.ndim == 2: + # Trf == (B,d,d) & pts == (B,d) --> (B, 1, d) + pts = pts[:, None, :] + + if pts.shape[-1] + 1 == Trf.shape[-1]: + Trf = Trf.swapaxes(-1, -2) # transpose Trf + pts = pts @ Trf[..., :-1, :] + Trf[..., -1:, :] + elif pts.shape[-1] == Trf.shape[-1]: + Trf = Trf.swapaxes(-1, -2) # transpose Trf + pts = pts @ Trf + else: + pts = Trf @ pts.T + if pts.ndim >= 2: + pts = pts.swapaxes(-1, -2) + + if norm: + pts = pts / pts[..., -1:] # DONT DO /= BECAUSE OF WEIRD PYTORCH BUG + if norm != 1: + pts *= norm + + res = pts[..., :ncol].reshape(*output_reshape, ncol) + return res + + +def inv(mat): + """ Invert a torch or numpy matrix + """ + if isinstance(mat, torch.Tensor): + return torch.linalg.inv(mat) + if isinstance(mat, np.ndarray): + return np.linalg.inv(mat) + raise ValueError(f'bad matrix type = {type(mat)}') + + +def depthmap_to_pts3d(depth, pseudo_focal, pp=None, **_): + """ + Args: + - depthmap (BxHxW array): + - pseudo_focal: [B,H,W] ; [B,2,H,W] or [B,1,H,W] + Returns: + pointmap of absolute coordinates (BxHxWx3 array) + """ + + if len(depth.shape) == 4: + B, H, W, n = depth.shape + else: + B, H, W = depth.shape + n = None + + if len(pseudo_focal.shape) == 3: # [B,H,W] + pseudo_focalx = pseudo_focaly = pseudo_focal + elif len(pseudo_focal.shape) == 4: # [B,2,H,W] or [B,1,H,W] + pseudo_focalx = pseudo_focal[:, 0] + if pseudo_focal.shape[1] == 2: + pseudo_focaly = pseudo_focal[:, 1] + else: + pseudo_focaly = pseudo_focalx + else: + raise NotImplementedError("Error, unknown input focal shape format.") + + assert pseudo_focalx.shape == depth.shape[:3] + assert pseudo_focaly.shape == depth.shape[:3] + grid_x, grid_y = xy_grid(W, H, cat_dim=0, device=depth.device)[:, None] + + # set principal point + if pp is None: + grid_x = grid_x - (W - 1) / 2 + grid_y = grid_y - (H - 1) / 2 + else: + grid_x = grid_x.expand(B, -1, -1) - pp[:, 0, None, None] + grid_y = grid_y.expand(B, -1, -1) - pp[:, 1, None, None] + + if n is None: + pts3d = torch.empty((B, H, W, 3), device=depth.device) + pts3d[..., 0] = depth * grid_x / pseudo_focalx + pts3d[..., 1] = depth * grid_y / pseudo_focaly + pts3d[..., 2] = depth + else: + pts3d = torch.empty((B, H, W, 3, n), device=depth.device) + pts3d[..., 0, :] = depth * (grid_x / pseudo_focalx)[..., None] + pts3d[..., 1, :] = depth * (grid_y / pseudo_focaly)[..., None] + pts3d[..., 2, :] = depth + return pts3d + + +def depthmap_to_camera_coordinates(depthmap, camera_intrinsics, pseudo_focal=None): + """ + Args: + - depthmap (HxW array): + - camera_intrinsics: a 3x3 
matrix
+    Returns:
+        pointmap of absolute coordinates (HxWx3 array), and a mask specifying valid pixels.
+    """
+    camera_intrinsics = np.float32(camera_intrinsics)
+    H, W = depthmap.shape
+
+    # Compute 3D ray associated with each pixel
+    # Strong assumption: there are no skew terms
+    assert camera_intrinsics[0, 1] == 0.0
+    assert camera_intrinsics[1, 0] == 0.0
+    if pseudo_focal is None:
+        fu = camera_intrinsics[0, 0]
+        fv = camera_intrinsics[1, 1]
+    else:
+        assert pseudo_focal.shape == (H, W)
+        fu = fv = pseudo_focal
+    cu = camera_intrinsics[0, 2]
+    cv = camera_intrinsics[1, 2]
+
+    u, v = np.meshgrid(np.arange(W), np.arange(H))
+    z_cam = depthmap
+    x_cam = (u - cu) * z_cam / fu
+    y_cam = (v - cv) * z_cam / fv
+    X_cam = np.stack((x_cam, y_cam, z_cam), axis=-1).astype(np.float32)
+
+    # Mask for valid coordinates
+    valid_mask = (depthmap > 0.0)
+    # note: no far-plane cutoff (e.g. depth > 80 m) is applied here; callers can
+    # filter far points via the z_far argument of depthmap_to_absolute_camera_coordinates
+    return X_cam, valid_mask
+
+
+def depthmap_to_absolute_camera_coordinates(depthmap, camera_intrinsics, camera_pose, z_far=0, **kw):
+    """
+    Args:
+        - depthmap (HxW array):
+        - camera_intrinsics: a 3x3 matrix
+        - camera_pose: a 4x3 or 4x4 cam2world matrix
+    Returns:
+        pointmap of absolute coordinates (HxWx3 array), and a mask specifying valid pixels."""
+    X_cam, valid_mask = depthmap_to_camera_coordinates(depthmap, camera_intrinsics)
+    if z_far > 0:
+        valid_mask = valid_mask & (depthmap < z_far)
+
+    X_world = X_cam  # default
+    if camera_pose is not None:
+        # R_cam2world = np.float32(camera_params["R_cam2world"])
+        # t_cam2world = np.float32(camera_params["t_cam2world"]).squeeze()
+        R_cam2world = camera_pose[:3, :3]
+        t_cam2world = camera_pose[:3, 3]
+
+        # Express in absolute coordinates (points with invalid depth are arbitrary)
+        X_world = np.einsum("ik, vuk -> vui", R_cam2world, X_cam) + t_cam2world[None, None, :]
+
+    return X_world, valid_mask
+
+
+def colmap_to_opencv_intrinsics(K):
+    """
+    Modify camera intrinsics to follow a different convention.
+    Coordinates of the center of the top-left pixels are by default:
+    - (0.5, 0.5) in Colmap
+    - (0,0) in OpenCV
+    """
+    K = K.copy()
+    K[0, 2] -= 0.5
+    K[1, 2] -= 0.5
+    return K
+
+
+def opencv_to_colmap_intrinsics(K):
+    """
+    Modify camera intrinsics to follow a different convention.
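# --- editor's note: a quick numeric check (not part of the diff) of the
# pin-hole back-projection above, with a toy 3x3 intrinsics matrix. The pixel
# at the principal point must land on the optical axis.
import numpy as np

K = np.array([[100., 0., 32.],
              [0., 100., 24.],
              [0., 0., 1.]])
depth = np.full((48, 64), 2.0, dtype=np.float32)   # constant 2 m depth
pts, valid = depthmap_to_camera_coordinates(depth, K)
assert pts.shape == (48, 64, 3) and valid.all()
assert np.allclose(pts[24, 32], [0.0, 0.0, 2.0])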
+ Coordinates of the center of the top-left pixels are by default: + - (0.5, 0.5) in Colmap + - (0,0) in OpenCV + """ + K = K.copy() + K[0, 2] += 0.5 + K[1, 2] += 0.5 + return K + + +def normalize_pointcloud(pts1, pts2, norm_mode='avg_dis', valid1=None, valid2=None, ret_factor=False): + """ renorm pointmaps pts1, pts2 with norm_mode + """ + assert pts1.ndim >= 3 and pts1.shape[-1] == 3 + assert pts2 is None or (pts2.ndim >= 3 and pts2.shape[-1] == 3) + norm_mode, dis_mode = norm_mode.split('_') + + if norm_mode == 'avg': + # gather all points together (joint normalization) + nan_pts1, nnz1 = invalid_to_zeros(pts1, valid1, ndim=3) + nan_pts2, nnz2 = invalid_to_zeros(pts2, valid2, ndim=3) if pts2 is not None else (None, 0) + all_pts = torch.cat((nan_pts1, nan_pts2), dim=1) if pts2 is not None else nan_pts1 + + # compute distance to origin + all_dis = all_pts.norm(dim=-1) + if dis_mode == 'dis': + pass # do nothing + elif dis_mode == 'log1p': + all_dis = torch.log1p(all_dis) + elif dis_mode == 'warp-log1p': + # actually warp input points before normalizing them + log_dis = torch.log1p(all_dis) + warp_factor = log_dis / all_dis.clip(min=1e-8) + H1, W1 = pts1.shape[1:-1] + pts1 = pts1 * warp_factor[:, :W1 * H1].view(-1, H1, W1, 1) + if pts2 is not None: + H2, W2 = pts2.shape[1:-1] + pts2 = pts2 * warp_factor[:, W1 * H1:].view(-1, H2, W2, 1) + all_dis = log_dis # this is their true distance afterwards + else: + raise ValueError(f'bad {dis_mode=}') + + norm_factor = all_dis.sum(dim=1) / (nnz1 + nnz2 + 1e-8) + else: + # gather all points together (joint normalization) + nan_pts1 = invalid_to_nans(pts1, valid1, ndim=3) + nan_pts2 = invalid_to_nans(pts2, valid2, ndim=3) if pts2 is not None else None + all_pts = torch.cat((nan_pts1, nan_pts2), dim=1) if pts2 is not None else nan_pts1 + + # compute distance to origin + all_dis = all_pts.norm(dim=-1) + + if norm_mode == 'avg': + norm_factor = all_dis.nanmean(dim=1) + elif norm_mode == 'median': + norm_factor = all_dis.nanmedian(dim=1).values.detach() + elif norm_mode == 'sqrt': + norm_factor = all_dis.sqrt().nanmean(dim=1)**2 + else: + raise ValueError(f'bad {norm_mode=}') + + norm_factor = norm_factor.clip(min=1e-8) + while norm_factor.ndim < pts1.ndim: + norm_factor.unsqueeze_(-1) + + res = pts1 / norm_factor + if pts2 is not None: + res = (res, pts2 / norm_factor) + if ret_factor: + res = res + (norm_factor,) + return res + + +@torch.no_grad() +def get_joint_pointcloud_depth(z1, z2, valid_mask1, valid_mask2=None, quantile=0.5): + # set invalid points to NaN + _z1 = invalid_to_nans(z1, valid_mask1).reshape(len(z1), -1) + _z2 = invalid_to_nans(z2, valid_mask2).reshape(len(z2), -1) if z2 is not None else None + _z = torch.cat((_z1, _z2), dim=-1) if z2 is not None else _z1 + + # compute median depth overall (ignoring nans) + if quantile == 0.5: + shift_z = torch.nanmedian(_z, dim=-1).values + else: + shift_z = torch.nanquantile(_z, quantile, dim=-1) + return shift_z # (B,) + + +@torch.no_grad() +def get_joint_pointcloud_center_scale(pts1, pts2, valid_mask1=None, valid_mask2=None, z_only=False, center=True): + # set invalid points to NaN + _pts1 = invalid_to_nans(pts1, valid_mask1).reshape(len(pts1), -1, 3) + _pts2 = invalid_to_nans(pts2, valid_mask2).reshape(len(pts2), -1, 3) if pts2 is not None else None + _pts = torch.cat((_pts1, _pts2), dim=1) if pts2 is not None else _pts1 + + # compute median center + _center = torch.nanmedian(_pts, dim=1, keepdim=True).values # (B,1,3) + if z_only: + _center[..., :2] = 0 # do not center X and Y + + # compute 
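# --- editor's note: a minimal sketch (not part of the diff) of 'avg_dis'
# normalization above: with all-valid masks, the renormalized pointmaps have
# an average distance-to-origin of 1 per batch element.
import torch

pts1 = 5.0 * torch.randn(2, 8, 8, 3)
pts2 = 5.0 * torch.randn(2, 8, 8, 3)
valid = torch.ones(2, 8, 8, dtype=torch.bool)
n1, n2 = normalize_pointcloud(pts1, pts2, 'avg_dis', valid1=valid, valid2=valid)
mean_dis = torch.cat([n1.flatten(1, 2), n2.flatten(1, 2)], 1).norm(dim=-1).mean(1)
assert torch.allclose(mean_dis, torch.ones(2), atol=1e-4)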
median norm
+    _norm = ((_pts - _center) if center else _pts).norm(dim=-1)
+    scale = torch.nanmedian(_norm, dim=1).values
+    return _center[:, None, :, :], scale[:, None, None, None]
+
+
+def find_reciprocal_matches(P1, P2):
+    """
+    returns 3 values:
+    1 - reciprocal_in_P2: a boolean array of size P2.shape[0], a "True" value indicates a match
+    2 - nn2_in_P1: an int array of size P2.shape[0], it contains the indexes of the closest points in P1
+    3 - reciprocal_in_P2.sum(): the number of matches
+    """
+    tree1 = KDTree(P1)
+    tree2 = KDTree(P2)
+
+    _, nn1_in_P2 = tree2.query(P1, workers=8)
+    _, nn2_in_P1 = tree1.query(P2, workers=8)
+
+    reciprocal_in_P1 = (nn2_in_P1[nn1_in_P2] == np.arange(len(nn1_in_P2)))
+    reciprocal_in_P2 = (nn1_in_P2[nn2_in_P1] == np.arange(len(nn2_in_P1)))
+    assert reciprocal_in_P1.sum() == reciprocal_in_P2.sum()
+    return reciprocal_in_P2, nn2_in_P1, reciprocal_in_P2.sum()
+
+
+def get_med_dist_between_poses(poses):
+    from scipy.spatial.distance import pdist
+    return np.median(pdist([to_numpy(p[:3, 3]) for p in poses]))
diff --git a/dynamic_predictor/dust3r/utils/goem_opt.py b/dynamic_predictor/dust3r/utils/goem_opt.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d1827e587b1fde0edae9fca7ba03b7ce434ce5e
--- /dev/null
+++ b/dynamic_predictor/dust3r/utils/goem_opt.py
@@ -0,0 +1,619 @@
+import numpy as np
+import torch
+from torch import nn
+from torch.nn import functional as F
+import math
+from scipy.spatial.transform import Rotation
+
+def tum_to_pose_matrix(pose):
+    # pose: [tx, ty, tz, qw, qx, qy, qz]
+    assert pose.shape == (7,)
+    pose_xyzw = pose[[3, 4, 5, 6]]
+    r = Rotation.from_quat(pose_xyzw)
+    return np.vstack([np.hstack([r.as_matrix(), pose[:3].reshape(-1, 1)]), [0, 0, 0, 1]])
+
+def depth_regularization_si_weighted(depth_pred, depth_init, pixel_wise_weight=None, pixel_wise_weight_scale=1, pixel_wise_weight_bias=1, eps=1e-6, pixel_weight_normalize=False):
+    # scale compute:
+    depth_pred = torch.clamp(depth_pred, min=eps)
+    depth_init = torch.clamp(depth_init, min=eps)
+    log_d_pred = torch.log(depth_pred)
+    log_d_init = torch.log(depth_init)
+    B, _, H, W = depth_pred.shape
+    scale = torch.sum(log_d_init - log_d_pred,
+                      dim=[1, 2, 3], keepdim=True)/(H*W)
+    if pixel_wise_weight is not None:
+        if pixel_weight_normalize:
+            norm = torch.max(pixel_wise_weight.detach().view(
+                B, -1), dim=1, keepdim=False)[0]
+            pixel_wise_weight = pixel_wise_weight / \
+                (norm[:, None, None, None]+eps)
+        pixel_wise_weight = pixel_wise_weight * \
+            pixel_wise_weight_scale + pixel_wise_weight_bias
+    else:
+        pixel_wise_weight = 1
+    si_loss = torch.sum(pixel_wise_weight*(log_d_pred -
+                                           log_d_init + scale)**2, dim=[1, 2, 3])/(H*W)
+    return si_loss.mean()
+
+class WarpImage(torch.nn.Module):
+    def __init__(self):
+        super(WarpImage, self).__init__()
+        self.base_coord = None
+
+    def init_grid(self, shape, device):
+        H, W = shape
+        hh, ww = torch.meshgrid(torch.arange(
+            H).float(), torch.arange(W).float())
+        coord = torch.zeros([1, H, W, 2])
+        coord[0, ..., 0] = ww
+        coord[0, ..., 1] = hh
+        self.base_coord = coord.to(device)
+        self.W = W
+        self.H = H
+
+    def warp_image(self, base_coord, img_1, flow_2_1):
+        B, C, H, W = flow_2_1.shape
+        sample_grids = base_coord + flow_2_1.permute([0, 2, 3, 1])
+        sample_grids[..., 0] /= (W - 1) / 2
+        sample_grids[..., 1] /= (H - 1) / 2
+        sample_grids -= 1
+        warped_image_2_from_1 = F.grid_sample(
+            img_1, sample_grids, align_corners=True)
+        return warped_image_2_from_1
+
+    def forward(self, img_1, flow_2_1):
+        B, _, H, W =
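# --- editor's note: a small check (not part of the diff) of the
# scale-invariant depth regularizer above: a prediction that is off only by a
# global scale factor incurs (numerically) zero loss.
import torch

d_init = torch.rand(1, 1, 8, 8) + 0.5
d_pred = 3.0 * d_init                    # wrong only by a global scale
loss = depth_regularization_si_weighted(d_pred, d_init)
assert loss.item() < 1e-6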
flow_2_1.shape + if self.base_coord is None: + self.init_grid([H, W], device=flow_2_1.device) + base_coord = self.base_coord.expand([B, -1, -1, -1]) + return self.warp_image(base_coord, img_1, flow_2_1) + + +class CameraIntrinsics(nn.Module): + def __init__(self, init_focal_length=0.45, pixel_size=None): + super().__init__() + self.focal_length = nn.Parameter(torch.tensor(init_focal_length)) + self.pixel_size_buffer = pixel_size + + def register_shape(self, orig_shape, opt_shape) -> None: + self.orig_shape = orig_shape + self.opt_shape = opt_shape + H_orig, W_orig = orig_shape + H_opt, W_opt = opt_shape + if self.pixel_size_buffer is None: + # initialize as 35mm film + pixel_size = 0.433 / (H_orig ** 2 + W_orig ** 2) ** 0.5 + else: + pixel_size = self.pixel_size_buffer + self.register_buffer("pixel_size", torch.tensor(pixel_size)) + intrinsics_mat_buffer = torch.zeros(3, 3) + intrinsics_mat_buffer[0, -1] = (W_opt - 1) / 2 + intrinsics_mat_buffer[1, -1] = (H_opt - 1) / 2 + intrinsics_mat_buffer[2, -1] = 1 + self.register_buffer("intrinsics_mat", intrinsics_mat_buffer) + self.register_buffer("scale_H", torch.tensor( + H_opt / (H_orig * pixel_size))) + self.register_buffer("scale_W", torch.tensor( + W_opt / (W_orig * pixel_size))) + + def get_K_and_inv(self, with_batch_dim=True) -> torch.Tensor: + intrinsics_mat = self.intrinsics_mat.clone() + intrinsics_mat[0, 0] = self.focal_length * self.scale_W + intrinsics_mat[1, 1] = self.focal_length * self.scale_H + inv_intrinsics_mat = torch.linalg.inv(intrinsics_mat) + if with_batch_dim: + return intrinsics_mat[None, ...], inv_intrinsics_mat[None, ...] + else: + return intrinsics_mat, inv_intrinsics_mat + + +@torch.jit.script +def hat(v: torch.Tensor) -> torch.Tensor: + """ + Compute the Hat operator [1] of a batch of 3D vectors. + + Args: + v: Batch of vectors of shape `(minibatch , 3)`. + + Returns: + Batch of skew-symmetric matrices of shape + `(minibatch, 3 , 3)` where each matrix is of the form: + `[ 0 -v_z v_y ] + [ v_z 0 -v_x ] + [ -v_y v_x 0 ]` + + Raises: + ValueError if `v` is of incorrect shape. + + [1] https://en.wikipedia.org/wiki/Hat_operator + """ + + N, dim = v.shape + # if dim != 3: + # raise ValueError("Input vectors have to be 3-dimensional.") + + h = torch.zeros((N, 3, 3), dtype=v.dtype, device=v.device) + + x, y, z = v.unbind(1) + + h[:, 0, 1] = -z + h[:, 0, 2] = y + h[:, 1, 0] = z + h[:, 1, 2] = -x + h[:, 2, 0] = -y + h[:, 2, 1] = x + + return h + + +@torch.jit.script +def get_relative_transform(src_R, src_t, tgt_R, tgt_t): + tgt_R_inv = tgt_R.permute([0, 2, 1]) + relative_R = torch.matmul(tgt_R_inv, src_R) + relative_t = torch.matmul(tgt_R_inv, src_t - tgt_t) + return relative_R, relative_t + + +def reproject_depth(src_R, src_t, src_disp, tgt_R, tgt_t, tgt_disp, K_src, K_inv_src, K_trg, K_inv_trg, coord, eps=1e-6): + """ + Convert the depth map's value to another camera pose. + input: + src_R: rotation matrix of source camera + src_t: translation vector of source camera + tgt_R: rotation matrix of target camera + tgt_t: translation vector of target camera + K: intrinsics matrix of the camera + src_disp: disparity map of source camera + tgt_disp: disparity map of target camera + coord: coordinate grids + K_inv: inverse intrinsics matrix of the camera + output: + tgt_depth_from_src: source depth map reprojected to target camera, values are ready for warping. + src_depth_from_tgt: target depth map reprojected to source camera, values are ready for warping. 
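# --- editor's note: a tiny check (not part of the diff) that hat(v) is the
# cross-product matrix, i.e. hat(v) @ w == v x w for batched 3-D vectors.
import torch

v, w = torch.randn(4, 3), torch.randn(4, 3)
cross_via_hat = torch.bmm(hat(v), w.unsqueeze(-1)).squeeze(-1)
assert torch.allclose(cross_via_hat, torch.cross(v, w, dim=-1), atol=1e-5)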
+ """ + B, _, H, W = src_disp.shape + + src_depth = 1/(src_disp + eps) + tgt_depth = 1/(tgt_disp + eps) + # project 1 to 2 + + src_depth_flat = src_depth.view([B, 1, H*W]) + src_xyz = src_depth_flat * src_R.matmul(K_inv_src.matmul(coord)) + src_t + src_xyz_at_tgt_cam = K_trg.matmul( + tgt_R.transpose(1, 2).matmul(src_xyz - tgt_t)) + tgt_depth_from_src = src_xyz_at_tgt_cam[:, 2, :].view([B, 1, H, W]) + # project 2 to 1 + tgt_depth_flat = tgt_depth.view([B, 1, H*W]) + tgt_xyz = tgt_depth_flat * tgt_R.matmul(K_inv_trg.matmul(coord)) + tgt_t + tgt_xyz_at_src_cam = K_src.matmul( + src_R.transpose(1, 2).matmul(tgt_xyz - src_t)) + src_depth_from_tgt = tgt_xyz_at_src_cam[:, 2, :].view([B, 1, H, W]) + return tgt_depth_from_src, src_depth_from_tgt + + +# @torch.jit.script +def warp_by_disp(src_R, src_t, tgt_R, tgt_t, K, src_disp, coord, inv_K, debug_mode=False, use_depth=False): + + if debug_mode: + B, C, H, W = src_disp.shape + relative_R, relative_t = get_relative_transform( + src_R, src_t, tgt_R, tgt_t) + + print(relative_t.shape) + H_mat = K.matmul(relative_R.matmul(inv_K)) # Nx3x3 + flat_disp = src_disp.view([B, 1, H * W]) # Nx1xNpoints + relative_t_flat = relative_t.expand([-1, -1, H*W]) + rot_coord = torch.matmul(H_mat, coord) + tr_coord = flat_disp * \ + torch.matmul(K, relative_t_flat) + tgt_coord = rot_coord + tr_coord + normalization_factor = (tgt_coord[:, 2:, :] + 1e-6) + rot_coord_normalized = rot_coord / normalization_factor + tr_coord_normalized = tr_coord / normalization_factor + tgt_coord_normalized = rot_coord_normalized + tr_coord_normalized + debug_info = {} + debug_info['tr_coord_normalized'] = tr_coord_normalized + debug_info['rot_coord_normalized'] = rot_coord_normalized + debug_info['tgt_coord_normalized'] = tgt_coord_normalized + debug_info['tr_coord'] = tr_coord + debug_info['rot_coord'] = rot_coord + debug_info['normalization_factor'] = normalization_factor + debug_info['relative_t_flat'] = relative_t_flat + return (tgt_coord_normalized - coord).view([B, 3, H, W]), debug_info + else: + B, C, H, W = src_disp.shape + relative_R, relative_t = get_relative_transform( + src_R, src_t, tgt_R, tgt_t) + H_mat = K.matmul(relative_R.matmul(inv_K)) # Nx3x3 + flat_disp = src_disp.view([B, 1, H * W]) # Nx1xNpoints + if use_depth: + tgt_coord = flat_disp * torch.matmul(H_mat, coord) + \ + torch.matmul(K, relative_t) + else: + tgt_coord = torch.matmul(H_mat, coord) + flat_disp * \ + torch.matmul(K, relative_t) + tgt_coord = tgt_coord / (tgt_coord[:, -1:, :] + 1e-6) + return (tgt_coord - coord).view([B, 3, H, W]), tgt_coord + + +def unproject_depth(depth, K_inv, R, t, coord): + # this need verification + B, _, H, W = depth.shape + disp_flat = depth.view([B, 1, H * W]) + xyz = disp_flat * R.matmul(K_inv.matmul(coord)) + t + return xyz.reshape([B, 3, H, W]) + + +@torch.jit.script +def _so3_exp_map(log_rot: torch.Tensor, eps: float = 0.0001): + """ + A helper function that computes the so3 exponential map and, + apart from the rotation matrix, also returns intermediate variables + that can be re-used in other functions. + """ + _, dim = log_rot.shape + # if dim != 3: + # raise ValueError("Input tensor shape has to be Nx3.") + + nrms = (log_rot * log_rot).sum(1) + # phis ... 
rotation angles + rot_angles = torch.clamp(nrms, eps).sqrt() + rot_angles_inv = 1.0 / rot_angles + fac1 = rot_angles_inv * rot_angles.sin() + fac2 = rot_angles_inv * rot_angles_inv * (1.0 - rot_angles.cos()) + skews = hat(log_rot) + skews_square = torch.bmm(skews, skews) + + R = ( + # pyre-fixme[16]: `float` has no attribute `__getitem__`. + fac1[:, None, None] * skews + + fac2[:, None, None] * skews_square + + torch.eye(3, dtype=log_rot.dtype, device=log_rot.device)[None] + ) + + return R, rot_angles, skews, skews_square + + +def quaternion_to_matrix(quaternions): + """ + Convert rotations given as quaternions to rotation matrices. + + Args: + quaternions: quaternions with real part first, + as tensor of shape (..., 4). + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + r, i, j, k = torch.unbind(quaternions, -1) + two_s = 2.0 / (quaternions * quaternions).sum(-1) + + o = torch.stack( + ( + 1 - two_s * (j * j + k * k), + two_s * (i * j - k * r), + two_s * (i * k + j * r), + two_s * (i * j + k * r), + 1 - two_s * (i * i + k * k), + two_s * (j * k - i * r), + two_s * (i * k - j * r), + two_s * (j * k + i * r), + 1 - two_s * (i * i + j * j), + ), + -1, + ) + return o.reshape(quaternions.shape[:-1] + (3, 3)) + + +class CameraPoseDeltaCollection(torch.nn.Module): + def __init__(self, number_of_points=10) -> None: + super().__init__() + zero_rotation = torch.ones([1, 3]) * 1e-3 + zero_translation = torch.zeros([1, 3, 1]) + 1e-4 + for n in range(number_of_points): + self.register_parameter( + f"delta_rotation_{n}", nn.Parameter(zero_rotation)) + self.register_parameter( + f"delta_translation_{n}", nn.Parameter(zero_translation) + ) + self.register_buffer("zero_rotation", torch.eye(3)[None, ...]) + self.register_buffer("zero_translation", torch.zeros([1, 3, 1])) + self.traced_so3_exp_map = None + self.number_of_points = number_of_points + + def get_rotation_and_translation_params(self): + rotation_params = [] + translation_params = [] + for n in range(self.number_of_points): + rotation_params.append(getattr(self, f"delta_rotation_{n}")) + translation_params.append(getattr(self, f"delta_translation_{n}")) + return rotation_params, translation_params + + def set_rotation_and_translation(self, index, rotaion_so3, translation): + delta_rotation = getattr(self, f"delta_rotation_{index}") + delta_translation = getattr(self, f"delta_translation_{index}") + delta_rotation.data = rotaion_so3.detach().clone() + delta_translation.data = translation.detach().clone() + + def set_first_frame_pose(self, R, t): + self.zero_rotation.data = R.detach().clone().reshape([1, 3, 3]) + self.zero_translation.data = t.detach().clone().reshape([1, 3, 1]) + + def get_raw_value(self, index): + so3 = getattr(self, f"delta_rotation_{index}") + translation = getattr(self, f"delta_translation_{index}") + return so3, translation + + def forward(self, list_of_index): + se_3 = [] + t_out = [] + for idx in list_of_index: + delta_rotation, delta_translation = self.get_raw_value(idx) + se_3.append(delta_rotation) + t_out.append(delta_translation) + se_3 = torch.cat(se_3, dim=0) + t_out = torch.cat(t_out, dim=0) + if self.traced_so3_exp_map is None: + self.traced_so3_exp_map = torch.jit.trace( + _so3_exp_map, (se_3,)) + R_out = _so3_exp_map(se_3)[0] + return R_out, t_out + + def forward_index(self, index): + # if index == 0: + # return self.zero_rotation, self.zero_translation + # else: + delta_rotation, delta_translation = self.get_raw_value(index) + if self.traced_so3_exp_map is None: + self.traced_so3_exp_map = 
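# --- editor's note: a sanity check (not part of the diff) of
# quaternion_to_matrix above; quaternions are (w, x, y, z), real part first.
import math
import torch

assert torch.allclose(quaternion_to_matrix(torch.tensor([1.0, 0.0, 0.0, 0.0])),
                      torch.eye(3), atol=1e-6)
h = math.sqrt(0.5)                                  # 90 deg rotation about z
Rz90 = quaternion_to_matrix(torch.tensor([h, 0.0, 0.0, h]))
assert torch.allclose(Rz90 @ torch.tensor([1.0, 0.0, 0.0]),
                      torch.tensor([0.0, 1.0, 0.0]), atol=1e-6)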
torch.jit.trace( + _so3_exp_map, (delta_rotation,)) + R = _so3_exp_map(delta_rotation)[0] + return R, delta_translation + + +class DepthScaleShiftCollection(torch.nn.Module): + def __init__(self, n_points=10, use_inverse=False, grid_size=1): + super().__init__() + self.grid_size = grid_size + for n in range(n_points): + self.register_parameter( + f"shift_{n}", nn.Parameter(torch.FloatTensor([0.0])) + ) + self.register_parameter( + f"scale_{n}", nn.Parameter( + torch.ones([1, 1, grid_size, grid_size])) + ) + + self.use_inverse = use_inverse + self.output_shape = None + + def set_outputshape(self, output_shape): + self.output_shape = output_shape + + def forward(self, index): + shift = getattr(self, f"shift_{index}") + scale = getattr(self, f"scale_{index}") + if self.use_inverse: + scale = torch.exp(scale) # 1 / (scale ** 4) + if self.grid_size != 1: + scale = F.interpolate(scale, self.output_shape, + mode='bilinear', align_corners=True) + return scale, shift + + def set_scale(self, index, scale): + scale_param = getattr(self, f"scale_{index}") + if self.use_inverse: + scale = math.log(scale) # (1 / scale) ** 0.25 + scale_param.data.fill_(scale) + + def get_scale_data(self, index): + scale = getattr(self, f"scale_{index}").data + if self.use_inverse: + scale = torch.exp(scale) # 1 / (scale ** 4) + if self.grid_size != 1: + scale = F.interpolate(scale, self.output_shape, + mode='bilinear', align_corners=True) + return scale + + +def check_R_shape(R): + r0, r1, r2 = R.shape + assert r1 == 3 and r2 == 3 + + +def check_t_shape(t): + t0, t1, t2 = t.shape + assert t1 == 3 and t2 == 1 + + +class DepthBasedWarping(nn.Module): + # tested + def __init__(self) -> None: + super().__init__() + + def generate_grid(self, H, W, device): + yy, xx = torch.meshgrid( + torch.arange(H, device=device, dtype=torch.float32), + torch.arange(W, device=device, dtype=torch.float32), + ) + self.coord = torch.ones( + [1, 3, H, W], device=device, dtype=torch.float32) + self.coord[0, 0, ...] = xx + self.coord[0, 1, ...] 
= yy + self.coord = self.coord.reshape([1, 3, H * W]) + self.jitted_warp_by_disp = None + + def reproject_depth(self, src_R, src_t, src_disp, tgt_R, tgt_t, tgt_disp, K_src, K_inv_src, K_trg, K_inv_trg, eps=1e-6, check_shape=False): + if check_shape: + check_R_shape(src_R) + check_R_shape(tgt_R) + check_t_shape(src_t) + check_t_shape(tgt_t) + check_t_shape(src_disp) + check_t_shape(tgt_disp) + device = src_disp.device + B, _, H, W = src_disp.shape + if not hasattr(self, "coord"): + self.generate_grid(src_disp.shape[2], src_disp.shape[3], device) + else: + if self.coord.shape[-1] != H * W: + del self.coord + self.generate_grid(H, W, device) + return reproject_depth(src_R, src_t, src_disp, tgt_R, tgt_t, tgt_disp, K_src, K_inv_src, K_trg, K_inv_trg, self.coord, eps=eps) + + def unproject_depth(self, disp, R, t, K_inv, eps=1e-6, check_shape=False): + if check_shape: + check_R_shape(R) + check_R_shape(t) + + _, _, H, W = disp.shape + B = R.shape[0] + device = disp.device + if not hasattr(self, "coord"): + self.generate_grid(H, W, device=device) + else: + if self.coord.shape[-1] != H * W: + del self.coord + self.generate_grid(H, W, device=device) + # if self.jitted_warp_by_disp is None: + # self.jitted_warp_by_disp = torch.jit.trace( + # warp_by_disp, (src_R.detach(), src_t.detach(), tgt_R.detach(), tgt_t.detach(), K, src_disp.detach(), self.coord, inv_K)) + return unproject_depth(1 / (disp + eps), K_inv, R, t, self.coord) + + def forward( + self, + src_R, + src_t, + tgt_R, + tgt_t, + src_disp, + K, + inv_K, + eps=1e-6, + use_depth=False, + check_shape=False, + debug_mode=False, + ): + """warp the current depth frame and generate flow field. + + Args: + src_R (FloatTensor): 1x3x3 + src_t (FloatTensor): 1x3x1 + tgt_R (FloatTensor): Nx3x3 + tgt_t (FloatTensor): Nx3x1 + src_disp (FloatTensor): Nx1XHxW + src_K (FloatTensor): 1x3x3 + """ + if check_shape: + check_R_shape(src_R) + check_R_shape(tgt_R) + check_t_shape(src_t) + check_t_shape(tgt_t) + + _, _, H, W = src_disp.shape + B = tgt_R.shape[0] + device = src_disp.device + if not hasattr(self, "coord"): + self.generate_grid(H, W, device=device) + else: + if self.coord.shape[-1] != H * W: + del self.coord + self.generate_grid(H, W, device=device) + # if self.jitted_warp_by_disp is None: + # self.jitted_warp_by_disp = torch.jit.trace( + # warp_by_disp, (src_R.detach(), src_t.detach(), tgt_R.detach(), tgt_t.detach(), K, src_disp.detach(), self.coord, inv_K)) + + return warp_by_disp(src_R, src_t, tgt_R, tgt_t, K, src_disp, self.coord, inv_K, debug_mode, use_depth) + + +class DepthToXYZ(nn.Module): + # tested + def __init__(self) -> None: + super().__init__() + + def generate_grid(self, H, W, device): + yy, xx = torch.meshgrid( + torch.arange(H, device=device, dtype=torch.float32), + torch.arange(W, device=device, dtype=torch.float32), + ) + self.coord = torch.ones( + [1, 3, H, W], device=device, dtype=torch.float32) + self.coord[0, 0, ...] = xx + self.coord[0, 1, ...] = yy + self.coord = self.coord.reshape([1, 3, H * W]) + + def forward(self, disp, K_inv, R, t, eps=1e-6, check_shape=False): + """warp the current depth frame and generate flow field. 
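# --- editor's note: a shape-level sketch (not part of the diff) of
# DepthBasedWarping: given a source/target pose pair, shared intrinsics and a
# source disparity map, it returns a 3-channel correspondence field whose
# first two channels are the camera-motion-induced optical flow.
import torch

warper = DepthBasedWarping()
K = torch.eye(3)[None]
K[0, 0, 0] = K[0, 1, 1] = 100.0                    # fx, fy
K[0, 0, 2], K[0, 1, 2] = 32.0, 24.0                # cx, cy
inv_K = torch.linalg.inv(K)
src_R, src_t = torch.eye(3)[None], torch.zeros(1, 3, 1)
tgt_R, tgt_t = torch.eye(3)[None], torch.tensor([[[0.1], [0.0], [0.0]]])
disp = torch.full((1, 1, 48, 64), 0.5)             # disparity = 1 / depth
flow, _ = warper(src_R, src_t, tgt_R, tgt_t, disp, K, inv_K)
assert flow.shape == (1, 3, 48, 64)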
+
+        Args:
+            disp (FloatTensor): Nx1xHxW disparity map
+            K_inv (FloatTensor): 1x3x3 inverse camera intrinsics
+            R (FloatTensor): Nx3x3 cam2world rotation
+            t (FloatTensor): Nx3x1 cam2world translation
+        """
+        if check_shape:
+            check_R_shape(R)
+            check_t_shape(t)
+
+        _, _, H, W = disp.shape
+        B = R.shape[0]
+        device = disp.device
+        if not hasattr(self, "coord"):
+            self.generate_grid(H, W, device=device)
+        else:
+            if self.coord.shape[-1] != H * W:
+                del self.coord
+                self.generate_grid(H, W, device=device)
+        # if self.jitted_warp_by_disp is None:
+        #     self.jitted_warp_by_disp = torch.jit.trace(
+        #         warp_by_disp, (src_R.detach(), src_t.detach(), tgt_R.detach(), tgt_t.detach(), K, src_disp.detach(), self.coord, inv_K))
+
+        return unproject_depth(1 / (disp + eps), K_inv, R, t, self.coord)
+
+class OccMask(torch.nn.Module):
+    def __init__(self, th=3):
+        super(OccMask, self).__init__()
+        self.th = th
+        self.base_coord = None
+
+    def init_grid(self, shape, device):
+        H, W = shape
+        hh, ww = torch.meshgrid(torch.arange(
+            H).float(), torch.arange(W).float())
+        coord = torch.zeros([1, H, W, 2])
+        coord[0, ..., 0] = ww
+        coord[0, ..., 1] = hh
+        self.base_coord = coord.to(device)
+        self.W = W
+        self.H = H
+
+    @torch.no_grad()
+    def get_oob_mask(self, base_coord, flow_1_2):
+        target_range = base_coord + flow_1_2.permute([0, 2, 3, 1])
+        oob_mask = (target_range[..., 0] < 0) | (target_range[..., 0] > self.W-1) | (
+            target_range[..., 1] < 0) | (target_range[..., 1] > self.H-1)
+        return ~oob_mask[:, None, ...]
+
+    @torch.no_grad()
+    def get_flow_inconsistency_tensor(self, base_coord, flow_1_2, flow_2_1):
+        B, C, H, W = flow_1_2.shape
+        sample_grids = base_coord + flow_1_2.permute([0, 2, 3, 1])
+        sample_grids[..., 0] /= (W - 1) / 2
+        sample_grids[..., 1] /= (H - 1) / 2
+        sample_grids -= 1
+        sampled_flow = F.grid_sample(
+            flow_2_1, sample_grids, align_corners=True)
+        return torch.abs((sampled_flow+flow_1_2).sum(1, keepdim=True))
+
+    def forward(self, flow_1_2, flow_2_1):
+        B, _, H, W = flow_1_2.shape
+        if self.base_coord is None:
+            self.init_grid([H, W], device=flow_1_2.device)
+        base_coord = self.base_coord.expand([B, -1, -1, -1])
+        oob_mask = self.get_oob_mask(base_coord, flow_1_2)
+        flow_inconsistency_tensor = self.get_flow_inconsistency_tensor(
+            base_coord, flow_1_2, flow_2_1)
+        valid_flow_mask = flow_inconsistency_tensor < self.th
+        return valid_flow_mask*oob_mask
\ No newline at end of file
diff --git a/dynamic_predictor/dust3r/utils/image.py b/dynamic_predictor/dust3r/utils/image.py
new file mode 100644
index 0000000000000000000000000000000000000000..54855bccba57a9cf46b853c2288e8257dac0e7d8
--- /dev/null
+++ b/dynamic_predictor/dust3r/utils/image.py
@@ -0,0 +1,323 @@
+# Copyright (C) 2024-present Naver Corporation. All rights reserved.
+# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).
+#
+# --------------------------------------------------------
+# utility functions about images (loading/converting...)
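# --- editor's note: a minimal sketch (not part of the diff) of OccMask: a
# perfectly consistent zero flow pair is valid everywhere, while flow that
# pushes every pixel out of bounds is masked out entirely.
import torch

occ = OccMask(th=3)
flow_1_2 = torch.zeros(1, 2, 32, 32)
flow_2_1 = torch.zeros(1, 2, 32, 32)
assert occ(flow_1_2, flow_2_1).all()
flow_1_2[:, 0] = 100.0                    # move every pixel off the image
assert not occ(flow_1_2, flow_2_1).any()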
+# -------------------------------------------------------- +import os +import torch +import numpy as np +import PIL.Image +from PIL.ImageOps import exif_transpose +import torchvision.transforms as tvf +os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1" +import cv2 # noqa +import glob +import imageio +import matplotlib.pyplot as plt + +try: + from pillow_heif import register_heif_opener # noqa + register_heif_opener() + heif_support_enabled = True +except ImportError: + heif_support_enabled = False + +ImgNorm = tvf.Compose([tvf.ToTensor(), tvf.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) +ToTensor = tvf.ToTensor() +TAG_FLOAT = 202021.25 + +def depth_read(filename): + """ Read depth data from file, return as numpy array. """ + f = open(filename,'rb') + check = np.fromfile(f,dtype=np.float32,count=1)[0] + assert check == TAG_FLOAT, ' depth_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? '.format(TAG_FLOAT,check) + width = np.fromfile(f,dtype=np.int32,count=1)[0] + height = np.fromfile(f,dtype=np.int32,count=1)[0] + size = width*height + assert width > 0 and height > 0 and size > 1 and size < 100000000, ' depth_read:: Wrong input size (width = {0}, height = {1}).'.format(width,height) + depth = np.fromfile(f,dtype=np.float32,count=-1).reshape((height,width)) + return depth + +def cam_read(filename): + """ Read camera data, return (M,N) tuple. + + M is the intrinsic matrix, N is the extrinsic matrix, so that + + x = M*N*X, + with x being a point in homogeneous image pixel coordinates, X being a + point in homogeneous world coordinates. + """ + f = open(filename,'rb') + check = np.fromfile(f,dtype=np.float32,count=1)[0] + assert check == TAG_FLOAT, ' cam_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? '.format(TAG_FLOAT,check) + M = np.fromfile(f,dtype='float64',count=9).reshape((3,3)) + N = np.fromfile(f,dtype='float64',count=12).reshape((3,4)) + return M,N + +def flow_read(filename): + """ Read optical flow from file, return (U,V) tuple. + + Original code by Deqing Sun, adapted from Daniel Scharstein. + """ + f = open(filename,'rb') + check = np.fromfile(f,dtype=np.float32,count=1)[0] + assert check == TAG_FLOAT, ' flow_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? '.format(TAG_FLOAT,check) + width = np.fromfile(f,dtype=np.int32,count=1)[0] + height = np.fromfile(f,dtype=np.int32,count=1)[0] + size = width*height + assert width > 0 and height > 0 and size > 1 and size < 100000000, ' flow_read:: Wrong input size (width = {0}, height = {1}).'.format(width,height) + tmp = np.fromfile(f,dtype=np.float32,count=-1).reshape((height,width*2)) + u = tmp[:,np.arange(width)*2] + v = tmp[:,np.arange(width)*2 + 1] + return u,v + +def img_to_arr( img ): + if isinstance(img, str): + img = imread_cv2(img) + return img + +def imread_cv2(path, options=cv2.IMREAD_COLOR): + """ Open an image or a depthmap with opencv-python. 
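# --- editor's note: a round-trip sketch (not part of the diff) of the Sintel
# .dpt layout that depth_read expects: a float32 TAG_FLOAT, int32 width and
# height, then row-major float32 depth values. The file name is illustrative.
import os
import tempfile
import numpy as np

depth = np.random.rand(4, 6).astype(np.float32)
path = os.path.join(tempfile.gettempdir(), 'toy.dpt')
with open(path, 'wb') as f:
    np.array(TAG_FLOAT, dtype=np.float32).tofile(f)
    np.array(depth.shape[1], dtype=np.int32).tofile(f)   # width
    np.array(depth.shape[0], dtype=np.int32).tofile(f)   # height
    depth.tofile(f)
assert np.array_equal(depth_read(path), depth)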
+ """ + if path.endswith(('.exr', 'EXR')): + options = cv2.IMREAD_ANYDEPTH + img = cv2.imread(path, options) + if img is None: + raise IOError(f'Could not load image={path} with {options=}') + if img.ndim == 3: + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + return img + + +def rgb(ftensor, true_shape=None): + if isinstance(ftensor, list): + return [rgb(x, true_shape=true_shape) for x in ftensor] + if isinstance(ftensor, torch.Tensor): + ftensor = ftensor.detach().cpu().numpy() # H,W,3 + if ftensor.ndim == 3 and ftensor.shape[0] == 3: + ftensor = ftensor.transpose(1, 2, 0) + elif ftensor.ndim == 4 and ftensor.shape[1] == 3: + ftensor = ftensor.transpose(0, 2, 3, 1) + if true_shape is not None: + H, W = true_shape + ftensor = ftensor[:H, :W] + # if ftensor.dtype == np.uint8: + # img = np.float32(ftensor) / 255 + # else: + # + img = (ftensor * 0.5) + 0.5 + return img.clip(min=0, max=1) + + +def _resize_pil_image(img, long_edge_size, nearest=False): + S = max(img.size) + if S > long_edge_size: + interp = PIL.Image.LANCZOS if not nearest else PIL.Image.NEAREST + elif S <= long_edge_size: + interp = PIL.Image.BICUBIC + new_size = tuple(int(round(x*long_edge_size/S)) for x in img.size) + return img.resize(new_size, interp) + + +def crop_img(img, size, square_ok=False, nearest=False, crop=True): + W1, H1 = img.size + if size == 224: + # resize short side to 224 (then crop) + img = _resize_pil_image(img, round(size * max(W1/H1, H1/W1)), nearest=nearest) + else: + # resize long side to 512 + img = _resize_pil_image(img, size, nearest=nearest) + W, H = img.size + cx, cy = W//2, H//2 + if size == 224: + half = min(cx, cy) + img = img.crop((cx-half, cy-half, cx+half, cy+half)) + else: + halfw, halfh = ((2*cx)//16)*8, ((2*cy)//16)*8 + if not (square_ok) and W == H: + halfh = 3*halfw/4 + if crop: + img = img.crop((cx-halfw, cy-halfh, cx+halfw, cy+halfh)) + else: # resize + img = img.resize((2*halfw, 2*halfh), PIL.Image.LANCZOS) + return img + +def load_images(folder_or_list, size, square_ok=False, verbose=True, dynamic_mask_root=None, crop=True, fps=0, num_frames=110): + """Open and convert all images or videos in a list or folder to proper input format for DUSt3R.""" + if isinstance(folder_or_list, str): + if verbose: + print(f'>> Loading images from {folder_or_list}') + # if folder_or_list is a folder, load all images in the folder + if os.path.isdir(folder_or_list): + root, folder_content = folder_or_list, sorted(os.listdir(folder_or_list)) + else: # the folder_content will be the folder_or_list itself + root, folder_content = '', [folder_or_list] + + elif isinstance(folder_or_list, list): + if verbose: + print(f'>> Loading a list of {len(folder_or_list)} items') + root, folder_content = '', folder_or_list + + else: + raise ValueError(f'Bad input {folder_or_list=} ({type(folder_or_list)})') + + supported_images_extensions = ['.jpg', '.jpeg', '.png'] + supported_video_extensions = ['.mp4', '.avi', '.mov'] + if heif_support_enabled: + supported_images_extensions += ['.heic', '.heif'] + supported_images_extensions = tuple(supported_images_extensions) + supported_video_extensions = tuple(supported_video_extensions) + + imgs = [] + # Sort items by their names + folder_content = sorted(folder_content, key=lambda x: x.split('/')[-1]) + for path in folder_content: + full_path = os.path.join(root, path) + if path.lower().endswith(supported_images_extensions): + # Process image files + img = exif_transpose(PIL.Image.open(full_path)).convert('RGB') + W1, H1 = img.size + img = crop_img(img, size, 
square_ok=square_ok, crop=crop) + W2, H2 = img.size + + if verbose: + print(f' - Adding {path} with resolution {W1}x{H1} --> {W2}x{H2}') + + single_dict = dict( + img=ImgNorm(img)[None], + true_shape=np.int32([img.size[::-1]]), + idx=len(imgs), + instance=full_path, + mask=~(ToTensor(img)[None].sum(1) <= 0.01) + ) + + if dynamic_mask_root is not None: + dynamic_mask_path = os.path.join(dynamic_mask_root, os.path.basename(path)) + else: # Sintel dataset handling + dynamic_mask_path = full_path.replace('final', 'dynamic_label_perfect').replace('clean', 'dynamic_label_perfect') + + if os.path.exists(dynamic_mask_path): + dynamic_mask = PIL.Image.open(dynamic_mask_path).convert('L') + dynamic_mask = crop_img(dynamic_mask, size, square_ok=square_ok) + dynamic_mask = ToTensor(dynamic_mask)[None].sum(1) > 0.99 # "1" means dynamic + if dynamic_mask.sum() < 0.8 * dynamic_mask.numel(): # Consider static if over 80% is dynamic + single_dict['dynamic_mask'] = dynamic_mask + else: + single_dict['dynamic_mask'] = torch.zeros_like(single_dict['mask']) + else: + single_dict['dynamic_mask'] = torch.zeros_like(single_dict['mask']) + + imgs.append(single_dict) + + elif path.lower().endswith(supported_video_extensions): + # Process video files + if verbose: + print(f'>> Loading video from {full_path}') + cap = cv2.VideoCapture(full_path) + if not cap.isOpened(): + print(f'Error opening video file {full_path}') + continue + + video_fps = cap.get(cv2.CAP_PROP_FPS) + total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + if video_fps == 0: + print(f'Error: Video FPS is 0 for {full_path}') + cap.release() + continue + if fps > 0: + frame_interval = max(1, int(round(video_fps / fps))) + else: + frame_interval = 1 + frame_indices = list(range(0, total_frames, frame_interval)) + if num_frames is not None: + frame_indices = frame_indices[:num_frames] + + if verbose: + print(f' - Video FPS: {video_fps}, Frame Interval: {frame_interval}, Total Frames to Read: {len(frame_indices)}') + + for frame_idx in frame_indices: + cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx) + ret, frame = cap.read() + if not ret: + break # End of video + + img = PIL.Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) + W1, H1 = img.size + img = crop_img(img, size, square_ok=square_ok, crop=crop) + W2, H2 = img.size + + if verbose: + print(f' - Adding frame {frame_idx} from {path} with resolution {W1}x{H1} --> {W2}x{H2}') + + single_dict = dict( + img=ImgNorm(img)[None], + true_shape=np.int32([img.size[::-1]]), + idx=len(imgs), + instance=f'{full_path}_frame_{frame_idx}', + mask=~(ToTensor(img)[None].sum(1) <= 0.01) + ) + + # Dynamic masks for video frames are set to zeros by default + single_dict['dynamic_mask'] = torch.zeros_like(single_dict['mask']) + + imgs.append(single_dict) + + cap.release() + + else: + continue # Skip unsupported file types + + assert imgs, 'No images found at ' + root + if verbose: + print(f' (Found {len(imgs)} images)') + return imgs + +def enlarge_seg_masks(folder, kernel_size=5, prefix="dynamic_mask"): + mask_pathes = glob.glob(f'{folder}/{prefix}_*.png') + for mask_path in mask_pathes: + mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE) + kernel = np.ones((kernel_size, kernel_size),np.uint8) + enlarged_mask = cv2.dilate(mask, kernel, iterations=1) + cv2.imwrite(mask_path.replace(prefix, 'enlarged_dynamic_mask'), enlarged_mask) + +def show_mask(mask, ax, obj_id=None, random_color=False): + if random_color: + color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0) + else: + cmap = 
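# --- editor's note: illustrative usage only (not part of the diff); the
# folder path is hypothetical. Each returned dict carries the normalized image
# tensor, its true (H, W), an instance id, a non-black-pixel mask and a
# dynamic mask (all zeros when no mask file accompanies the image).
imgs = load_images('path/to/frames', size=512, verbose=True)
print(len(imgs), imgs[0]['img'].shape, imgs[0]['dynamic_mask'].shape)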
plt.get_cmap("tab10") + cmap_idx = 1 if obj_id is None else obj_id + color = np.array([*cmap(cmap_idx)[:3], 0.6]) + h, w = mask.shape[-2:] + mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1) + ax.imshow(mask_image) + +def get_overlaied_gif(folder, img_format="frame_*.png", mask_format="dynamic_mask_*.png", output_path="_overlaied.gif"): + img_paths = glob.glob(f'{folder}/{img_format}') + mask_paths = glob.glob(f'{folder}/{mask_format}') + assert len(img_paths) == len(mask_paths), f"Number of images and masks should be the same, got {len(img_paths)} images and {len(mask_paths)} masks" + img_paths = sorted(img_paths) + mask_paths = sorted(mask_paths, key=lambda x: int(x.split('_')[-1].split('.')[0])) + frames = [] + for img_path, mask_path in zip(img_paths, mask_paths): + # Read image and convert to RGB for Matplotlib + img = cv2.imread(img_path) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + # Read mask and normalize + mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE) + mask = mask.astype(np.float32) / 255.0 + # Create figure and axis + fig, ax = plt.subplots(figsize=(img.shape[1]/100, img.shape[0]/100), dpi=100) + ax.imshow(img) + # Overlay mask using show_mask + show_mask(mask, ax) + ax.axis('off') + # Render the figure to a numpy array + fig.canvas.draw() + img_array = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8) + img_array = img_array.reshape(fig.canvas.get_width_height()[::-1] + (3,)) + frames.append(img_array) + plt.close(fig) # Close the figure to free memory + # Save frames as a GIF using imageio + imageio.mimsave(os.path.join(folder,output_path), frames, fps=10) diff --git a/dynamic_predictor/dust3r/utils/misc.py b/dynamic_predictor/dust3r/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..be0051e9cba3286da8305012674668e437e9f72b --- /dev/null +++ b/dynamic_predictor/dust3r/utils/misc.py @@ -0,0 +1,200 @@ +# -------------------------------------------------------- +# utilitary functions for DUSt3R +# -------------------------------------------------------- +import torch +import cv2 +import numpy as np +from dust3r.utils.vo_eval import save_trajectory_tum_format +from PIL import Image + +def get_stride_distribution(strides, dist_type='uniform'): + + # input strides sorted by descreasing order by default + + if dist_type == 'uniform': + dist = np.ones(len(strides)) / len(strides) + elif dist_type == 'exponential': + lambda_param = 1.0 + dist = np.exp(-lambda_param * np.arange(len(strides))) + elif dist_type.startswith('linear'): # e.g., linear_1_2 + try: + start, end = map(float, dist_type.split('_')[1:]) + dist = np.linspace(start, end, len(strides)) + except ValueError: + raise ValueError(f'Invalid linear distribution format: {dist_type}') + else: + raise ValueError('Unknown distribution type %s' % dist_type) + + # normalize to sum to 1 + return dist / np.sum(dist) + + +def fill_default_args(kwargs, func): + import inspect # a bit hacky but it works reliably + signature = inspect.signature(func) + + for k, v in signature.parameters.items(): + if v.default is inspect.Parameter.empty: + continue + kwargs.setdefault(k, v.default) + + return kwargs + + +def freeze_all_params(modules): + for module in modules: + try: + for n, param in module.named_parameters(): + param.requires_grad = False + except AttributeError: + # module is directly a parameter + module.requires_grad = False + + +def is_symmetrized(gt1, gt2): + x = gt1['instance'] + y = gt2['instance'] + if len(x) == len(y) and len(x) == 1: + return False # special case of 
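# --- editor's note: a quick illustration (not part of the diff) of the three
# stride distributions supported above; each returns weights that sum to 1.
import numpy as np

strides = [8, 4, 2, 1]
print(get_stride_distribution(strides, 'uniform'))      # [0.25 0.25 0.25 0.25]
print(get_stride_distribution(strides, 'exponential'))  # exp(-i), normalized
print(get_stride_distribution(strides, 'linear_1_2'))   # ramp 1 -> 2, normalized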
batchsize 1 + ok = True + for i in range(0, len(x), 2): + ok = ok and (x[i] == y[i + 1]) and (x[i + 1] == y[i]) + return ok + + +def flip(tensor): + """ flip so that tensor[0::2] <=> tensor[1::2] """ + return torch.stack((tensor[1::2], tensor[0::2]), dim=1).flatten(0, 1) + + +def interleave(tensor1, tensor2): + res1 = torch.stack((tensor1, tensor2), dim=1).flatten(0, 1) + res2 = torch.stack((tensor2, tensor1), dim=1).flatten(0, 1) + return res1, res2 + + +def transpose_to_landscape(head, activate=True): + """ Predict in the correct aspect-ratio, + then transpose the result in landscape + and stack everything back together. + """ + def wrapper_no(decout, true_shape): + B = len(true_shape) + assert true_shape[0:1].allclose(true_shape), 'true_shape must be all identical' + H, W = true_shape[0].cpu().tolist() + res = head(decout, (H, W)) + return res + + def wrapper_yes(decout, true_shape): + B = len(true_shape) + # by definition, the batch is in landscape mode so W >= H + H, W = int(true_shape.min()), int(true_shape.max()) + + height, width = true_shape.T + is_landscape = (width >= height) + is_portrait = ~is_landscape + + # true_shape = true_shape.cpu() + if is_landscape.all(): + return head(decout, (H, W)) + if is_portrait.all(): + return transposed(head(decout, (W, H))) + + # batch is a mix of both portraint & landscape + def selout(ar): return [d[ar] for d in decout] + l_result = head(selout(is_landscape), (H, W)) + p_result = transposed(head(selout(is_portrait), (W, H))) + + # allocate full result + result = {} + for k in l_result | p_result: + x = l_result[k].new(B, *l_result[k].shape[1:]) + x[is_landscape] = l_result[k] + x[is_portrait] = p_result[k] + result[k] = x + + return result + + return wrapper_yes if activate else wrapper_no + + +def transposed(dic): + return {k: v.swapaxes(1, 2) for k, v in dic.items()} + + +def invalid_to_nans(arr, valid_mask, ndim=999): + if valid_mask is not None: + arr = arr.clone() + arr[~valid_mask] = float('nan') + if arr.ndim > ndim: + arr = arr.flatten(-2 - (arr.ndim - ndim), -2) + return arr + + +def invalid_to_zeros(arr, valid_mask, ndim=999): + if valid_mask is not None: + arr = arr.clone() + arr[~valid_mask] = 0 + nnz = valid_mask.view(len(valid_mask), -1).sum(1) + else: + nnz = arr.numel() // len(arr) if len(arr) else 0 # number of point per image + if arr.ndim > ndim: + arr = arr.flatten(-2 - (arr.ndim - ndim), -2) + return arr, nnz + +def save_tum_poses(traj, path): + # traj = self.get_tum_poses() + save_trajectory_tum_format(traj, path) + return traj[0] # return the poses + +def save_focals(focals, path): + # convert focal to txt + # focals = self.get_focals() + np.savetxt(path, focals.detach().cpu().numpy(), fmt='%.6f') + return focals + +def save_intrinsics(K_raw, path): + # K_raw = self.get_intrinsics() + K = K_raw.reshape(-1, 9) + np.savetxt(path, K.detach().cpu().numpy(), fmt='%.6f') + return K_raw + +def save_conf_maps(conf, path): + # conf = self.get_conf() + for i, c in enumerate(conf): + np.save(f'{path}/conf_{i}.npy', c.detach().cpu().numpy()) + return conf + +def save_rgb_imgs(imgs, path): + # imgs = self.imgs + for i, img in enumerate(imgs): + # convert from rgb to bgr + img = img[..., ::-1] + cv2.imwrite(f'{path}/frame_{i:04d}.png', img*255) + return imgs + +def save_dynamic_masks(dynamic_masks, path): + # dynamic_masks = self.dynamic_masks + for i, dynamic_mask in enumerate(dynamic_masks): + cv2.imwrite(f'{path}/dynamic_mask_{i}.png', (dynamic_mask * 255).detach().cpu().numpy().astype(np.uint8)) + return dynamic_masks + +def 
save_depth_maps(depth_maps, path): + images = [] + for i, depth_map in enumerate(depth_maps): + depth_map_colored = cv2.applyColorMap((depth_map * 255).detach().cpu().numpy().astype(np.uint8), cv2.COLORMAP_JET) + img_path = f'{path}/frame_{(i):04d}.png' + cv2.imwrite(img_path, depth_map_colored) + images.append(Image.open(img_path)) + # Save npy file + np.save(f'{path}/frame_{(i):04d}.npy', depth_map.detach().cpu().numpy()) + + # Save gif using Pillow + images[0].save(f'{path}/_depth_maps.gif', save_all=True, append_images=images[1:], duration=100, loop=0) + return depth_maps + +def to_cpu(x): + if isinstance(x, torch.Tensor): + return x.detach().cpu() + if isinstance(x, list): + return [to_cpu(xx) for xx in x] \ No newline at end of file diff --git a/dynamic_predictor/dust3r/utils/parallel.py b/dynamic_predictor/dust3r/utils/parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..06ae7fefdb9d2298929f0cbc20dfbc57eb7d7f7b --- /dev/null +++ b/dynamic_predictor/dust3r/utils/parallel.py @@ -0,0 +1,79 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). +# +# -------------------------------------------------------- +# utilitary functions for multiprocessing +# -------------------------------------------------------- +from tqdm import tqdm +from multiprocessing.dummy import Pool as ThreadPool +from multiprocessing import cpu_count + + +def parallel_threads(function, args, workers=0, star_args=False, kw_args=False, front_num=1, Pool=ThreadPool, **tqdm_kw): + """ tqdm but with parallel execution. + + Will essentially return + res = [ function(arg) # default + function(*arg) # if star_args is True + function(**arg) # if kw_args is True + for arg in args] + + Note: + the first elements of args will not be parallelized. + This can be useful for debugging. 
+ """ + while workers <= 0: + workers += cpu_count() + if workers == 1: + front_num = float('inf') + + # convert into an iterable + try: + n_args_parallel = len(args) - front_num + except TypeError: + n_args_parallel = None + args = iter(args) + + # sequential execution first + front = [] + while len(front) < front_num: + try: + a = next(args) + except StopIteration: + return front # end of the iterable + front.append(function(*a) if star_args else function(**a) if kw_args else function(a)) + + # then parallel execution + out = [] + with Pool(workers) as pool: + # Pass the elements of args into function + if star_args: + futures = pool.imap(starcall, [(function, a) for a in args]) + elif kw_args: + futures = pool.imap(starstarcall, [(function, a) for a in args]) + else: + futures = pool.imap(function, args) + # Print out the progress as tasks complete + for f in tqdm(futures, total=n_args_parallel, **tqdm_kw): + out.append(f) + return front + out + + +def parallel_processes(*args, **kwargs): + """ Same as parallel_threads, with processes + """ + import multiprocessing as mp + kwargs['Pool'] = mp.Pool + return parallel_threads(*args, **kwargs) + + +def starcall(args): + """ convenient wrapper for Process.Pool """ + function, args = args + return function(*args) + + +def starstarcall(args): + """ convenient wrapper for Process.Pool """ + function, args = args + return function(**args) diff --git a/dynamic_predictor/dust3r/utils/path_to_croco.py b/dynamic_predictor/dust3r/utils/path_to_croco.py new file mode 100644 index 0000000000000000000000000000000000000000..39226ce6bc0e1993ba98a22096de32cb6fa916b4 --- /dev/null +++ b/dynamic_predictor/dust3r/utils/path_to_croco.py @@ -0,0 +1,19 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). 
+# +# -------------------------------------------------------- +# CroCo submodule import +# -------------------------------------------------------- + +import sys +import os.path as path +HERE_PATH = path.normpath(path.dirname(__file__)) +CROCO_REPO_PATH = path.normpath(path.join(HERE_PATH, '../../croco')) +CROCO_MODELS_PATH = path.join(CROCO_REPO_PATH, 'models') +# check the presence of models directory in repo to be sure its cloned +if path.isdir(CROCO_MODELS_PATH): + # workaround for sibling import + sys.path.insert(0, CROCO_REPO_PATH) +else: + raise ImportError(f"croco is not initialized, could not find: {CROCO_MODELS_PATH}.\n " + "Did you forget to run 'git submodule update --init --recursive' ?") diff --git a/dynamic_predictor/dust3r/utils/po_utils/__init__.py b/dynamic_predictor/dust3r/utils/po_utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9f72a502d59c2aadc7af203b5277cb031a14c67e --- /dev/null +++ b/dynamic_predictor/dust3r/utils/po_utils/__init__.py @@ -0,0 +1 @@ +# junyi \ No newline at end of file diff --git a/dynamic_predictor/dust3r/utils/po_utils/basic.py b/dynamic_predictor/dust3r/utils/po_utils/basic.py new file mode 100644 index 0000000000000000000000000000000000000000..fbfa0690a1271f34952a88eafddba6c9b835a324 --- /dev/null +++ b/dynamic_predictor/dust3r/utils/po_utils/basic.py @@ -0,0 +1,397 @@ +import os +import numpy as np +from os.path import isfile +import torch +import torch.nn.functional as F +EPS = 1e-6 +import copy + +def sub2ind(height, width, y, x): + return y*width + x + +def ind2sub(height, width, ind): + y = ind // width + x = ind % width + return y, x + +def get_lr_str(lr): + lrn = "%.1e" % lr # e.g., 5.0e-04 + lrn = lrn[0] + lrn[3:5] + lrn[-1] # e.g., 5e-4 + return lrn + +def strnum(x): + s = '%g' % x + if '.' in s: + if x < 1.0: + s = s[s.index('.'):] + s = s[:min(len(s),4)] + return s + +def assert_same_shape(t1, t2): + for (x, y) in zip(list(t1.shape), list(t2.shape)): + assert(x==y) + +def print_stats(name, tensor): + shape = tensor.shape + tensor = tensor.detach().cpu().numpy() + print('%s (%s) min = %.2f, mean = %.2f, max = %.2f' % (name, tensor.dtype, np.min(tensor), np.mean(tensor), np.max(tensor)), shape) + +def print_stats_py(name, tensor): + shape = tensor.shape + print('%s (%s) min = %.2f, mean = %.2f, max = %.2f' % (name, tensor.dtype, np.min(tensor), np.mean(tensor), np.max(tensor)), shape) + +def print_(name, tensor): + tensor = tensor.detach().cpu().numpy() + print(name, tensor, tensor.shape) + +def mkdir(path): + if not os.path.exists(path): + os.makedirs(path) + +def normalize_single(d): + # d is a whatever shape torch tensor + dmin = torch.min(d) + dmax = torch.max(d) + d = (d-dmin)/(EPS+(dmax-dmin)) + return d + +def normalize(d): + # d is B x whatever. 
normalize within each element of the batch + out = torch.zeros(d.size()) + if d.is_cuda: + out = out.cuda() + B = list(d.size())[0] + for b in list(range(B)): + out[b] = normalize_single(d[b]) + return out + +def hard_argmax2d(tensor): + B, C, Y, X = list(tensor.shape) + assert(C==1) + + # flatten the Tensor along the height and width axes + flat_tensor = tensor.reshape(B, -1) + # argmax of the flat tensor + argmax = torch.argmax(flat_tensor, dim=1) + + # convert the indices into 2d coordinates + argmax_y = torch.floor(argmax / X) # row + argmax_x = argmax % X # col + + argmax_y = argmax_y.reshape(B) + argmax_x = argmax_x.reshape(B) + return argmax_y, argmax_x + +def argmax2d(heat, hard=True): + B, C, Y, X = list(heat.shape) + assert(C==1) + + if hard: + # hard argmax + loc_y, loc_x = hard_argmax2d(heat) + loc_y = loc_y.float() + loc_x = loc_x.float() + else: + heat = heat.reshape(B, Y*X) + prob = torch.nn.functional.softmax(heat, dim=1) + + grid_y, grid_x = meshgrid2d(B, Y, X) + + grid_y = grid_y.reshape(B, -1) + grid_x = grid_x.reshape(B, -1) + + loc_y = torch.sum(grid_y*prob, dim=1) + loc_x = torch.sum(grid_x*prob, dim=1) + # these are B + + return loc_y, loc_x + +def reduce_masked_mean(x, mask, dim=None, keepdim=False): + # x and mask are the same shape, or at least broadcastably so < actually it's safer if you disallow broadcasting + # returns shape-1 + # axis can be a list of axes + for (a,b) in zip(x.size(), mask.size()): + # if not b==1: + assert(a==b) # some shape mismatch! + # assert(x.size() == mask.size()) + prod = x*mask + if dim is None: + numer = torch.sum(prod) + denom = EPS+torch.sum(mask) + else: + numer = torch.sum(prod, dim=dim, keepdim=keepdim) + denom = EPS+torch.sum(mask, dim=dim, keepdim=keepdim) + + mean = numer/denom + return mean + +def reduce_masked_median(x, mask, keep_batch=False): + # x and mask are the same shape + assert(x.size() == mask.size()) + device = x.device + + B = list(x.shape)[0] + x = x.detach().cpu().numpy() + mask = mask.detach().cpu().numpy() + + if keep_batch: + x = np.reshape(x, [B, -1]) + mask = np.reshape(mask, [B, -1]) + meds = np.zeros([B], np.float32) + for b in list(range(B)): + xb = x[b] + mb = mask[b] + if np.sum(mb) > 0: + xb = xb[mb > 0] + meds[b] = np.median(xb) + else: + meds[b] = np.nan + meds = torch.from_numpy(meds).to(device) + return meds.float() + else: + x = np.reshape(x, [-1]) + mask = np.reshape(mask, [-1]) + if np.sum(mask) > 0: + x = x[mask > 0] + med = np.median(x) + else: + med = np.nan + med = np.array([med], np.float32) + med = torch.from_numpy(med).to(device) + return med.float() + +def pack_seqdim(tensor, B): + shapelist = list(tensor.shape) + B_, S = shapelist[:2] + assert(B==B_) + otherdims = shapelist[2:] + tensor = torch.reshape(tensor, [B*S]+otherdims) + return tensor + +def unpack_seqdim(tensor, B): + shapelist = list(tensor.shape) + BS = shapelist[0] + assert(BS%B==0) + otherdims = shapelist[1:] + S = int(BS/B) + tensor = torch.reshape(tensor, [B,S]+otherdims) + return tensor + +def meshgrid2d(B, Y, X, stack=False, norm=False, device='cuda', on_chans=False): + # returns a meshgrid sized B x Y x X + + grid_y = torch.linspace(0.0, Y-1, Y, device=torch.device(device)) + grid_y = torch.reshape(grid_y, [1, Y, 1]) + grid_y = grid_y.repeat(B, 1, X) + + grid_x = torch.linspace(0.0, X-1, X, device=torch.device(device)) + grid_x = torch.reshape(grid_x, [1, 1, X]) + grid_x = grid_x.repeat(B, Y, 1) + + if norm: + grid_y, grid_x = normalize_grid2d( + grid_y, grid_x, Y, X) + + if stack: + # note we stack in xy order + # 
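# --- editor's note: a small check (not part of the diff) that
# reduce_masked_mean averages only over positions where mask == 1.
import torch

x = torch.tensor([[1.0, 2.0, 100.0]])
mask = torch.tensor([[1.0, 1.0, 0.0]])
assert abs(reduce_masked_mean(x, mask).item() - 1.5) < 1e-4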
(see https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.grid_sample) + if on_chans: + grid = torch.stack([grid_x, grid_y], dim=1) + else: + grid = torch.stack([grid_x, grid_y], dim=-1) + return grid + else: + return grid_y, grid_x + +def meshgrid3d(B, Z, Y, X, stack=False, norm=False, device='cuda'): + # returns a meshgrid sized B x Z x Y x X + + grid_z = torch.linspace(0.0, Z-1, Z, device=device) + grid_z = torch.reshape(grid_z, [1, Z, 1, 1]) + grid_z = grid_z.repeat(B, 1, Y, X) + + grid_y = torch.linspace(0.0, Y-1, Y, device=device) + grid_y = torch.reshape(grid_y, [1, 1, Y, 1]) + grid_y = grid_y.repeat(B, Z, 1, X) + + grid_x = torch.linspace(0.0, X-1, X, device=device) + grid_x = torch.reshape(grid_x, [1, 1, 1, X]) + grid_x = grid_x.repeat(B, Z, Y, 1) + + # if cuda: + # grid_z = grid_z.cuda() + # grid_y = grid_y.cuda() + # grid_x = grid_x.cuda() + + if norm: + grid_z, grid_y, grid_x = normalize_grid3d( + grid_z, grid_y, grid_x, Z, Y, X) + + if stack: + # note we stack in xyz order + # (see https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.grid_sample) + grid = torch.stack([grid_x, grid_y, grid_z], dim=-1) + return grid + else: + return grid_z, grid_y, grid_x + +def normalize_grid2d(grid_y, grid_x, Y, X, clamp_extreme=True): + # make things in [-1,1] + grid_y = 2.0*(grid_y / float(Y-1)) - 1.0 + grid_x = 2.0*(grid_x / float(X-1)) - 1.0 + + if clamp_extreme: + grid_y = torch.clamp(grid_y, min=-2.0, max=2.0) + grid_x = torch.clamp(grid_x, min=-2.0, max=2.0) + + return grid_y, grid_x + +def normalize_grid3d(grid_z, grid_y, grid_x, Z, Y, X, clamp_extreme=True): + # make things in [-1,1] + grid_z = 2.0*(grid_z / float(Z-1)) - 1.0 + grid_y = 2.0*(grid_y / float(Y-1)) - 1.0 + grid_x = 2.0*(grid_x / float(X-1)) - 1.0 + + if clamp_extreme: + grid_z = torch.clamp(grid_z, min=-2.0, max=2.0) + grid_y = torch.clamp(grid_y, min=-2.0, max=2.0) + grid_x = torch.clamp(grid_x, min=-2.0, max=2.0) + + return grid_z, grid_y, grid_x + +def gridcloud2d(B, Y, X, norm=False, device='cuda'): + # we want to sample for each location in the grid + grid_y, grid_x = meshgrid2d(B, Y, X, norm=norm, device=device) + x = torch.reshape(grid_x, [B, -1]) + y = torch.reshape(grid_y, [B, -1]) + # these are B x N + xy = torch.stack([x, y], dim=2) + # this is B x N x 2 + return xy + +def gridcloud3d(B, Z, Y, X, norm=False, device='cuda'): + # we want to sample for each location in the grid + grid_z, grid_y, grid_x = meshgrid3d(B, Z, Y, X, norm=norm, device=device) + x = torch.reshape(grid_x, [B, -1]) + y = torch.reshape(grid_y, [B, -1]) + z = torch.reshape(grid_z, [B, -1]) + # these are B x N + xyz = torch.stack([x, y, z], dim=2) + # this is B x N x 3 + return xyz + +import re +def readPFM(file): + file = open(file, 'rb') + + color = None + width = None + height = None + scale = None + endian = None + + header = file.readline().rstrip() + if header == b'PF': + color = True + elif header == b'Pf': + color = False + else: + raise Exception('Not a PFM file.') + + dim_match = re.match(rb'^(\d+)\s(\d+)\s$', file.readline()) + if dim_match: + width, height = map(int, dim_match.groups()) + else: + raise Exception('Malformed PFM header.') + + scale = float(file.readline().rstrip()) + if scale < 0: # little-endian + endian = '<' + scale = -scale + else: + endian = '>' # big-endian + + data = np.fromfile(file, endian + 'f') + shape = (height, width, 3) if color else (height, width) + + data = np.reshape(data, shape) + data = np.flipud(data) + return data + +def normalize_boxlist2d(boxlist2d, H, W): 
+ boxlist2d = boxlist2d.clone() + ymin, xmin, ymax, xmax = torch.unbind(boxlist2d, dim=2) + ymin = ymin / float(H) + ymax = ymax / float(H) + xmin = xmin / float(W) + xmax = xmax / float(W) + boxlist2d = torch.stack([ymin, xmin, ymax, xmax], dim=2) + return boxlist2d + +def unnormalize_boxlist2d(boxlist2d, H, W): + boxlist2d = boxlist2d.clone() + ymin, xmin, ymax, xmax = torch.unbind(boxlist2d, dim=2) + ymin = ymin * float(H) + ymax = ymax * float(H) + xmin = xmin * float(W) + xmax = xmax * float(W) + boxlist2d = torch.stack([ymin, xmin, ymax, xmax], dim=2) + return boxlist2d + +def unnormalize_box2d(box2d, H, W): + return unnormalize_boxlist2d(box2d.unsqueeze(1), H, W).squeeze(1) + +def normalize_box2d(box2d, H, W): + return normalize_boxlist2d(box2d.unsqueeze(1), H, W).squeeze(1) + +def get_gaussian_kernel_2d(channels, kernel_size=3, sigma=2.0, mid_one=False): + C = channels + xy_grid = gridcloud2d(C, kernel_size, kernel_size) # C x N x 2 + + mean = (kernel_size - 1)/2.0 + variance = sigma**2.0 + + gaussian_kernel = (1.0/(2.0*np.pi*variance)**1.5) * torch.exp(-torch.sum((xy_grid - mean)**2.0, dim=-1) / (2.0*variance)) # C X N + gaussian_kernel = gaussian_kernel.view(C, 1, kernel_size, kernel_size) # C x 1 x 3 x 3 + kernel_sum = torch.sum(gaussian_kernel, dim=(2,3), keepdim=True) + + gaussian_kernel = gaussian_kernel / kernel_sum # normalize + + if mid_one: + # normalize so that the middle element is 1 + maxval = gaussian_kernel[:,:,(kernel_size//2),(kernel_size//2)].reshape(C, 1, 1, 1) + gaussian_kernel = gaussian_kernel / maxval + + return gaussian_kernel + +def gaussian_blur_2d(input, kernel_size=3, sigma=2.0, reflect_pad=False, mid_one=False): + B, C, Z, X = input.shape + kernel = get_gaussian_kernel_2d(C, kernel_size, sigma, mid_one=mid_one) + if reflect_pad: + pad = (kernel_size - 1)//2 + out = F.pad(input, (pad, pad, pad, pad), mode='reflect') + out = F.conv2d(out, kernel, padding=0, groups=C) + else: + out = F.conv2d(input, kernel, padding=(kernel_size - 1)//2, groups=C) + return out + +def gradient2d(x, absolute=False, square=False, return_sum=False): + # x should be B x C x H x W + dh = x[:, :, 1:, :] - x[:, :, :-1, :] + dw = x[:, :, :, 1:] - x[:, :, :, :-1] + + zeros = torch.zeros_like(x) + zero_h = zeros[:, :, 0:1, :] + zero_w = zeros[:, :, :, 0:1] + dh = torch.cat([dh, zero_h], axis=2) + dw = torch.cat([dw, zero_w], axis=3) + if absolute: + dh = torch.abs(dh) + dw = torch.abs(dw) + if square: + dh = dh ** 2 + dw = dw ** 2 + if return_sum: + return dh+dw + else: + return dh, dw \ No newline at end of file diff --git a/dynamic_predictor/dust3r/utils/po_utils/geom.py b/dynamic_predictor/dust3r/utils/po_utils/geom.py new file mode 100644 index 0000000000000000000000000000000000000000..07a167bffd518803152c456e05194d02fec3242e --- /dev/null +++ b/dynamic_predictor/dust3r/utils/po_utils/geom.py @@ -0,0 +1,575 @@ +import torch +import dust3r.utils.po_utils.basic +import numpy as np +import torchvision.ops as ops +from dust3r.utils.po_utils.basic import print_ + +def matmul2(mat1, mat2): + return torch.matmul(mat1, mat2) + +def matmul3(mat1, mat2, mat3): + return torch.matmul(mat1, torch.matmul(mat2, mat3)) + +def eye_3x3(B, device='cuda'): + rt = torch.eye(3, device=torch.device(device)).view(1,3,3).repeat([B, 1, 1]) + return rt + +def eye_4x4(B, device='cuda'): + rt = torch.eye(4, device=torch.device(device)).view(1,4,4).repeat([B, 1, 1]) + return rt + +def safe_inverse(a): #parallel version + B, _, _ = list(a.shape) + inv = a.clone() + r_transpose = a[:, :3, :3].transpose(1,2) 
#inverse of rotation matrix + + inv[:, :3, :3] = r_transpose + inv[:, :3, 3:4] = -torch.matmul(r_transpose, a[:, :3, 3:4]) + + return inv + +def safe_inverse_single(a): + r, t = split_rt_single(a) + t = t.view(3,1) + r_transpose = r.t() + inv = torch.cat([r_transpose, -torch.matmul(r_transpose, t)], 1) + bottom_row = a[3:4, :] # this is [0, 0, 0, 1] + # bottom_row = torch.tensor([0.,0.,0.,1.]).view(1,4) + inv = torch.cat([inv, bottom_row], 0) + return inv + +def split_intrinsics(K): + # K is B x 3 x 3 or B x 4 x 4 + fx = K[:,0,0] + fy = K[:,1,1] + x0 = K[:,0,2] + y0 = K[:,1,2] + return fx, fy, x0, y0 + +def apply_pix_T_cam(pix_T_cam, xyz): + + fx, fy, x0, y0 = split_intrinsics(pix_T_cam) + + # xyz is shaped B x H*W x 3 + # returns xy, shaped B x H*W x 2 + + B, N, C = list(xyz.shape) + assert(C==3) + + x, y, z = torch.unbind(xyz, axis=-1) + + fx = torch.reshape(fx, [B, 1]) + fy = torch.reshape(fy, [B, 1]) + x0 = torch.reshape(x0, [B, 1]) + y0 = torch.reshape(y0, [B, 1]) + + EPS = 1e-4 + z = torch.clamp(z, min=EPS) + x = (x*fx)/(z)+x0 + y = (y*fy)/(z)+y0 + xy = torch.stack([x, y], axis=-1) + return xy + +def apply_pix_T_cam_py_batch(pix_T_cam, xyz): + + fx, fy, x0, y0 = split_intrinsics(pix_T_cam) + + # xyz is shaped B x H*W x 3 + # returns xy, shaped B x H*W x 2 + + B, N, C = list(xyz.shape) + assert(C==3) + + x, y, z = xyz[:,:,0], xyz[:,:,1], xyz[:,:,2] + + fx = np.reshape(fx, [B, 1]) + fy = np.reshape(fy, [B, 1]) + x0 = np.reshape(x0, [B, 1]) + y0 = np.reshape(y0, [B, 1]) + + EPS = 1e-4 + z = np.clip(z, EPS, None) + x = (x*fx)/(z)+x0 + y = (y*fy)/(z)+y0 + xy = np.stack([x, y], axis=-1) + return xy + + +def split_intrinsic(K): + # K is B x 3 x 3 or B x 4 x 4 + fx = K[0,0] + fy = K[1,1] + x0 = K[0,2] + y0 = K[1,2] + return fx, fy, x0, y0 + +def apply_pix_T_cam_py(pix_T_cam, xyz): + + fx, fy, x0, y0 = split_intrinsic(pix_T_cam) + + # xyz is shaped B x H*W x 3 + # returns xy, shaped B x H*W x 2 + + N, C = list(xyz.shape) + assert(C==3) + + x, y, z = xyz[:,0], xyz[:,1], xyz[:,2] + + EPS = 1e-4 + z = np.clip(z, EPS, None) + x = (x*fx)/(z)+x0 + y = (y*fy)/(z)+y0 + xy = np.stack([x, y], axis=-1) + return xy + +def get_camM_T_camXs(origin_T_camXs, ind=0): + B, S = list(origin_T_camXs.shape)[0:2] + camM_T_camXs = torch.zeros_like(origin_T_camXs) + for b in list(range(B)): + camM_T_origin = safe_inverse_single(origin_T_camXs[b,ind]) + for s in list(range(S)): + camM_T_camXs[b,s] = torch.matmul(camM_T_origin, origin_T_camXs[b,s]) + return camM_T_camXs + +def apply_4x4(RT, xyz): + B, N, _ = list(xyz.shape) + ones = torch.ones_like(xyz[:,:,0:1]) + xyz1 = torch.cat([xyz, ones], 2) + xyz1_t = torch.transpose(xyz1, 1, 2) + # this is B x 4 x N + xyz2_t = torch.matmul(RT, xyz1_t) + xyz2 = torch.transpose(xyz2_t, 1, 2) + xyz2 = xyz2[:,:,:3] + return xyz2 + +def apply_4x4_py(RT, xyz): + # print('RT', RT.shape) + B, N, _ = list(xyz.shape) + ones = np.ones_like(xyz[:,:,0:1]) + xyz1 = np.concatenate([xyz, ones], 2) + # print('xyz1', xyz1.shape) + xyz1_t = xyz1.transpose(0,2,1) + # print('xyz1_t', xyz1_t.shape) + # this is B x 4 x N + xyz2_t = np.matmul(RT, xyz1_t) + # print('xyz2_t', xyz2_t.shape) + xyz2 = xyz2_t.transpose(0,2,1) + # print('xyz2', xyz2.shape) + xyz2 = xyz2[:,:,:3] + return xyz2 + +def apply_3x3(RT, xy): + B, N, _ = list(xy.shape) + ones = torch.ones_like(xy[:,:,0:1]) + xy1 = torch.cat([xy, ones], 2) + xy1_t = torch.transpose(xy1, 1, 2) + # this is B x 4 x N + xy2_t = torch.matmul(RT, xy1_t) + xy2 = torch.transpose(xy2_t, 1, 2) + xy2 = xy2[:,:,:2] + return xy2 + +def generate_polygon(ctr_x, 
ctr_y, avg_r, irregularity, spikiness, num_verts):
+    '''
+    Starts with the center of the polygon at ctr_x, ctr_y,
+    then creates the polygon by sampling points on a circle around the center.
+    Random noise is added by varying the angular spacing between sequential points,
+    and by varying the radial distance of each point from the centre.
+
+    Params:
+    ctr_x, ctr_y - coordinates of the "centre" of the polygon
+    avg_r - in px, the average radius of this polygon; this roughly controls how large the polygon is, really only useful for order of magnitude.
+    irregularity - [0,1] indicating how much variance there is in the angular spacing of vertices. [0,1] will map to [0, 2pi/num_verts]
+    spikiness - [0,1] indicating how much variance there is in each vertex from the circle of radius avg_r. [0,1] will map to [0, avg_r]
+    num_verts - the number of vertices of the polygon
+
+    Returns:
+    np.array [num_verts, 2] - CCW order.
+    '''
+    # spikiness
+    spikiness = np.clip(spikiness, 0, 1) * avg_r
+
+    # generate n angle steps
+    irregularity = np.clip(irregularity, 0, 1) * 2 * np.pi / num_verts
+    lower = (2*np.pi / num_verts) - irregularity
+    upper = (2*np.pi / num_verts) + irregularity
+
+    # angle steps
+    angle_steps = np.random.uniform(lower, upper, num_verts)
+    sc = (2 * np.pi) / angle_steps.sum()
+    angle_steps *= sc
+
+    # get all radii
+    angle = np.random.uniform(0, 2*np.pi)
+    radii = np.clip(np.random.normal(avg_r, spikiness, num_verts), 0, 2 * avg_r)
+
+    # compute all points
+    points = []
+    for i in range(num_verts):
+        x = ctr_x + radii[i] * np.cos(angle)
+        y = ctr_y + radii[i] * np.sin(angle)
+        points.append([x, y])
+        angle += angle_steps[i]
+
+    return np.array(points).astype(int)
+
+
+def get_random_affine_2d(B, rot_min=-5.0, rot_max=5.0, tx_min=-0.1, tx_max=0.1, ty_min=-0.1, ty_max=0.1, sx_min=-0.05, sx_max=0.05, sy_min=-0.05, sy_max=0.05, shx_min=-0.05, shx_max=0.05, shy_min=-0.05, shy_max=0.05):
+    '''
+    Params:
+    rot_min: rotation amount min
+    rot_max: rotation amount max
+
+    tx_min: translation x min
+    tx_max: translation x max
+
+    ty_min: translation y min
+    ty_max: translation y max
+
+    sx_min: scaling x min
+    sx_max: scaling x max
+
+    sy_min: scaling y min
+    sy_max: scaling y max
+
+    shx_min: shear x min
+    shx_max: shear x max
+
+    shy_min: shear y min
+    shy_max: shear y max
+
+    Returns:
+    transformation matrix: (B, 3, 3)
+    '''
+    # rotation
+    if rot_max - rot_min != 0:
+        rot_amount = np.random.uniform(low=rot_min, high=rot_max, size=B)
+        rot_amount = np.pi/180.0*rot_amount
+    else:
+        rot_amount = np.pi/180.0*rot_min # degenerate range: use the fixed angle, still converted to radians
+    rotation = np.zeros((B, 3, 3)) # B, 3, 3
+    rotation[:, 2, 2] = 1
+    rotation[:, 0, 0] = np.cos(rot_amount)
+    rotation[:, 0, 1] = -np.sin(rot_amount)
+    rotation[:, 1, 0] = np.sin(rot_amount)
+    rotation[:, 1, 1] = np.cos(rot_amount)
+
+    # translation
+    translation = np.zeros((B, 3, 3)) # B, 3, 3
+    translation[:, [0,1,2], [0,1,2]] = 1
+    if (tx_max - tx_min) > 0:
+        trans_x = np.random.uniform(low=tx_min, high=tx_max, size=B)
+        translation[:, 0, 2] = trans_x
+    # else:
+    #     translation[:, 0, 2] = tx_max
+    if ty_max - ty_min != 0:
+        trans_y = np.random.uniform(low=ty_min, high=ty_max, size=B)
+        translation[:, 1, 2] = trans_y
+    # else:
+    #     translation[:, 1, 2] = ty_max
+
+    # scaling
+    scaling = np.zeros((B, 3, 3)) # B, 3, 3
+    scaling[:, [0,1,2], [0,1,2]] = 1
+    if (sx_max - sx_min) > 0:
+        scale_x = 1 + np.random.uniform(low=sx_min, high=sx_max, size=B)
+        scaling[:, 0, 0] = scale_x
+    # else:
+    #     scaling[:, 0, 0] = sx_max
+    if (sy_max - sy_min) > 0:
+        scale_y = 1 + np.random.uniform(low=sy_min, high=sy_max, size=B)
+        scaling[:, 1, 1] = 
scale_y + # else: + # scaling[:, 1, 1] = sy_max + + # shear + shear = np.zeros((B, 3, 3)) # B, 3, 3 + shear[:, [0,1,2], [0,1,2]] = 1 + if (shx_max - shx_min) > 0: + shear_x = np.random.uniform(low=shx_min, high=shx_max, size=B) + shear[:, 0, 1] = shear_x + # else: + # shear[:, 0, 1] = shx_max + if (shy_max - shy_min) > 0: + shear_y = np.random.uniform(low=shy_min, high=shy_max, size=B) + shear[:, 1, 0] = shear_y + # else: + # shear[:, 1, 0] = shy_max + + # compose all those + rt = np.einsum("ijk,ikl->ijl", rotation, translation) + ss = np.einsum("ijk,ikl->ijl", scaling, shear) + trans = np.einsum("ijk,ikl->ijl", rt, ss) + + return trans + +def get_centroid_from_box2d(box2d): + ymin = box2d[:,0] + xmin = box2d[:,1] + ymax = box2d[:,2] + xmax = box2d[:,3] + x = (xmin+xmax)/2.0 + y = (ymin+ymax)/2.0 + return y, x + +def normalize_boxlist2d(boxlist2d, H, W): + boxlist2d = boxlist2d.clone() + ymin, xmin, ymax, xmax = torch.unbind(boxlist2d, dim=2) + ymin = ymin / float(H) + ymax = ymax / float(H) + xmin = xmin / float(W) + xmax = xmax / float(W) + boxlist2d = torch.stack([ymin, xmin, ymax, xmax], dim=2) + return boxlist2d + +def unnormalize_boxlist2d(boxlist2d, H, W): + boxlist2d = boxlist2d.clone() + ymin, xmin, ymax, xmax = torch.unbind(boxlist2d, dim=2) + ymin = ymin * float(H) + ymax = ymax * float(H) + xmin = xmin * float(W) + xmax = xmax * float(W) + boxlist2d = torch.stack([ymin, xmin, ymax, xmax], dim=2) + return boxlist2d + +def unnormalize_box2d(box2d, H, W): + return unnormalize_boxlist2d(box2d.unsqueeze(1), H, W).squeeze(1) + +def normalize_box2d(box2d, H, W): + return normalize_boxlist2d(box2d.unsqueeze(1), H, W).squeeze(1) + +def get_size_from_box2d(box2d): + ymin = box2d[:,0] + xmin = box2d[:,1] + ymax = box2d[:,2] + xmax = box2d[:,3] + height = ymax-ymin + width = xmax-xmin + return height, width + +def crop_and_resize(im, boxlist, PH, PW, boxlist_is_normalized=False): + B, C, H, W = im.shape + B2, N, D = boxlist.shape + assert(B==B2) + assert(D==4) + # PH, PW is the size to resize to + + # output is B,N,C,PH,PW + + # pt wants xy xy, unnormalized + if boxlist_is_normalized: + boxlist_unnorm = unnormalize_boxlist2d(boxlist, H, W) + else: + boxlist_unnorm = boxlist + + ymin, xmin, ymax, xmax = boxlist_unnorm.unbind(2) + # boxlist_pt = torch.stack([boxlist_unnorm[:,1], boxlist_unnorm[:,0], boxlist_unnorm[:,3], boxlist_unnorm[:,2]], dim=1) + boxlist_pt = torch.stack([xmin, ymin, xmax, ymax], dim=2) + # we want a B-len list of K x 4 arrays + + # print('im', im.shape) + # print('boxlist', boxlist.shape) + # print('boxlist_pt', boxlist_pt.shape) + + # boxlist_pt = list(boxlist_pt.unbind(0)) + + crops = [] + for b in range(B): + crops_b = ops.roi_align(im[b:b+1], [boxlist_pt[b]], output_size=(PH, PW)) + crops.append(crops_b) + # # crops = im + + # print('crops', crops.shape) + # crops = crops.reshape(B,N,C,PH,PW) + + + # crops = [] + # for b in range(B): + # crop_b = ops.roi_align(im[b:b+1], [boxlist_pt[b]], output_size=(PH, PW)) + # print('crop_b', crop_b.shape) + # crops.append(crop_b) + crops = torch.stack(crops, dim=0) + + # print('crops', crops.shape) + # boxlist_list = boxlist_pt.unbind(0) + # print('rgb_crop', rgb_crop.shape) + + return crops + + +# def get_boxlist_from_centroid_and_size(cy, cx, h, w, clip=True): +# # cy,cx are both B,N +# ymin = cy - h/2 +# ymax = cy + h/2 +# xmin = cx - w/2 +# xmax = cx + w/2 + +# box = torch.stack([ymin, xmin, ymax, xmax], dim=-1) +# if clip: +# box = torch.clamp(box, 0, 1) +# return box + + +def get_boxlist_from_centroid_and_size(cy, cx, h, 
w):#, clip=False): + # cy,cx are the same shape + ymin = cy - h/2 + ymax = cy + h/2 + xmin = cx - w/2 + xmax = cx + w/2 + + # if clip: + # ymin = torch.clamp(ymin, 0, H-1) + # ymax = torch.clamp(ymax, 0, H-1) + # xmin = torch.clamp(xmin, 0, W-1) + # xmax = torch.clamp(xmax, 0, W-1) + + box = torch.stack([ymin, xmin, ymax, xmax], dim=-1) + return box + + +def get_box2d_from_mask(mask, normalize=False): + # mask is B, 1, H, W + + B, C, H, W = mask.shape + assert(C==1) + xy = utils.basic.gridcloud2d(B, H, W, norm=False, device=mask.device) # B, H*W, 2 + + box = torch.zeros((B, 4), dtype=torch.float32, device=mask.device) + for b in range(B): + xy_b = xy[b] # H*W, 2 + mask_b = mask[b].reshape(H*W) + xy_ = xy_b[mask_b > 0] + x_ = xy_[:,0] + y_ = xy_[:,1] + ymin = torch.min(y_) + ymax = torch.max(y_) + xmin = torch.min(x_) + xmax = torch.max(x_) + box[b] = torch.stack([ymin, xmin, ymax, xmax], dim=0) + if normalize: + box = normalize_boxlist2d(box.unsqueeze(1), H, W).squeeze(1) + return box + +def convert_box2d_to_intrinsics(box2d, pix_T_cam, H, W, use_image_aspect_ratio=True, mult_padding=1.0): + # box2d is B x 4, with ymin, xmin, ymax, xmax in normalized coords + # ymin, xmin, ymax, xmax = torch.unbind(box2d, dim=1) + # H, W is the original size of the image + # mult_padding is relative to object size in pixels + + # i assume we're rendering an image the same size as the original (H, W) + + if not mult_padding==1.0: + y, x = get_centroid_from_box2d(box2d) + h, w = get_size_from_box2d(box2d) + box2d = get_box2d_from_centroid_and_size( + y, x, h*mult_padding, w*mult_padding, clip=False) + + if use_image_aspect_ratio: + h, w = get_size_from_box2d(box2d) + y, x = get_centroid_from_box2d(box2d) + + # note h,w are relative right now + # we need to undo this, to see the real ratio + + h = h*float(H) + w = w*float(W) + box_ratio = h/w + im_ratio = H/float(W) + + # print('box_ratio:', box_ratio) + # print('im_ratio:', im_ratio) + + if box_ratio >= im_ratio: + w = h/im_ratio + # print('setting w:', h/im_ratio) + else: + h = w*im_ratio + # print('setting h:', w*im_ratio) + + box2d = get_box2d_from_centroid_and_size( + y, x, h/float(H), w/float(W), clip=False) + + assert(h > 1e-4) + assert(w > 1e-4) + + ymin, xmin, ymax, xmax = torch.unbind(box2d, dim=1) + + fx, fy, x0, y0 = split_intrinsics(pix_T_cam) + + # the topleft of the new image will now have a different offset from the center of projection + + new_x0 = x0 - xmin*W + new_y0 = y0 - ymin*H + + pix_T_cam = pack_intrinsics(fx, fy, new_x0, new_y0) + # this alone will give me an image in original resolution, + # with its topleft at the box corner + + box_h, box_w = get_size_from_box2d(box2d) + # these are normalized, and shaped B. 
(e.g., [0.4], [0.3]) + + # we are going to scale the image by the inverse of this, + # since we are zooming into this area + + sy = 1./box_h + sx = 1./box_w + + pix_T_cam = scale_intrinsics(pix_T_cam, sx, sy) + return pix_T_cam, box2d + +def pixels2camera(x,y,z,fx,fy,x0,y0): + # x and y are locations in pixel coordinates, z is a depth in meters + # they can be images or pointclouds + # fx, fy, x0, y0 are camera intrinsics + # returns xyz, sized B x N x 3 + + B = x.shape[0] + + fx = torch.reshape(fx, [B,1]) + fy = torch.reshape(fy, [B,1]) + x0 = torch.reshape(x0, [B,1]) + y0 = torch.reshape(y0, [B,1]) + + x = torch.reshape(x, [B,-1]) + y = torch.reshape(y, [B,-1]) + z = torch.reshape(z, [B,-1]) + + # unproject + x = (z/fx)*(x-x0) + y = (z/fy)*(y-y0) + + xyz = torch.stack([x,y,z], dim=2) + # B x N x 3 + return xyz + +def camera2pixels(xyz, pix_T_cam): + # xyz is shaped B x H*W x 3 + # returns xy, shaped B x H*W x 2 + + fx, fy, x0, y0 = split_intrinsics(pix_T_cam) + x, y, z = torch.unbind(xyz, dim=-1) + B = list(z.shape)[0] + + fx = torch.reshape(fx, [B,1]) + fy = torch.reshape(fy, [B,1]) + x0 = torch.reshape(x0, [B,1]) + y0 = torch.reshape(y0, [B,1]) + x = torch.reshape(x, [B,-1]) + y = torch.reshape(y, [B,-1]) + z = torch.reshape(z, [B,-1]) + + EPS = 1e-4 + z = torch.clamp(z, min=EPS) + x = (x*fx)/z + x0 + y = (y*fy)/z + y0 + xy = torch.stack([x, y], dim=-1) + return xy + +def depth2pointcloud(z, pix_T_cam): + B, C, H, W = list(z.shape) + device = z.device + y, x = utils.basic.meshgrid2d(B, H, W, device=device) + z = torch.reshape(z, [B, H, W]) + fx, fy, x0, y0 = split_intrinsics(pix_T_cam) + xyz = pixels2camera(x, y, z, fx, fy, x0, y0) + return xyz \ No newline at end of file diff --git a/dynamic_predictor/dust3r/utils/po_utils/improc.py b/dynamic_predictor/dust3r/utils/po_utils/improc.py new file mode 100644 index 0000000000000000000000000000000000000000..ca52800d37c77f5d71bc4dcf1c5dd3965d71b79a --- /dev/null +++ b/dynamic_predictor/dust3r/utils/po_utils/improc.py @@ -0,0 +1,1526 @@ +import torch +import numpy as np +import dust3r.utils.po_utils.basic +from sklearn.decomposition import PCA +from matplotlib import cm +import matplotlib.pyplot as plt +import cv2 +import torch.nn.functional as F +import torchvision +EPS = 1e-6 + +from skimage.color import ( + rgb2lab, rgb2yuv, rgb2ycbcr, lab2rgb, yuv2rgb, ycbcr2rgb, + rgb2hsv, hsv2rgb, rgb2xyz, xyz2rgb, rgb2hed, hed2rgb) + +def _convert(input_, type_): + return { + 'float': input_.float(), + 'double': input_.double(), + }.get(type_, input_) + +def _generic_transform_sk_3d(transform, in_type='', out_type=''): + def apply_transform_individual(input_): + device = input_.device + input_ = input_.cpu() + input_ = _convert(input_, in_type) + + input_ = input_.permute(1, 2, 0).detach().numpy() + transformed = transform(input_) + output = torch.from_numpy(transformed).float().permute(2, 0, 1) + output = _convert(output, out_type) + return output.to(device) + + def apply_transform(input_): + to_stack = [] + for image in input_: + to_stack.append(apply_transform_individual(image)) + return torch.stack(to_stack) + return apply_transform + +hsv_to_rgb = _generic_transform_sk_3d(hsv2rgb) + +def preprocess_color_tf(x): + import tensorflow as tf + return tf.cast(x,tf.float32) * 1./255 - 0.5 + +def preprocess_color(x): + if isinstance(x, np.ndarray): + return x.astype(np.float32) * 1./255 - 0.5 + else: + return x.float() * 1./255 - 0.5 + +def pca_embed(emb, keep, valid=None): + ## emb -- [S,H/2,W/2,C] + ## keep is the number of principal components to keep 
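+    ## Sketch of the idea (shapes here are illustrative assumptions): the
+    ## B x C x H x W features are flattened to (H*W) x C pixels per image,
+    ## an sklearn PCA with n_components=keep is fit on those pixels, and the
+    ## projection is reshaped back to H x W x keep; with keep=3 this yields a
+    ## false-color view of the feature map.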
+    ## Helper function for reduce_emb.
+    emb = emb + EPS
+    # emb is B x C x H x W
+    emb = emb.permute(0, 2, 3, 1).cpu().detach().numpy() # this is B x H x W x C
+
+    B, H, W, C = np.shape(emb)
+
+    if valid is not None:
+        valid = valid.cpu().detach().numpy().reshape((H*W))
+
+    emb_reduced = list()
+
+    for img in emb:
+        if np.isnan(img).any():
+            emb_reduced.append(np.zeros([H, W, keep]))
+            continue
+
+        pixels_kd = np.reshape(img, (H*W, C))
+
+        if valid is not None:
+            pixels_kd_pca = pixels_kd[valid > 0] # fit only on the valid pixels
+        else:
+            pixels_kd_pca = pixels_kd
+
+        P = PCA(keep)
+        P.fit(pixels_kd_pca)
+
+        if valid is not None:
+            pixels3d = P.transform(pixels_kd)*valid[:, None] # zero out invalid pixels
+        else:
+            pixels3d = P.transform(pixels_kd)
+
+        out_img = np.reshape(pixels3d, [H,W,keep]).astype(np.float32)
+        if np.isnan(out_img).any():
+            emb_reduced.append(np.zeros([H, W, keep]))
+            continue
+
+        emb_reduced.append(out_img)
+
+    emb_reduced = np.stack(emb_reduced, axis=0).astype(np.float32)
+
+    return torch.from_numpy(emb_reduced).permute(0, 3, 1, 2)
+
+def pca_embed_together(emb, keep):
+    ## emb -- [S,H/2,W/2,C]
+    ## keep is the number of principal components to keep
+    ## Helper function for reduce_emb.
+    emb = emb + EPS
+    # emb is B x C x H x W
+    emb = emb.permute(0, 2, 3, 1).cpu().detach().numpy() # this is B x H x W x C
+
+    B, H, W, C = np.shape(emb)
+    if np.isnan(emb).any():
+        return torch.zeros(B, keep, H, W)
+
+    pixelskd = np.reshape(emb, (B*H*W, C))
+    P = PCA(keep)
+    P.fit(pixelskd)
+    pixels3d = P.transform(pixelskd)
+    out_img = np.reshape(pixels3d, [B,H,W,keep]).astype(np.float32)
+
+    if np.isnan(out_img).any():
+        return torch.zeros(B, keep, H, W)
+
+    return torch.from_numpy(out_img).permute(0, 3, 1, 2)
+
+def reduce_emb(emb, valid=None, inbound=None, together=False):
+    ## emb -- [S,C,H/2,W/2], inbound -- [S,1,H/2,W/2]
+    ## Reduce number of chans to 3 with PCA. For vis.
+    # S,H,W,C = emb.shape.as_list()
+    S, C, H, W = list(emb.size())
+    keep = 3
+
+    if together:
+        reduced_emb = pca_embed_together(emb, keep)
+    else:
+        reduced_emb = pca_embed(emb, keep, valid) # not im
+
+    reduced_emb = utils.basic.normalize(reduced_emb) - 0.5
+    if inbound is not None:
+        emb_inbound = emb*inbound
+    else:
+        emb_inbound = None
+
+    return reduced_emb, emb_inbound
+
+def get_feat_pca(feat, valid=None):
+    B, C, D, W = list(feat.size())
+    # feat is B x C x D x W. If the input is 3D, average it over the height dimension before passing it into this function.
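+    # Usage sketch (hypothetical tensor, for illustration only):
+    #   feat = torch.randn(2, 64, 128, 128) # B x C x D x W
+    #   vis = get_feat_pca(feat)            # B x 3 x D x W, PCA-reduced to 3 chans
+    # summ_feat()/summ_feats() below use this to turn feature maps into
+    # RGB-like images for logging.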
+
+    pca, _ = reduce_emb(feat, valid=valid, inbound=None, together=True)
+    # pca is B x 3 x D x W
+    return pca
+
+def gif_and_tile(ims, just_gif=False):
+    S = len(ims)
+    # each im is B x H x W x C
+    # i want a gif on the left, and the tiled frames on the right
+    # for the gif tool, this means making a B x S x H x W tensor
+    # where the leftmost part is sequential and the rest is tiled
+    gif = torch.stack(ims, dim=1)
+    if just_gif:
+        return gif
+    til = torch.cat(ims, dim=2)
+    til = til.unsqueeze(dim=1).repeat(1, S, 1, 1, 1)
+    im = torch.cat([gif, til], dim=3)
+    return im
+
+def back2color(i, blacken_zeros=False):
+    if blacken_zeros:
+        const = torch.tensor([-0.5])
+        i = torch.where(i==0.0, const.cuda() if i.is_cuda else const, i)
+        return back2color(i)
+    else:
+        return ((i+0.5)*255).type(torch.ByteTensor)
+
+def convert_occ_to_height(occ, reduce_axis=3):
+    B, C, D, H, W = list(occ.shape)
+    assert(C==1)
+    # note that height increases DOWNWARD in the tensor
+    # (like pixel/camera coordinates)
+
+    G = list(occ.shape)[reduce_axis]
+    values = torch.linspace(float(G), 1.0, steps=G, dtype=torch.float32, device=occ.device)
+    if reduce_axis==2:
+        # fro view
+        values = values.view(1, 1, G, 1, 1)
+    elif reduce_axis==3:
+        # top view
+        values = values.view(1, 1, 1, G, 1)
+    elif reduce_axis==4:
+        # lateral view
+        values = values.view(1, 1, 1, 1, G)
+    else:
+        assert(False) # you have to reduce one of the spatial dims (2-4)
+    values = torch.max(occ*values, dim=reduce_axis)[0]/float(G)
+    # values = values.view([B, C, D, W])
+    return values
+
+def xy2heatmap(xy, sigma, grid_xs, grid_ys, norm=False):
+    # xy is B x N x 2, containing float x and y coordinates of N things
+    # grid_xs and grid_ys are B x N x Y x X
+
+    B, N, Y, X = list(grid_xs.shape)
+
+    mu_x = xy[:,:,0].clone()
+    mu_y = xy[:,:,1].clone()
+
+    x_valid = (mu_x>-0.5) & (mu_x<float(X+0.5))
+    y_valid = (mu_y>-0.5) & (mu_y<float(Y+0.5))
+    not_valid = ~(x_valid & y_valid)
+    # move the invalid mus far away, so their heat is all-zero
+    mu_x[not_valid] = -10000
+    mu_y[not_valid] = -10000
+
+    mu_x = mu_x.reshape(B, N, 1, 1).repeat(1, 1, Y, X)
+    mu_y = mu_y.reshape(B, N, 1, 1).repeat(1, 1, Y, X)
+
+    sigma_sq = sigma*sigma
+    sq_diff_x = (grid_xs - mu_x)**2
+    sq_diff_y = (grid_ys - mu_y)**2
+
+    term1 = 1./2.*np.pi*sigma_sq
+    term2 = sq_diff_x/(2.*sigma_sq)
+    term3 = sq_diff_y/(2.*sigma_sq)
+    gauss = term1*torch.exp(-term2 - term3)
+
+    if norm:
+        # normalize, so that the peak of each gaussian is 1
+        gauss = gauss / (EPS + torch.max(gauss.reshape(B, N, -1), dim=2, keepdim=True)[0].reshape(B, N, 1, 1))
+
+    return gauss
+
+def xy2heatmaps(xy, Y, X, sigma=30.0, norm=True):
+    # xy is B x N x 2
+    B, N, D = list(xy.shape)
+    assert(D==2)
+    device = xy.device
+    grid_y, grid_x = utils.basic.meshgrid2d(B, Y, X, device=device)
+    # grid_x and grid_y are B x Y x X
+    grid_xs = grid_x.unsqueeze(1).repeat(1, N, 1, 1)
+    grid_ys = grid_y.unsqueeze(1).repeat(1, N, 1, 1)
+    heat = xy2heatmap(xy, sigma, grid_xs, grid_ys, norm=norm)
+    return heat
+
+def draw_circles_at_xy(xy, Y, X, sigma=12.5):
+    B, N, D = list(xy.shape)
+    assert(D==2)
+    prior = xy2heatmaps(xy, Y, X, sigma=sigma)
+    prior = (prior > 0.5).float()
+    return prior
+
+def seq2color(im, norm=True, colormap='coolwarm'):
+    B, S, H, W = list(im.shape)
+    # S is sequential
+
+    # prep a mask of the valid pixels, so we can blacken the invalids later
+    mask = torch.max(im, dim=1, keepdim=True)[0]
+
+    # turn the S dim into an explicit sequence
+    coeffs = np.linspace(1.0, float(S), S).astype(np.float32)/float(S)
+
+    # # increase the spacing from the center
+    # coeffs[:int(S/2)] -= 2.0
+    # coeffs[int(S/2)+1:] += 2.0
+
+    coeffs = torch.from_numpy(coeffs).float().cuda()
+    coeffs = coeffs.reshape(1, S, 1, 1).repeat(B, 1, H, W)
+    # scale each channel by the right coeff
+    im = im * coeffs
+    # now im is in [1/S, 1], except for the invalid parts which are 0
+    # keep the highest valid coeff at each pixel
+    im = torch.max(im, dim=1, keepdim=True)[0]
+
+    out = []
+    for b in range(B):
+        im_ = im[b]
+        # move channels out to last dim
+        im_ = im_.detach().cpu().numpy()
+        im_ = np.squeeze(im_)
+        # im_ is H x W
+        if colormap=='coolwarm':
+            im_ = cm.coolwarm(im_)[:, :, :3]
+        elif colormap=='PiYG':
+            im_ = cm.PiYG(im_)[:, :, :3]
+        elif colormap=='winter':
+            im_ = cm.winter(im_)[:, :, :3]
+        elif colormap=='spring':
+            im_ = cm.spring(im_)[:, :, :3]
+        elif colormap=='onediff':
+            im_ = np.reshape(im_, (-1))
+            im0_ = cm.spring(im_)[:, :3]
+            im1_ = cm.winter(im_)[:, :3]
+            im1_[im_==1/float(S)] = im0_[im_==1/float(S)]
+            im_ = np.reshape(im1_, (H, W, 3))
+        else:
+            assert(False) # invalid colormap
+        # move channels into dim 0
+        im_ = np.transpose(im_, [2, 0, 1])
+        im_ = torch.from_numpy(im_).float().cuda()
+        out.append(im_)
+    out = torch.stack(out, dim=0)
+
+    # blacken the invalid pixels, instead of using the 0-color
+    out = out*mask
+    # 
out = out*255.0 + + # put it in [-0.5, 0.5] + out = out - 0.5 + + return out + +def colorize(d): + # this is actually just grayscale right now + + if d.ndim==2: + d = d.unsqueeze(dim=0) + else: + assert(d.ndim==3) + + # color_map = cm.get_cmap('plasma') + color_map = cm.get_cmap('inferno') + # S1, D = traj.shape + + # print('d1', d.shape) + C,H,W = d.shape + assert(C==1) + d = d.reshape(-1) + d = d.detach().cpu().numpy() + # print('d2', d.shape) + color = np.array(color_map(d)) * 255 # rgba + # print('color1', color.shape) + color = np.reshape(color[:,:3], [H*W, 3]) + # print('color2', color.shape) + color = torch.from_numpy(color).permute(1,0).reshape(3,H,W) + # # gather + # cm = matplotlib.cm.get_cmap(cmap if cmap is not None else 'gray') + # if cmap=='RdBu' or cmap=='RdYlGn': + # colors = cm(np.arange(256))[:, :3] + # else: + # colors = cm.colors + # colors = np.array(colors).astype(np.float32) + # colors = np.reshape(colors, [-1, 3]) + # colors = tf.constant(colors, dtype=tf.float32) + + # value = tf.gather(colors, indices) + # colorize(value, normalize=True, vmin=None, vmax=None, cmap=None, vals=255) + + # copy to the three chans + # d = d.repeat(3, 1, 1) + return color + + +def oned2inferno(d, norm=True, do_colorize=False): + # convert a 1chan input to a 3chan image output + + # if it's just B x H x W, add a C dim + if d.ndim==3: + d = d.unsqueeze(dim=1) + # d should be B x C x H x W, where C=1 + B, C, H, W = list(d.shape) + assert(C==1) + + if norm: + d = utils.basic.normalize(d) + + if do_colorize: + rgb = torch.zeros(B, 3, H, W) + for b in list(range(B)): + rgb[b] = colorize(d[b]) + else: + rgb = d.repeat(1, 3, 1, 1)*255.0 + # rgb = (255.0*rgb).type(torch.ByteTensor) + rgb = rgb.type(torch.ByteTensor) + + # rgb = tf.cast(255.0*rgb, tf.uint8) + # rgb = tf.reshape(rgb, [-1, hyp.H, hyp.W, 3]) + # rgb = tf.expand_dims(rgb, axis=0) + return rgb + +def oned2gray(d, norm=True): + # convert a 1chan input to a 3chan image output + + # if it's just B x H x W, add a C dim + if d.ndim==3: + d = d.unsqueeze(dim=1) + # d should be B x C x H x W, where C=1 + B, C, H, W = list(d.shape) + assert(C==1) + + if norm: + d = utils.basic.normalize(d) + + rgb = d.repeat(1,3,1,1) + rgb = (255.0*rgb).type(torch.ByteTensor) + return rgb + + +def draw_frame_id_on_vis(vis, frame_id, scale=0.5, left=5, top=20): + + rgb = vis.detach().cpu().numpy()[0] + rgb = np.transpose(rgb, [1, 2, 0]) # put channels last + rgb = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR) + color = (255, 255, 255) + # print('putting frame id', frame_id) + + frame_str = utils.basic.strnum(frame_id) + + text_color_bg = (0,0,0) + font = cv2.FONT_HERSHEY_SIMPLEX + text_size, _ = cv2.getTextSize(frame_str, font, scale, 1) + text_w, text_h = text_size + cv2.rectangle(rgb, (left, top-text_h), (left + text_w, top+1), text_color_bg, -1) + + cv2.putText( + rgb, + frame_str, + (left, top), # from left, from top + font, + scale, # font scale (float) + color, + 1) # font thickness (int) + rgb = cv2.cvtColor(rgb.astype(np.uint8), cv2.COLOR_BGR2RGB) + vis = torch.from_numpy(rgb).permute(2, 0, 1).unsqueeze(0) + return vis + +COLORMAP_FILE = "./utils/bremm.png" +class ColorMap2d: + def __init__(self, filename=None): + self._colormap_file = filename or COLORMAP_FILE + self._img = plt.imread(self._colormap_file) + + self._height = self._img.shape[0] + self._width = self._img.shape[1] + + def __call__(self, X): + assert len(X.shape) == 2 + output = np.zeros((X.shape[0], 3)) + for i in range(X.shape[0]): + x, y = X[i, :] + xp = int((self._width-1) * x) + yp = 
int((self._height-1) * y) + xp = np.clip(xp, 0, self._width-1) + yp = np.clip(yp, 0, self._height-1) + output[i, :] = self._img[yp, xp] + return output + +def get_n_colors(N, sequential=False): + label_colors = [] + for ii in range(N): + if sequential: + rgb = cm.winter(ii/(N-1)) + rgb = (np.array(rgb) * 255).astype(np.uint8)[:3] + else: + rgb = np.zeros(3) + while np.sum(rgb) < 128: # ensure min brightness + rgb = np.random.randint(0,256,3) + label_colors.append(rgb) + return label_colors + +class Summ_writer(object): + def __init__(self, writer, global_step, log_freq=10, fps=8, scalar_freq=100, just_gif=False): + self.writer = writer + self.global_step = global_step + self.log_freq = log_freq + self.fps = fps + self.just_gif = just_gif + self.maxwidth = 10000 + self.save_this = (self.global_step % self.log_freq == 0) + self.scalar_freq = max(scalar_freq,1) + + + def summ_gif(self, name, tensor, blacken_zeros=False): + # tensor should be in B x S x C x H x W + + assert tensor.dtype in {torch.uint8,torch.float32} + shape = list(tensor.shape) + + if tensor.dtype == torch.float32: + tensor = back2color(tensor, blacken_zeros=blacken_zeros) + + video_to_write = tensor[0:1] + + S = video_to_write.shape[1] + if S==1: + # video_to_write is 1 x 1 x C x H x W + self.writer.add_image(name, video_to_write[0,0], global_step=self.global_step) + else: + self.writer.add_video(name, video_to_write, fps=self.fps, global_step=self.global_step) + + return video_to_write + + def draw_boxlist2d_on_image(self, rgb, boxlist, scores=None, tids=None, linewidth=1): + B, C, H, W = list(rgb.shape) + assert(C==3) + B2, N, D = list(boxlist.shape) + assert(B2==B) + assert(D==4) # ymin, xmin, ymax, xmax + + rgb = back2color(rgb) + if scores is None: + scores = torch.ones(B2, N).float() + if tids is None: + tids = torch.arange(N).reshape(1,N).repeat(B2,N).long() + # tids = torch.zeros(B2, N).long() + out = self.draw_boxlist2d_on_image_py( + rgb[0].cpu().detach().numpy(), + boxlist[0].cpu().detach().numpy(), + scores[0].cpu().detach().numpy(), + tids[0].cpu().detach().numpy(), + linewidth=linewidth) + out = torch.from_numpy(out).type(torch.ByteTensor).permute(2, 0, 1) + out = torch.unsqueeze(out, dim=0) + out = preprocess_color(out) + out = torch.reshape(out, [1, C, H, W]) + return out + + def draw_boxlist2d_on_image_py(self, rgb, boxlist, scores, tids, linewidth=1): + # all inputs are numpy tensors + # rgb is H x W x 3 + # boxlist is N x 4 + # scores is N + # tids is N + + rgb = np.transpose(rgb, [1, 2, 0]) # put channels last + # rgb = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR) + + rgb = rgb.astype(np.uint8).copy() + + + H, W, C = rgb.shape + assert(C==3) + N, D = boxlist.shape + assert(D==4) + + # color_map = cm.get_cmap('tab20') + # color_map = cm.get_cmap('set1') + color_map = cm.get_cmap('Accent') + color_map = color_map.colors + # print('color_map', color_map) + + # draw + for ind, box in enumerate(boxlist): + # box is 4 + if not np.isclose(scores[ind], 0.0): + # box = utils.geom.scale_box2d(box, H, W) + ymin, xmin, ymax, xmax = box + + # ymin, ymax = ymin*H, ymax*H + # xmin, xmax = xmin*W, xmax*W + + # print 'score = %.2f' % scores[ind] + # color_id = tids[ind] % 20 + color_id = tids[ind] + color = color_map[color_id] + color = np.array(color)*255.0 + color = color.round() + # color = color.astype(np.uint8) + # color = color[::-1] + # print('color', color) + + # print 'tid = %d; score = %.3f' % (tids[ind], scores[ind]) + + # if False: + if scores[ind] < 1.0: # not gt + cv2.putText(rgb, + # '%d (%.2f)' % (tids[ind], 
scores[ind]), + '%.2f' % (scores[ind]), + (int(xmin), int(ymin)), + cv2.FONT_HERSHEY_SIMPLEX, + 0.5, # font size + color), + #1) # font weight + + xmin = np.clip(int(xmin), 0, W-1) + xmax = np.clip(int(xmax), 0, W-1) + ymin = np.clip(int(ymin), 0, H-1) + ymax = np.clip(int(ymax), 0, H-1) + + cv2.line(rgb, (xmin, ymin), (xmin, ymax), color, linewidth, cv2.LINE_AA) + cv2.line(rgb, (xmin, ymin), (xmax, ymin), color, linewidth, cv2.LINE_AA) + cv2.line(rgb, (xmax, ymin), (xmax, ymax), color, linewidth, cv2.LINE_AA) + cv2.line(rgb, (xmax, ymax), (xmin, ymax), color, linewidth, cv2.LINE_AA) + + # rgb = cv2.cvtColor(rgb.astype(np.uint8), cv2.COLOR_BGR2RGB) + return rgb + + def summ_boxlist2d(self, name, rgb, boxlist, scores=None, tids=None, frame_id=None, only_return=False, linewidth=2): + B, C, H, W = list(rgb.shape) + boxlist_vis = self.draw_boxlist2d_on_image(rgb, boxlist, scores=scores, tids=tids, linewidth=linewidth) + return self.summ_rgb(name, boxlist_vis, frame_id=frame_id, only_return=only_return) + + def summ_rgbs(self, name, ims, frame_ids=None, blacken_zeros=False, only_return=False): + if self.save_this: + + ims = gif_and_tile(ims, just_gif=self.just_gif) + vis = ims + + assert vis.dtype in {torch.uint8,torch.float32} + + if vis.dtype == torch.float32: + vis = back2color(vis, blacken_zeros) + + B, S, C, H, W = list(vis.shape) + + if frame_ids is not None: + assert(len(frame_ids)==S) + for s in range(S): + vis[:,s] = draw_frame_id_on_vis(vis[:,s], frame_ids[s]) + + if int(W) > self.maxwidth: + vis = vis[:,:,:,:self.maxwidth] + + if only_return: + return vis + else: + return self.summ_gif(name, vis, blacken_zeros) + + def summ_rgb(self, name, ims, blacken_zeros=False, frame_id=None, only_return=False, halfres=False): + if self.save_this: + assert ims.dtype in {torch.uint8,torch.float32} + + if ims.dtype == torch.float32: + ims = back2color(ims, blacken_zeros) + + #ims is B x C x H x W + vis = ims[0:1] # just the first one + B, C, H, W = list(vis.shape) + + if halfres: + vis = F.interpolate(vis, scale_factor=0.5) + + if frame_id is not None: + vis = draw_frame_id_on_vis(vis, frame_id) + + if int(W) > self.maxwidth: + vis = vis[:,:,:,:self.maxwidth] + + if only_return: + return vis + else: + return self.summ_gif(name, vis.unsqueeze(1), blacken_zeros) + + def flow2color(self, flow, clip=50.0): + """ + :param flow: Optical flow tensor. + :return: RGB image normalized between 0 and 1. + """ + + # flow is B x C x H x W + + B, C, H, W = list(flow.size()) + + flow = flow.clone().detach() + + abs_image = torch.abs(flow) + flow_mean = abs_image.mean(dim=[1,2,3]) + flow_std = abs_image.std(dim=[1,2,3]) + + if clip: + flow = torch.clamp(flow, -clip, clip)/clip + else: + # Apply some kind of normalization. 
Divide by the perceived maximum (mean + std*2) + flow_max = flow_mean + flow_std*2 + 1e-10 + for b in range(B): + flow[b] = flow[b].clamp(-flow_max[b].item(), flow_max[b].item()) / flow_max[b].clamp(min=1) + + radius = torch.sqrt(torch.sum(flow**2, dim=1, keepdim=True)) #B x 1 x H x W + radius_clipped = torch.clamp(radius, 0.0, 1.0) + + angle = torch.atan2(flow[:, 1:], flow[:, 0:1]) / np.pi #B x 1 x H x W + + hue = torch.clamp((angle + 1.0) / 2.0, 0.0, 1.0) + saturation = torch.ones_like(hue) * 0.75 + value = radius_clipped + hsv = torch.cat([hue, saturation, value], dim=1) #B x 3 x H x W + + #flow = tf.image.hsv_to_rgb(hsv) + flow = hsv_to_rgb(hsv) + flow = (flow*255.0).type(torch.ByteTensor) + return flow + + def summ_flow(self, name, im, clip=0.0, only_return=False, frame_id=None): + # flow is B x C x D x W + if self.save_this: + return self.summ_rgb(name, self.flow2color(im, clip=clip), only_return=only_return, frame_id=frame_id) + else: + return None + + def summ_oneds(self, name, ims, frame_ids=None, bev=False, fro=False, logvis=False, reduce_max=False, max_val=0.0, norm=True, only_return=False, do_colorize=False): + if self.save_this: + if bev: + B, C, H, _, W = list(ims[0].shape) + if reduce_max: + ims = [torch.max(im, dim=3)[0] for im in ims] + else: + ims = [torch.mean(im, dim=3) for im in ims] + elif fro: + B, C, _, H, W = list(ims[0].shape) + if reduce_max: + ims = [torch.max(im, dim=2)[0] for im in ims] + else: + ims = [torch.mean(im, dim=2) for im in ims] + + + if len(ims) != 1: # sequence + im = gif_and_tile(ims, just_gif=self.just_gif) + else: + im = torch.stack(ims, dim=1) # single frame + + B, S, C, H, W = list(im.shape) + + if logvis and max_val: + max_val = np.log(max_val) + im = torch.log(torch.clamp(im, 0)+1.0) + im = torch.clamp(im, 0, max_val) + im = im/max_val + norm = False + elif max_val: + im = torch.clamp(im, 0, max_val) + im = im/max_val + norm = False + + if norm: + # normalize before oned2inferno, + # so that the ranges are similar within B across S + im = utils.basic.normalize(im) + + im = im.view(B*S, C, H, W) + vis = oned2inferno(im, norm=norm, do_colorize=do_colorize) + vis = vis.view(B, S, 3, H, W) + + if frame_ids is not None: + assert(len(frame_ids)==S) + for s in range(S): + vis[:,s] = draw_frame_id_on_vis(vis[:,s], frame_ids[s]) + + if W > self.maxwidth: + vis = vis[...,:self.maxwidth] + + if only_return: + return vis + else: + self.summ_gif(name, vis) + + def summ_oned(self, name, im, bev=False, fro=False, logvis=False, max_val=0, max_along_y=False, norm=True, frame_id=None, only_return=False): + if self.save_this: + + if bev: + B, C, H, _, W = list(im.shape) + if max_along_y: + im = torch.max(im, dim=3)[0] + else: + im = torch.mean(im, dim=3) + elif fro: + B, C, _, H, W = list(im.shape) + if max_along_y: + im = torch.max(im, dim=2)[0] + else: + im = torch.mean(im, dim=2) + else: + B, C, H, W = list(im.shape) + + im = im[0:1] # just the first one + assert(C==1) + + if logvis and max_val: + max_val = np.log(max_val) + im = torch.log(im) + im = torch.clamp(im, 0, max_val) + im = im/max_val + norm = False + elif max_val: + im = torch.clamp(im, 0, max_val)/max_val + norm = False + + vis = oned2inferno(im, norm=norm) + if W > self.maxwidth: + vis = vis[...,:self.maxwidth] + return self.summ_rgb(name, vis, blacken_zeros=False, frame_id=frame_id, only_return=only_return) + + def summ_feats(self, name, feats, valids=None, pca=True, fro=False, only_return=False, frame_ids=None): + if self.save_this: + if valids is not None: + valids = torch.stack(valids, 
dim=1) + + feats = torch.stack(feats, dim=1) + # feats leads with B x S x C + + if feats.ndim==6: + + # feats is B x S x C x D x H x W + if fro: + reduce_dim = 3 + else: + reduce_dim = 4 + + if valids is None: + feats = torch.mean(feats, dim=reduce_dim) + else: + valids = valids.repeat(1, 1, feats.size()[2], 1, 1, 1) + feats = utils.basic.reduce_masked_mean(feats, valids, dim=reduce_dim) + + B, S, C, D, W = list(feats.size()) + + if not pca: + # feats leads with B x S x C + feats = torch.mean(torch.abs(feats), dim=2, keepdims=True) + # feats leads with B x S x 1 + feats = torch.unbind(feats, dim=1) + return self.summ_oneds(name=name, ims=feats, norm=True, only_return=only_return, frame_ids=frame_ids) + + else: + __p = lambda x: utils.basic.pack_seqdim(x, B) + __u = lambda x: utils.basic.unpack_seqdim(x, B) + + feats_ = __p(feats) + + if valids is None: + feats_pca_ = get_feat_pca(feats_) + else: + valids_ = __p(valids) + feats_pca_ = get_feat_pca(feats_, valids) + + feats_pca = __u(feats_pca_) + + return self.summ_rgbs(name=name, ims=torch.unbind(feats_pca, dim=1), only_return=only_return, frame_ids=frame_ids) + + def summ_feat(self, name, feat, valid=None, pca=True, only_return=False, bev=False, fro=False, frame_id=None): + if self.save_this: + if feat.ndim==5: # B x C x D x H x W + + if bev: + reduce_axis = 3 + elif fro: + reduce_axis = 2 + else: + # default to bev + reduce_axis = 3 + + if valid is None: + feat = torch.mean(feat, dim=reduce_axis) + else: + valid = valid.repeat(1, feat.size()[1], 1, 1, 1) + feat = utils.basic.reduce_masked_mean(feat, valid, dim=reduce_axis) + + B, C, D, W = list(feat.shape) + + if not pca: + feat = torch.mean(torch.abs(feat), dim=1, keepdims=True) + # feat is B x 1 x D x W + return self.summ_oned(name=name, im=feat, norm=True, only_return=only_return, frame_id=frame_id) + else: + feat_pca = get_feat_pca(feat, valid) + return self.summ_rgb(name, feat_pca, only_return=only_return, frame_id=frame_id) + + def summ_scalar(self, name, value): + if (not (isinstance(value, int) or isinstance(value, float) or isinstance(value, np.float32))) and ('torch' in value.type()): + value = value.detach().cpu().numpy() + if not np.isnan(value): + if (self.log_freq == 1): + self.writer.add_scalar(name, value, global_step=self.global_step) + elif self.save_this or np.mod(self.global_step, self.scalar_freq)==0: + self.writer.add_scalar(name, value, global_step=self.global_step) + + def summ_seg(self, name, seg, only_return=False, frame_id=None, colormap='tab20', label_colors=None): + if not self.save_this: + return + + B,H,W = seg.shape + + if label_colors is None: + custom_label_colors = False + # label_colors = get_n_colors(int(torch.max(seg).item()), sequential=True) + label_colors = cm.get_cmap(colormap).colors + label_colors = [[int(i*255) for i in l] for l in label_colors] + else: + custom_label_colors = True + # label_colors = matplotlib.cm.get_cmap(colormap).colors + # label_colors = [[int(i*255) for i in l] for l in label_colors] + # print('label_colors', label_colors) + + # label_colors = [ + # (0, 0, 0), # None + # (70, 70, 70), # Buildings + # (190, 153, 153), # Fences + # (72, 0, 90), # Other + # (220, 20, 60), # Pedestrians + # (153, 153, 153), # Poles + # (157, 234, 50), # RoadLines + # (128, 64, 128), # Roads + # (244, 35, 232), # Sidewalks + # (107, 142, 35), # Vegetation + # (0, 0, 255), # Vehicles + # (102, 102, 156), # Walls + # (220, 220, 0) # TrafficSigns + # ] + + r = torch.zeros_like(seg,dtype=torch.uint8) + g = torch.zeros_like(seg,dtype=torch.uint8) + 
b = torch.zeros_like(seg,dtype=torch.uint8) + + for label in range(0,len(label_colors)): + if (not custom_label_colors):# and (N > 20): + label_ = label % 20 + else: + label_ = label + + idx = (seg == label+1) + r[idx] = label_colors[label_][0] + g[idx] = label_colors[label_][1] + b[idx] = label_colors[label_][2] + + rgb = torch.stack([r,g,b],axis=1) + return self.summ_rgb(name,rgb,only_return=only_return, frame_id=frame_id) + + def summ_pts_on_rgb(self, name, trajs, rgb, valids=None, frame_id=None, only_return=False, show_dots=True, cmap='coolwarm', linewidth=1): + # trajs is B, S, N, 2 + # rgbs is B, S, C, H, W + B, C, H, W = rgb.shape + B, S, N, D = trajs.shape + + rgb = rgb[0] # C, H, W + trajs = trajs[0] # S, N, 2 + if valids is None: + valids = torch.ones_like(trajs[:,:,0]) # S, N + else: + valids = valids[0] + # print('trajs', trajs.shape) + # print('valids', valids.shape) + + rgb = back2color(rgb).detach().cpu().numpy() + rgb = np.transpose(rgb, [1, 2, 0]) # put channels last + + trajs = trajs.long().detach().cpu().numpy() # S, N, 2 + valids = valids.long().detach().cpu().numpy() # S, N + + rgb = rgb.astype(np.uint8).copy() + + for i in range(N): + if cmap=='onediff' and i==0: + cmap_ = 'spring' + elif cmap=='onediff': + cmap_ = 'winter' + else: + cmap_ = cmap + traj = trajs[:,i] # S,2 + valid = valids[:,i] # S + + color_map = cm.get_cmap(cmap) + color = np.array(color_map(i)[:3]) * 255 # rgb + for s in range(S): + if valid[s]: + cv2.circle(rgb, (int(traj[s,0]), int(traj[s,1])), linewidth, color, -1) + rgb = torch.from_numpy(rgb).permute(2,0,1).unsqueeze(0) + rgb = preprocess_color(rgb) + return self.summ_rgb(name, rgb, only_return=only_return, frame_id=frame_id) + + def summ_pts_on_rgbs(self, name, trajs, rgbs, valids=None, frame_ids=None, only_return=False, show_dots=True, cmap='coolwarm', linewidth=1): + # trajs is B, S, N, 2 + # rgbs is B, S, C, H, W + B, S, C, H, W = rgbs.shape + B, S2, N, D = trajs.shape + assert(S==S2) + + rgbs = rgbs[0] # S, C, H, W + trajs = trajs[0] # S, N, 2 + if valids is None: + valids = torch.ones_like(trajs[:,:,0]) # S, N + else: + valids = valids[0] + # print('trajs', trajs.shape) + # print('valids', valids.shape) + + rgbs_color = [] + for rgb in rgbs: + rgb = back2color(rgb).detach().cpu().numpy() + rgb = np.transpose(rgb, [1, 2, 0]) # put channels last + rgbs_color.append(rgb) # each element 3 x H x W + + trajs = trajs.long().detach().cpu().numpy() # S, N, 2 + valids = valids.long().detach().cpu().numpy() # S, N + + rgbs_color = [rgb.astype(np.uint8).copy() for rgb in rgbs_color] + + for i in range(N): + traj = trajs[:,i] # S,2 + valid = valids[:,i] # S + + color_map = cm.get_cmap(cmap) + color = np.array(color_map(0)[:3]) * 255 # rgb + for s in range(S): + if valid[s]: + cv2.circle(rgbs_color[s], (traj[s,0], traj[s,1]), linewidth, color, -1) + rgbs = [] + for rgb in rgbs_color: + rgb = torch.from_numpy(rgb).permute(2, 0, 1).unsqueeze(0) + rgbs.append(preprocess_color(rgb)) + + return self.summ_rgbs(name, rgbs, only_return=only_return, frame_ids=frame_ids) + + + def summ_traj2ds_on_rgbs(self, name, trajs, rgbs, valids=None, frame_ids=None, only_return=False, show_dots=False, cmap='coolwarm', vals=None, linewidth=1): + # trajs is B, S, N, 2 + # rgbs is B, S, C, H, W + B, S, C, H, W = rgbs.shape + B, S2, N, D = trajs.shape + assert(S==S2) + + rgbs = rgbs[0] # S, C, H, W + trajs = trajs[0] # S, N, 2 + if valids is None: + valids = torch.ones_like(trajs[:,:,0]) # S, N + else: + valids = valids[0] + + # print('trajs', trajs.shape) + # print('valids', 
valids.shape) + + if vals is not None: + vals = vals[0] # N + # print('vals', vals.shape) + + rgbs_color = [] + for rgb in rgbs: + rgb = back2color(rgb).detach().cpu().numpy() + rgb = np.transpose(rgb, [1, 2, 0]) # put channels last + rgbs_color.append(rgb) # each element 3 x H x W + + for i in range(N): + if cmap=='onediff' and i==0: + cmap_ = 'spring' + elif cmap=='onediff': + cmap_ = 'winter' + else: + cmap_ = cmap + traj = trajs[:,i].long().detach().cpu().numpy() # S, 2 + valid = valids[:,i].long().detach().cpu().numpy() # S + + # print('traj', traj.shape) + # print('valid', valid.shape) + + if vals is not None: + # val = vals[:,i].float().detach().cpu().numpy() # [] + val = vals[i].float().detach().cpu().numpy() # [] + # print('val', val.shape) + else: + val = None + + for t in range(S): + if valid[t]: + # traj_seq = traj[max(t-16,0):t+1] + traj_seq = traj[max(t-8,0):t+1] + val_seq = np.linspace(0,1,len(traj_seq)) + # if t<2: + # val_seq = np.zeros_like(val_seq) + # print('val_seq', val_seq) + # val_seq = 1.0 + # val_seq = np.arange(8)/8.0 + # val_seq = val_seq[-len(traj_seq):] + # rgbs_color[t] = self.draw_traj_on_image_py(rgbs_color[t], traj_seq, S=S, show_dots=show_dots, cmap=cmap_, val=val_seq, linewidth=linewidth) + rgbs_color[t] = self.draw_traj_on_image_py(rgbs_color[t], traj_seq, S=S, show_dots=show_dots, cmap=cmap_, val=val_seq, linewidth=linewidth) + # input() + + for i in range(N): + if cmap=='onediff' and i==0: + cmap_ = 'spring' + elif cmap=='onediff': + cmap_ = 'winter' + else: + cmap_ = cmap + traj = trajs[:,i] # S,2 + # vis = visibles[:,i] # S + vis = torch.ones_like(traj[:,0]) # S + valid = valids[:,i] # S + rgbs_color = self.draw_circ_on_images_py(rgbs_color, traj, vis, S=0, show_dots=show_dots, cmap=cmap_, linewidth=linewidth) + + rgbs = [] + for rgb in rgbs_color: + rgb = torch.from_numpy(rgb).permute(2, 0, 1).unsqueeze(0) + rgbs.append(preprocess_color(rgb)) + + return self.summ_rgbs(name, rgbs, only_return=only_return, frame_ids=frame_ids) + + + def summ_traj2ds_on_rgbs_py(self, name, trajs, rgbs_color, valids=None, frame_ids=None, only_return=False, show_dots=False, cmap='coolwarm', vals=None, linewidth=1): + # trajs is B, S, N, 2 + # rgbs is B, S, C, H, W + # B, S, C, H, W = rgbs.shape + B, S, N, D = trajs.shape + # assert(S==S2) + + # rgbs = rgbs[0] # S, C, H, W + trajs = trajs[0] # S, N, 2 + if valids is None: + valids = torch.ones_like(trajs[:,:,0]) # S, N + else: + valids = valids[0] + + # print('trajs', trajs.shape) + # print('valids', valids.shape) + + if vals is not None: + vals = vals[0] # N + # print('vals', vals.shape) + + # rgbs_color = [] + # for rgb in rgbs: + # rgb = back2color(rgb).detach().cpu().numpy() + # rgb = np.transpose(rgb, [1, 2, 0]) # put channels last + # rgbs_color.append(rgb) # each element 3 x H x W + + for i in range(N): + if cmap=='onediff' and i==0: + cmap_ = 'spring' + elif cmap=='onediff': + cmap_ = 'winter' + else: + cmap_ = cmap + traj = trajs[:,i].long().detach().cpu().numpy() # S, 2 + valid = valids[:,i].long().detach().cpu().numpy() # S + + # print('traj', traj.shape) + # print('valid', valid.shape) + + if vals is not None: + # val = vals[:,i].float().detach().cpu().numpy() # [] + val = vals[i].float().detach().cpu().numpy() # [] + # print('val', val.shape) + else: + val = None + + for t in range(S): + # if valid[t]: + # traj_seq = traj[max(t-16,0):t+1] + traj_seq = traj[max(t-8,0):t+1] + val_seq = np.linspace(0,1,len(traj_seq)) + # if t<2: + # val_seq = np.zeros_like(val_seq) + # print('val_seq', val_seq) + # val_seq = 
1.0 + # val_seq = np.arange(8)/8.0 + # val_seq = val_seq[-len(traj_seq):] + # rgbs_color[t] = self.draw_traj_on_image_py(rgbs_color[t], traj_seq, S=S, show_dots=show_dots, cmap=cmap_, val=val_seq, linewidth=linewidth) + rgbs_color[t] = self.draw_traj_on_image_py(rgbs_color[t], traj_seq, S=S, show_dots=show_dots, cmap=cmap_, val=val_seq, linewidth=linewidth) + # input() + + for i in range(N): + if cmap=='onediff' and i==0: + cmap_ = 'spring' + elif cmap=='onediff': + cmap_ = 'winter' + else: + cmap_ = cmap + traj = trajs[:,i] # S,2 + # vis = visibles[:,i] # S + vis = torch.ones_like(traj[:,0]) # S + valid = valids[:,i] # S + rgbs_color = self.draw_circ_on_images_py(rgbs_color, traj, vis, S=0, show_dots=show_dots, cmap=cmap_, linewidth=linewidth) + + rgbs = [] + for rgb in rgbs_color: + rgb = torch.from_numpy(rgb).permute(2, 0, 1).unsqueeze(0) + rgbs.append(preprocess_color(rgb)) + + return self.summ_rgbs(name, rgbs, only_return=only_return, frame_ids=frame_ids) + + + def summ_traj2ds_on_rgbs2(self, name, trajs, visibles, rgbs, valids=None, frame_ids=None, only_return=False, show_dots=True, cmap=None, linewidth=1): + # trajs is B, S, N, 2 + # rgbs is B, S, C, H, W + B, S, C, H, W = rgbs.shape + B, S2, N, D = trajs.shape + assert(S==S2) + + rgbs = rgbs[0] # S, C, H, W + trajs = trajs[0] # S, N, 2 + visibles = visibles[0] # S, N + if valids is None: + valids = torch.ones_like(trajs[:,:,0]) # S, N + else: + valids = valids[0] + # print('trajs', trajs.shape) + # print('valids', valids.shape) + + rgbs_color = [] + for rgb in rgbs: + rgb = back2color(rgb).detach().cpu().numpy() + rgb = np.transpose(rgb, [1, 2, 0]) # put channels last + rgbs_color.append(rgb) # each element 3 x H x W + + trajs = trajs.long().detach().cpu().numpy() # S, N, 2 + visibles = visibles.float().detach().cpu().numpy() # S, N + valids = valids.long().detach().cpu().numpy() # S, N + + for i in range(N): + if cmap=='onediff' and i==0: + cmap_ = 'spring' + elif cmap=='onediff': + cmap_ = 'winter' + else: + cmap_ = cmap + traj = trajs[:,i] # S,2 + vis = visibles[:,i] # S + valid = valids[:,i] # S + rgbs_color = self.draw_traj_on_images_py(rgbs_color, traj, S=S, show_dots=show_dots, cmap=cmap_, linewidth=linewidth) + + for i in range(N): + if cmap=='onediff' and i==0: + cmap_ = 'spring' + elif cmap=='onediff': + cmap_ = 'winter' + else: + cmap_ = cmap + traj = trajs[:,i] # S,2 + vis = visibles[:,i] # S + valid = valids[:,i] # S + if valid[0]: + rgbs_color = self.draw_circ_on_images_py(rgbs_color, traj, vis, S=S, show_dots=show_dots, cmap=None, linewidth=linewidth) + + rgbs = [] + for rgb in rgbs_color: + rgb = torch.from_numpy(rgb).permute(2, 0, 1).unsqueeze(0) + rgbs.append(preprocess_color(rgb)) + + return self.summ_rgbs(name, rgbs, only_return=only_return, frame_ids=frame_ids) + + def summ_traj2ds_on_rgb(self, name, trajs, rgb, valids=None, show_dots=False, show_lines=True, frame_id=None, only_return=False, cmap='coolwarm', linewidth=1): + # trajs is B, S, N, 2 + # rgb is B, C, H, W + B, C, H, W = rgb.shape + B, S, N, D = trajs.shape + + rgb = rgb[0] # S, C, H, W + trajs = trajs[0] # S, N, 2 + + if valids is None: + valids = torch.ones_like(trajs[:,:,0]) + else: + valids = valids[0] + + rgb_color = back2color(rgb).detach().cpu().numpy() + rgb_color = np.transpose(rgb_color, [1, 2, 0]) # put channels last + + # using maxdist will dampen the colors for short motions + norms = torch.sqrt(1e-4 + torch.sum((trajs[-1] - trajs[0])**2, dim=1)) # N + maxdist = torch.quantile(norms, 0.95).detach().cpu().numpy() + maxdist = None + trajs 
= trajs.long().detach().cpu().numpy() # S, N, 2 + valids = valids.long().detach().cpu().numpy() # S, N + + for i in range(N): + if cmap=='onediff' and i==0: + cmap_ = 'spring' + elif cmap=='onediff': + cmap_ = 'winter' + else: + cmap_ = cmap + traj = trajs[:,i] # S, 2 + valid = valids[:,i] # S + if valid[0]==1: + traj = traj[valid>0] + rgb_color = self.draw_traj_on_image_py( + rgb_color, traj, S=S, show_dots=show_dots, show_lines=show_lines, cmap=cmap_, maxdist=maxdist, linewidth=linewidth) + + rgb_color = torch.from_numpy(rgb_color).permute(2, 0, 1).unsqueeze(0) + rgb = preprocess_color(rgb_color) + return self.summ_rgb(name, rgb, only_return=only_return, frame_id=frame_id) + + def draw_traj_on_image_py(self, rgb, traj, S=50, linewidth=1, show_dots=False, show_lines=True, cmap='coolwarm', val=None, maxdist=None): + # all inputs are numpy tensors + # rgb is 3 x H x W + # traj is S x 2 + + H, W, C = rgb.shape + assert(C==3) + + rgb = rgb.astype(np.uint8).copy() + + S1, D = traj.shape + assert(D==2) + + color_map = cm.get_cmap(cmap) + S1, D = traj.shape + + for s in range(S1): + if val is not None: + color = np.array(color_map(val[s])[:3]) * 255 # rgb + else: + if maxdist is not None: + val = (np.sqrt(np.sum((traj[s]-traj[0])**2))/maxdist).clip(0,1) + color = np.array(color_map(val)[:3]) * 255 # rgb + else: + color = np.array(color_map((s)/max(1,float(S-2)))[:3]) * 255 # rgb + + if show_lines and s<(S1-1): + cv2.line(rgb, + (int(traj[s,0]), int(traj[s,1])), + (int(traj[s+1,0]), int(traj[s+1,1])), + color, + linewidth, + cv2.LINE_AA) + if show_dots: + cv2.circle(rgb, (int(traj[s,0]), int(traj[s,1])), linewidth, np.array(color_map(1)[:3])*255, -1) + + # if maxdist is not None: + # val = (np.sqrt(np.sum((traj[-1]-traj[0])**2))/maxdist).clip(0,1) + # color = np.array(color_map(val)[:3]) * 255 # rgb + # else: + # # draw the endpoint of traj, using the next color (which may be the last color) + # color = np.array(color_map((S1-1)/max(1,float(S-2)))[:3]) * 255 # rgb + + # # emphasize endpoint + # cv2.circle(rgb, (traj[-1,0], traj[-1,1]), linewidth*2, color, -1) + + return rgb + + + + def draw_traj_on_images_py(self, rgbs, traj, S=50, linewidth=1, show_dots=False, cmap='coolwarm', maxdist=None): + # all inputs are numpy tensors + # rgbs is a list of H,W,3 + # traj is S,2 + H, W, C = rgbs[0].shape + assert(C==3) + + rgbs = [rgb.astype(np.uint8).copy() for rgb in rgbs] + + S1, D = traj.shape + assert(D==2) + + x = int(np.clip(traj[0,0], 0, W-1)) + y = int(np.clip(traj[0,1], 0, H-1)) + color = rgbs[0][y,x] + color = (int(color[0]),int(color[1]),int(color[2])) + for s in range(S): + # bak_color = np.array(color_map(1.0)[:3]) * 255 # rgb + # cv2.circle(rgbs[s], (traj[s,0], traj[s,1]), linewidth*4, bak_color, -1) + cv2.polylines(rgbs[s], + [traj[:s+1]], + False, + color, + linewidth, + cv2.LINE_AA) + return rgbs + + def draw_circs_on_image_py(self, rgb, xy, colors=None, linewidth=10, radius=3, show_dots=False, maxdist=None): + # all inputs are numpy tensors + # rgbs is a list of 3,H,W + # xy is N,2 + H, W, C = rgb.shape + assert(C==3) + + rgb = rgb.astype(np.uint8).copy() + + N, D = xy.shape + assert(D==2) + + + xy = xy.astype(np.float32) + xy[:,0] = np.clip(xy[:,0], 0, W-1) + xy[:,1] = np.clip(xy[:,1], 0, H-1) + xy = xy.astype(np.int32) + + + + if colors is None: + colors = get_n_colors(N) + + for n in range(N): + color = colors[n] + # print('color', color) + # color = (color[0]*255).astype(np.uint8) + color = (int(color[0]),int(color[1]),int(color[2])) + + # x = int(np.clip(xy[0,0], 0, W-1)) + # y = 
int(np.clip(xy[0,1], 0, H-1)) + # color_ = rgbs[0][y,x] + # color_ = (int(color_[0]),int(color_[1]),int(color_[2])) + # color_ = (int(color_[0]),int(color_[1]),int(color_[2])) + + cv2.circle(rgb, (xy[n,0], xy[n,1]), linewidth, color, 3) + # vis_color = int(np.squeeze(vis[s])*255) + # vis_color = (vis_color,vis_color,vis_color) + # cv2.circle(rgbs[s], (traj[s,0], traj[s,1]), linewidth+1, vis_color, -1) + return rgb + + def draw_circ_on_images_py(self, rgbs, traj, vis, S=50, linewidth=1, show_dots=False, cmap=None, maxdist=None): + # all inputs are numpy tensors + # rgbs is a list of 3,H,W + # traj is S,2 + H, W, C = rgbs[0].shape + assert(C==3) + + rgbs = [rgb.astype(np.uint8).copy() for rgb in rgbs] + + S1, D = traj.shape + assert(D==2) + + if cmap is None: + bremm = ColorMap2d() + traj_ = traj[0:1].astype(np.float32) + traj_[:,0] /= float(W) + traj_[:,1] /= float(H) + color = bremm(traj_) + # print('color', color) + color = (color[0]*255).astype(np.uint8) + # color = (int(color[0]),int(color[1]),int(color[2])) + color = (int(color[2]),int(color[1]),int(color[0])) + + for s in range(S1): + if cmap is not None: + color_map = cm.get_cmap(cmap) + # color = np.array(color_map(s/(S-1))[:3]) * 255 # rgb + color = np.array(color_map((s+1)/max(1,float(S-1)))[:3]) * 255 # rgb + # color = color.astype(np.uint8) + # color = (color[0], color[1], color[2]) + # print('color', color) + # import ipdb; ipdb.set_trace() + + cv2.circle(rgbs[s], (int(traj[s,0]), int(traj[s,1])), linewidth+1, color, -1) + # vis_color = int(np.squeeze(vis[s])*255) + # vis_color = (vis_color,vis_color,vis_color) + # cv2.circle(rgbs[s], (int(traj[s,0]), int(traj[s,1])), linewidth+1, vis_color, -1) + + return rgbs + + def summ_traj_as_crops(self, name, trajs_e, rgbs, frame_id=None, only_return=False, show_circ=False, trajs_g=None, is_g=False): + B, S, N, D = trajs_e.shape + assert(N==1) + assert(D==2) + + rgbs_vis = [] + n = 0 + pad_amount = 100 + trajs_e_py = trajs_e[0].detach().cpu().numpy() + # trajs_e_py = np.clip(trajs_e_py, min=pad_amount/2, max=pad_amoun + trajs_e_py = trajs_e_py + pad_amount + + if trajs_g is not None: + trajs_g_py = trajs_g[0].detach().cpu().numpy() + trajs_g_py = trajs_g_py + pad_amount + + for s in range(S): + rgb = rgbs[0,s].detach().cpu().numpy() + # print('orig rgb', rgb.shape) + rgb = np.transpose(rgb,(1,2,0)) # H, W, 3 + + rgb = np.pad(rgb, ((pad_amount,pad_amount),(pad_amount,pad_amount),(0,0))) + # print('pad rgb', rgb.shape) + H, W, C = rgb.shape + + if trajs_g is not None: + xy_g = trajs_g_py[s,n] + xy_g[0] = np.clip(xy_g[0], pad_amount, W-pad_amount) + xy_g[1] = np.clip(xy_g[1], pad_amount, H-pad_amount) + rgb = self.draw_circs_on_image_py(rgb, xy_g.reshape(1,2), colors=[(0,255,0)], linewidth=2, radius=3) + + xy_e = trajs_e_py[s,n] + xy_e[0] = np.clip(xy_e[0], pad_amount, W-pad_amount) + xy_e[1] = np.clip(xy_e[1], pad_amount, H-pad_amount) + + if show_circ: + if is_g: + rgb = self.draw_circs_on_image_py(rgb, xy_e.reshape(1,2), colors=[(0,255,0)], linewidth=2, radius=3) + else: + rgb = self.draw_circs_on_image_py(rgb, xy_e.reshape(1,2), colors=[(255,0,255)], linewidth=2, radius=3) + + + xmin = int(xy_e[0])-pad_amount//2 + xmax = int(xy_e[0])+pad_amount//2 + ymin = int(xy_e[1])-pad_amount//2 + ymax = int(xy_e[1])+pad_amount//2 + + rgb_ = rgb[ymin:ymax, xmin:xmax] + + H_, W_ = rgb_.shape[:2] + # if np.any(rgb_.shape==0): + # input() + if H_==0 or W_==0: + import ipdb; ipdb.set_trace() + + rgb_ = rgb_.transpose(2,0,1) + rgb_ = torch.from_numpy(rgb_) + + rgbs_vis.append(rgb_) + + # nrow = 
int(np.sqrt(S)*(16.0/9)/2.0) + nrow = int(np.sqrt(S)*1.5) + grid_img = torchvision.utils.make_grid(torch.stack(rgbs_vis, dim=0), nrow=nrow).unsqueeze(0) + # print('grid_img', grid_img.shape) + return self.summ_rgb(name, grid_img.byte(), frame_id=frame_id, only_return=only_return) + + def summ_occ(self, name, occ, reduce_axes=[3], bev=False, fro=False, pro=False, frame_id=None, only_return=False): + if self.save_this: + B, C, D, H, W = list(occ.shape) + if bev: + reduce_axes = [3] + elif fro: + reduce_axes = [2] + elif pro: + reduce_axes = [4] + for reduce_axis in reduce_axes: + height = convert_occ_to_height(occ, reduce_axis=reduce_axis) + if reduce_axis == reduce_axes[-1]: + return self.summ_oned(name=('%s_ax%d' % (name, reduce_axis)), im=height, norm=False, frame_id=frame_id, only_return=only_return) + else: + self.summ_oned(name=('%s_ax%d' % (name, reduce_axis)), im=height, norm=False, frame_id=frame_id, only_return=only_return) + +def erode2d(im, times=1, device='cuda'): + weights2d = torch.ones(1, 1, 3, 3, device=device) + for time in range(times): + im = 1.0 - F.conv2d(1.0 - im, weights2d, padding=1).clamp(0, 1) + return im + +def dilate2d(im, times=1, device='cuda', mode='square'): + weights2d = torch.ones(1, 1, 3, 3, device=device) + if mode=='cross': + weights2d[:,:,0,0] = 0.0 + weights2d[:,:,0,2] = 0.0 + weights2d[:,:,2,0] = 0.0 + weights2d[:,:,2,2] = 0.0 + for time in range(times): + im = F.conv2d(im, weights2d, padding=1).clamp(0, 1) + return im \ No newline at end of file diff --git a/dynamic_predictor/dust3r/utils/po_utils/misc.py b/dynamic_predictor/dust3r/utils/po_utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..bedf11918c9676dea695f7d3b3bd4fd31b3f74d8 --- /dev/null +++ b/dynamic_predictor/dust3r/utils/po_utils/misc.py @@ -0,0 +1,165 @@ +import torch +import numpy as np +import math +from prettytable import PrettyTable + +def count_parameters(model): + table = PrettyTable(["Modules", "Parameters"]) + total_params = 0 + for name, parameter in model.named_parameters(): + if not parameter.requires_grad: + continue + param = parameter.numel() + if param > 100000: + table.add_row([name, param]) + total_params+=param + print(table) + print('total params: %.2f M' % (total_params/1000000.0)) + return total_params + +def posemb_sincos_2d_xy(xy, C, temperature=10000, dtype=torch.float32, cat_coords=False): + device = xy.device + dtype = xy.dtype + B, S, D = xy.shape + assert(D==2) + x = xy[:,:,0] + y = xy[:,:,1] + assert (C % 4) == 0, 'feature dimension must be multiple of 4 for sincos emb' + omega = torch.arange(C // 4, device=device) / (C // 4 - 1) + omega = 1. 
/ (temperature ** omega) + + y = y.flatten()[:, None] * omega[None, :] + x = x.flatten()[:, None] * omega[None, :] + pe = torch.cat((x.sin(), x.cos(), y.sin(), y.cos()), dim=1) + pe = pe.reshape(B,S,C).type(dtype) + if cat_coords: + pe = torch.cat([pe, xy], dim=2) # B,N,C+2 + return pe + +class SimplePool(): + def __init__(self, pool_size, version='pt'): + self.pool_size = pool_size + self.version = version + self.items = [] + + if not (version=='pt' or version=='np'): + print('version = %s; please choose pt or np' % version) + assert(False) # please choose pt or np + + def __len__(self): + return len(self.items) + + def mean(self, min_size=1): + if min_size=='half': + pool_size_thresh = self.pool_size/2 + else: + pool_size_thresh = min_size + + if self.version=='np': + if len(self.items) >= pool_size_thresh: + return np.sum(self.items)/float(len(self.items)) + else: + return np.nan + if self.version=='pt': + if len(self.items) >= pool_size_thresh: + return torch.sum(self.items)/float(len(self.items)) + else: + return torch.tensor(np.nan) # torch.from_numpy requires an ndarray, so wrap the scalar with torch.tensor + + def sample(self, with_replacement=True): + idx = np.random.randint(len(self.items)) + if with_replacement: + return self.items[idx] + else: + return self.items.pop(idx) + + def fetch(self, num=None): + if self.version=='pt': + item_array = torch.stack(self.items) + elif self.version=='np': + item_array = np.stack(self.items) + if num is not None: + # there better be some items + assert(len(self.items) >= num) + + # if there are not that many elements just return however many there are + if len(self.items) < num: + return item_array + else: + idxs = np.random.randint(len(self.items), size=num) + return item_array[idxs] + else: + return item_array + + def is_full(self): + full = len(self.items)==self.pool_size + return full + + def empty(self): + self.items = [] + + def update(self, items): + for item in items: + if len(self.items) < self.pool_size: + # the pool is not full, so let's add this in + self.items.append(item) + else: + # the pool is full + # pop from the front + self.items.pop(0) + # add to the back + self.items.append(item) + return self.items + +def farthest_point_sample(xyz, npoint, include_ends=False, deterministic=False): + """ + Input: + xyz: pointcloud data, [B, N, C], where C is probably 3 + npoint: number of samples + Return: + inds: sampled pointcloud index, [B, npoint] + """ + device = xyz.device + B, N, C = xyz.shape + xyz = xyz.float() + inds = torch.zeros(B, npoint, dtype=torch.long).to(device) + distance = torch.ones(B, N).to(device) * 1e10 + if deterministic: + farthest = torch.randint(0, 1, (B,), dtype=torch.long).to(device) + else: + farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device) + batch_indices = torch.arange(B, dtype=torch.long).to(device) + for i in range(npoint): + if include_ends: + if i==0: + farthest = 0 + elif i==1: + farthest = N-1 + inds[:, i] = farthest + centroid = xyz[batch_indices, farthest, :].view(B, 1, C) + dist = torch.sum((xyz - centroid) ** 2, -1) + mask = dist < distance + distance[mask] = dist[mask] + farthest = torch.max(distance, -1)[1] + + if npoint > N: + # if we need more samples, make them random + distance += torch.randn_like(distance) + return inds + +def farthest_point_sample_py(xyz, npoint): + N,C = xyz.shape + inds = np.zeros(npoint, dtype=np.int32) + distance = np.ones(N) * 1e10 + farthest = np.random.randint(0, N, dtype=np.int32) + for i in range(npoint): + inds[i] = farthest + centroid = xyz[farthest, :].reshape(1,C) + dist = np.sum((xyz - centroid) ** 2, -1) + mask = dist < 
distance + distance[mask] = dist[mask] + farthest = np.argmax(distance, -1) + if npoint > N: + # if we need more samples, make them random + distance += np.random.randn(*distance.shape) + return inds \ No newline at end of file diff --git a/dynamic_predictor/dust3r/utils/viz_demo.py b/dynamic_predictor/dust3r/utils/viz_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..f00cfc651c249147b432e8d6b29927ab04b1e5e1 --- /dev/null +++ b/dynamic_predictor/dust3r/utils/viz_demo.py @@ -0,0 +1,124 @@ +from scipy.spatial.transform import Rotation +import numpy as np +import trimesh +from dust3r.utils.device import to_numpy +import torch +import os +import cv2 +from dust3r.viz import add_scene_cam, CAM_COLORS, OPENGL, pts3d_to_trimesh, cat_meshes +from third_party.raft import load_RAFT +from datasets_preprocess.sintel_get_dynamics import compute_optical_flow +from dust3r.utils.flow_vis import flow_to_image + +def convert_scene_output_to_glb(outdir, imgs, pts3d, mask, focals, cams2world, cam_size=0.05, show_cam=True, + cam_color=None, as_pointcloud=False, + transparent_cams=False, silent=False, save_name=None): + assert len(pts3d) == len(mask) <= len(imgs) <= len(cams2world) == len(focals) + pts3d = to_numpy(pts3d) + imgs = to_numpy(imgs) + focals = to_numpy(focals) + cams2world = to_numpy(cams2world) + + scene = trimesh.Scene() + + # full pointcloud + if as_pointcloud: + pts = np.concatenate([p[m] for p, m in zip(pts3d, mask)]) + col = np.concatenate([p[m] for p, m in zip(imgs, mask)]) + pct = trimesh.PointCloud(pts.reshape(-1, 3), colors=col.reshape(-1, 3)) + scene.add_geometry(pct) + else: + meshes = [] + for i in range(len(imgs)): + meshes.append(pts3d_to_trimesh(imgs[i], pts3d[i], mask[i])) + mesh = trimesh.Trimesh(**cat_meshes(meshes)) + scene.add_geometry(mesh) + + # add each camera + if show_cam: + for i, pose_c2w in enumerate(cams2world): + if isinstance(cam_color, list): + camera_edge_color = cam_color[i] + else: + camera_edge_color = cam_color or CAM_COLORS[i % len(CAM_COLORS)] + add_scene_cam(scene, pose_c2w, camera_edge_color, + None if transparent_cams else imgs[i], focals[i], + imsize=imgs[i].shape[1::-1], screen_width=cam_size) + + rot = np.eye(4) + rot[:3, :3] = Rotation.from_euler('y', np.deg2rad(180)).as_matrix() + scene.apply_transform(np.linalg.inv(cams2world[0] @ OPENGL @ rot)) + if save_name is None: save_name='scene' + outfile = os.path.join(outdir, save_name+'.glb') + if not silent: + print('(exporting 3D scene to', outfile, ')') + scene.export(file_obj=outfile) + return outfile + +def get_dynamic_mask_from_pairviewer(scene, flow_net=None, both_directions=False, output_dir='./demo_tmp', motion_mask_thre=0.35): + """ + get the dynamic mask from the pairviewer + """ + if flow_net is None: + # flow_net = load_RAFT(model_path="third_party/RAFT/models/Tartan-C-T-TSKH-spring540x960-M.pth").to('cuda').eval() # sea-raft + flow_net = load_RAFT(model_path="third_party/RAFT/models/raft-things.pth").to('cuda').eval() + + imgs = scene.imgs + img1 = torch.from_numpy(imgs[0]*255).permute(2,0,1)[None] # (B, 3, H, W) + img2 = torch.from_numpy(imgs[1]*255).permute(2,0,1)[None] + with torch.no_grad(): + forward_flow = flow_net(img1.cuda(), img2.cuda(), iters=20, test_mode=True)[1] # (B, 2, H, W) + if both_directions: + backward_flow = flow_net(img2.cuda(), img1.cuda(), iters=20, test_mode=True)[1] + + B, _, H, W = forward_flow.shape + + depth_map1 = scene.get_depthmaps()[0] # (H, W) + depth_map2 = scene.get_depthmaps()[1] + + im_poses = scene.get_im_poses() + cam1 = 
im_poses[0] # (4, 4) cam2world + cam2 = im_poses[1] + extrinsics1 = torch.linalg.inv(cam1) # (4, 4) world2cam + extrinsics2 = torch.linalg.inv(cam2) + + intrinsics = scene.get_intrinsics() + intrinsics_1 = intrinsics[0] # (3, 3) + intrinsics_2 = intrinsics[1] + + ego_flow_1_2 = compute_optical_flow(depth_map1, depth_map2, extrinsics1, extrinsics2, intrinsics_1, intrinsics_2) # (H*W, 2) + ego_flow_1_2 = ego_flow_1_2.reshape(H, W, 2).transpose(2, 0, 1) # (2, H, W) + + error_map = np.linalg.norm(ego_flow_1_2 - forward_flow[0].cpu().numpy(), axis=0) # (H, W) + + error_map_normalized = (error_map - error_map.min()) / (error_map.max() - error_map.min()) + error_map_normalized_int = (error_map_normalized * 255).astype(np.uint8) + if both_directions: + ego_flow_2_1 = compute_optical_flow(depth_map2, depth_map1, extrinsics2, extrinsics1, intrinsics_2, intrinsics_1) + ego_flow_2_1 = ego_flow_2_1.reshape(H, W, 2).transpose(2, 0, 1) + error_map_2 = np.linalg.norm(ego_flow_2_1 - backward_flow[0].cpu().numpy(), axis=0) + error_map_2_normalized = (error_map_2 - error_map_2.min()) / (error_map_2.max() - error_map_2.min()) + error_map_2_normalized = (error_map_2_normalized * 255).astype(np.uint8) + cv2.imwrite(f'{output_dir}/dynamic_mask_bw.png', cv2.applyColorMap(error_map_2_normalized, cv2.COLORMAP_JET)) + np.save(f'{output_dir}/dynamic_mask_bw.npy', error_map_2) + + backward_flow = backward_flow[0].cpu().numpy().transpose(1, 2, 0) + np.save(f'{output_dir}/backward_flow.npy', backward_flow) + flow_img = flow_to_image(backward_flow) + cv2.imwrite(f'{output_dir}/backward_flow.png', flow_img) + + cv2.imwrite(f'{output_dir}/dynamic_mask.png', cv2.applyColorMap(error_map_normalized_int, cv2.COLORMAP_JET)) + error_map_normalized_bin = (error_map_normalized > motion_mask_thre).astype(np.uint8) + # save the binary mask + cv2.imwrite(f'{output_dir}/dynamic_mask_binary.png', error_map_normalized_bin*255) + # save the original one as npy file + np.save(f'{output_dir}/dynamic_mask.npy', error_map) + + # also save the flow + forward_flow = forward_flow[0].cpu().numpy().transpose(1, 2, 0) + np.save(f'{output_dir}/forward_flow.npy', forward_flow) + # save flow as image + flow_img = flow_to_image(forward_flow) + cv2.imwrite(f'{output_dir}/forward_flow.png', flow_img) + + return error_map \ No newline at end of file diff --git a/dynamic_predictor/dust3r/utils/vo_eval.py b/dynamic_predictor/dust3r/utils/vo_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..67554f7a6e97fd7a1dcda750fdbf9080dc8fbbb1 --- /dev/null +++ b/dynamic_predictor/dust3r/utils/vo_eval.py @@ -0,0 +1,334 @@ +import os +import re +from copy import deepcopy +from pathlib import Path + +import evo.main_ape as main_ape +import evo.main_rpe as main_rpe +import matplotlib.pyplot as plt +import numpy as np +from evo.core import sync +from evo.core.metrics import PoseRelation, Unit +from evo.core.trajectory import PosePath3D, PoseTrajectory3D +from evo.tools import file_interface, plot +from scipy.spatial.transform import Rotation + + +def sintel_cam_read(filename): + """Read camera data, return (M,N) tuple. + + M is the intrinsic matrix, N is the extrinsic matrix, so that + + x = M*N*X, + with x being a point in homogeneous image pixel coordinates, X being a + point in homogeneous world coordinates. + """ + TAG_FLOAT = 202021.25 + + f = open(filename, "rb") + check = np.fromfile(f, dtype=np.float32, count=1)[0] + assert ( + check == TAG_FLOAT + ), " cam_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? 
".format( + TAG_FLOAT, check + ) + M = np.fromfile(f, dtype="float64", count=9).reshape((3, 3)) + N = np.fromfile(f, dtype="float64", count=12).reshape((3, 4)) + return M, N + + +def load_replica_traj(gt_file): + traj_w_c = np.loadtxt(gt_file) + assert traj_w_c.shape[1] == 12 or traj_w_c.shape[1] == 16 + poses = [ + np.array( + [ + [r[0], r[1], r[2], r[3]], + [r[4], r[5], r[6], r[7]], + [r[8], r[9], r[10], r[11]], + [0, 0, 0, 1], + ] + ) + for r in traj_w_c + ] + + pose_path = PosePath3D(poses_se3=poses) + timestamps_mat = np.arange(traj_w_c.shape[0]).astype(float) + + traj = PoseTrajectory3D(poses_se3=pose_path.poses_se3, timestamps=timestamps_mat) + xyz = traj.positions_xyz + # shift -1 column -> w in back column + # quat = np.roll(traj.orientations_quat_wxyz, -1, axis=1) + # uncomment this line if the quaternion is in scalar-first format + quat = traj.orientations_quat_wxyz + + traj_tum = np.column_stack((xyz, quat)) + return (traj_tum, timestamps_mat) + + +def load_sintel_traj(gt_file): # './data/sintel/training/camdata_left/alley_2' + # Refer to ParticleSfM + gt_pose_lists = sorted(os.listdir(gt_file)) + gt_pose_lists = [os.path.join(gt_file, x) for x in gt_pose_lists if x.endswith(".cam")] + tstamps = [float(x.split("/")[-1][:-4].split("_")[-1]) for x in gt_pose_lists] + gt_poses = [sintel_cam_read(f)[1] for f in gt_pose_lists] # [1] means get the extrinsic + xyzs, wxyzs = [], [] + tum_gt_poses = [] + for gt_pose in gt_poses: + gt_pose = np.concatenate([gt_pose, np.array([[0, 0, 0, 1]])], 0) + gt_pose_inv = np.linalg.inv(gt_pose) # world2cam -> cam2world + xyz = gt_pose_inv[:3, -1] + xyzs.append(xyz) + R = Rotation.from_matrix(gt_pose_inv[:3, :3]) + xyzw = R.as_quat() # scalar-last for scipy + wxyz = np.array([xyzw[-1], xyzw[0], xyzw[1], xyzw[2]]) + wxyzs.append(wxyz) + tum_gt_pose = np.concatenate([xyz, wxyz], 0) #TODO: check if this is correct + tum_gt_poses.append(tum_gt_pose) + + tum_gt_poses = np.stack(tum_gt_poses, 0) + tum_gt_poses[:, :3] = tum_gt_poses[:, :3] - np.mean( + tum_gt_poses[:, :3], 0, keepdims=True + ) + tt = np.expand_dims(np.stack(tstamps, 0), -1) + return tum_gt_poses, tt + + +def load_traj(gt_traj_file, traj_format="sintel", skip=0, stride=1, num_frames=None): + """Read trajectory format. Return in TUM-RGBD format. 
+ Returns: + traj_tum (N, 7): camera to world poses in (x,y,z,qx,qy,qz,qw) + timestamps_mat (N, 1): timestamps + """ + if traj_format == "replica": + traj_tum, timestamps_mat = load_replica_traj(gt_traj_file) + elif traj_format == "sintel": + traj_tum, timestamps_mat = load_sintel_traj(gt_traj_file) + elif traj_format in ["tum", "tartanair"]: + traj = file_interface.read_tum_trajectory_file(gt_traj_file) + xyz = traj.positions_xyz + quat = traj.orientations_quat_wxyz + timestamps_mat = traj.timestamps + traj_tum = np.column_stack((xyz, quat)) + else: + raise NotImplementedError + + traj_tum = traj_tum[skip::stride] + timestamps_mat = timestamps_mat[skip::stride] + if num_frames is not None: + traj_tum = traj_tum[:num_frames] + timestamps_mat = timestamps_mat[:num_frames] + return traj_tum, timestamps_mat + + +def update_timestamps(gt_file, traj_format, skip=0, stride=1): + """Update timestamps given a""" + if traj_format == "tum": + traj_t_map_file = gt_file.replace("groundtruth.txt", "rgb.txt") + timestamps = load_timestamps(traj_t_map_file, traj_format) + return timestamps[skip::stride] + elif traj_format == "tartanair": + traj_t_map_file = gt_file.replace("gt_pose.txt", "times.txt") + timestamps = load_timestamps(traj_t_map_file, traj_format) + return timestamps[skip::stride] + + +def load_timestamps(time_file, traj_format="replica"): + if traj_format in ["tum", "tartanair"]: + with open(time_file, "r+") as f: + lines = f.readlines() + timestamps_mat = [ + float(x.split(" ")[0]) for x in lines if not x.startswith("#") + ] + return timestamps_mat + + +def make_traj(args) -> PoseTrajectory3D: + if isinstance(args, tuple) or isinstance(args, list): + traj, tstamps = args + return PoseTrajectory3D( + positions_xyz=traj[:, :3], + orientations_quat_wxyz=traj[:, 3:], + timestamps=tstamps, + ) + assert isinstance(args, PoseTrajectory3D), type(args) + return deepcopy(args) + + +def eval_metrics(pred_traj, gt_traj=None, seq="", filename="", sample_stride=1): + + if sample_stride > 1: + pred_traj[0] = pred_traj[0][::sample_stride] + pred_traj[1] = pred_traj[1][::sample_stride] + if gt_traj is not None: + updated_gt_traj = [] + updated_gt_traj.append(gt_traj[0][::sample_stride]) + updated_gt_traj.append(gt_traj[1][::sample_stride]) + gt_traj = updated_gt_traj + + pred_traj = make_traj(pred_traj) + + if gt_traj is not None: + gt_traj = make_traj(gt_traj) + + if pred_traj.timestamps.shape[0] == gt_traj.timestamps.shape[0]: + pred_traj.timestamps = gt_traj.timestamps + else: + print(pred_traj.timestamps.shape[0], gt_traj.timestamps.shape[0]) + + gt_traj, pred_traj = sync.associate_trajectories(gt_traj, pred_traj) + + # ATE + traj_ref = gt_traj + traj_est = pred_traj + + ate_result = main_ape.ape( + traj_ref, + traj_est, + est_name="traj", + pose_relation=PoseRelation.translation_part, + align=True, + correct_scale=True, + ) + + ate = ate_result.stats["rmse"] + + # RPE rotation and translation + delta_list = [1] + rpe_rots, rpe_transs = [], [] + for delta in delta_list: + rpe_rots_result = main_rpe.rpe( + traj_ref, + traj_est, + est_name="traj", + pose_relation=PoseRelation.rotation_angle_deg, + align=True, + correct_scale=True, + delta=delta, + delta_unit=Unit.frames, + rel_delta_tol=0.01, + all_pairs=True, + ) + + rot = rpe_rots_result.stats["rmse"] + rpe_rots.append(rot) + + for delta in delta_list: + rpe_transs_result = main_rpe.rpe( + traj_ref, + traj_est, + est_name="traj", + pose_relation=PoseRelation.translation_part, + align=True, + correct_scale=True, + delta=delta, + delta_unit=Unit.frames, 
+ rel_delta_tol=0.01, + all_pairs=True, + ) + + trans = rpe_transs_result.stats["rmse"] + rpe_transs.append(trans) + + rpe_trans, rpe_rot = np.mean(rpe_transs), np.mean(rpe_rots) + with open(filename, "w+") as f: + f.write(f"Seq: {seq} \n\n") + f.write(f"{ate_result}") + f.write(f"{rpe_rots_result}") + f.write(f"{rpe_transs_result}") + + # print(f"Save results to {filename}") + return ate, rpe_trans, rpe_rot + + +def best_plotmode(traj): + _, i1, i2 = np.argsort(np.var(traj.positions_xyz, axis=0)) + plot_axes = "xyz"[i2] + "xyz"[i1] + return getattr(plot.PlotMode, plot_axes) + + +def plot_trajectory( + pred_traj, gt_traj=None, title="", filename="", align=True, correct_scale=True +): + pred_traj = make_traj(pred_traj) + + if gt_traj is not None: + gt_traj = make_traj(gt_traj) + if pred_traj.timestamps.shape[0] == gt_traj.timestamps.shape[0]: + pred_traj.timestamps = gt_traj.timestamps + else: + print("WARNING", pred_traj.timestamps.shape[0], gt_traj.timestamps.shape[0]) + + gt_traj, pred_traj = sync.associate_trajectories(gt_traj, pred_traj) + + if align: + pred_traj.align(gt_traj, correct_scale=correct_scale) + + plot_collection = plot.PlotCollection("PlotCol") + fig = plt.figure(figsize=(8, 8)) + plot_mode = best_plotmode(gt_traj if (gt_traj is not None) else pred_traj) + ax = plot.prepare_axis(fig, plot_mode) + ax.set_title(title) + if gt_traj is not None: + plot.traj(ax, plot_mode, gt_traj, "--", "gray", "Ground Truth") + plot.traj(ax, plot_mode, pred_traj, "-", "blue", "Predicted") + plot_collection.add_figure("traj_error", fig) + plot_collection.export(filename, confirm_overwrite=False) + plt.close(fig=fig) + # print(f"Saved trajectory to {filename.replace('.png','')}_traj_error.png") + + +def save_trajectory_tum_format(traj, filename): + traj = make_traj(traj) + tostr = lambda a: " ".join(map(str, a)) + with Path(filename).open("w") as f: + for i in range(traj.num_poses): + f.write( + f"{traj.timestamps[i]} {tostr(traj.positions_xyz[i])} {tostr(traj.orientations_quat_wxyz[i][[0,1,2,3]])}\n" + ) + # print(f"Saved trajectory to {filename}") + + +def extract_metrics(file_path): + with open(file_path, 'r') as file: + content = file.read() + + # Extract metrics using regex + ate_match = re.search(r'APE w.r.t. translation part \(m\).*?rmse\s+([0-9.]+)', content, re.DOTALL) + rpe_trans_match = re.search(r'RPE w.r.t. translation part \(m\).*?rmse\s+([0-9.]+)', content, re.DOTALL) + rpe_rot_match = re.search(r'RPE w.r.t. 
rotation angle in degrees \(deg\).*?rmse\s+([0-9.]+)', content, re.DOTALL) + + ate = float(ate_match.group(1)) if ate_match else 0.0 + rpe_trans = float(rpe_trans_match.group(1)) if rpe_trans_match else 0.0 + rpe_rot = float(rpe_rot_match.group(1)) if rpe_rot_match else 0.0 + + return ate, rpe_trans, rpe_rot + +def process_directory(directory): + results = [] + for root, _, files in os.walk(directory): + if files is not None: + files = sorted(files) + for file in files: + if file.endswith('_metric.txt'): + file_path = os.path.join(root, file) + seq_name = file.replace('_eval_metric.txt', '') + ate, rpe_trans, rpe_rot = extract_metrics(file_path) + results.append((seq_name, ate, rpe_trans, rpe_rot)) + + return results + +def calculate_averages(results): + total_ate = sum(r[1] for r in results) + total_rpe_trans = sum(r[2] for r in results) + total_rpe_rot = sum(r[3] for r in results) + count = len(results) + + if count == 0: + return 0.0, 0.0, 0.0 + + avg_ate = total_ate / count + avg_rpe_trans = total_rpe_trans / count + avg_rpe_rot = total_rpe_rot / count + + return avg_ate, avg_rpe_trans, avg_rpe_rot diff --git a/dynamic_predictor/dust3r/viz.py b/dynamic_predictor/dust3r/viz.py new file mode 100644 index 0000000000000000000000000000000000000000..6fafbca374f5bda08478b2596ddb5ba31f753337 --- /dev/null +++ b/dynamic_predictor/dust3r/viz.py @@ -0,0 +1,393 @@ +# Copyright (C) 2024-present Naver Corporation. All rights reserved. +# Licensed under CC BY-NC-SA 4.0 (non-commercial use only). +# +# -------------------------------------------------------- +# Visualization utilities using trimesh +# -------------------------------------------------------- +import PIL.Image +import numpy as np +from scipy.spatial.transform import Rotation +import torch +import io +from PIL import Image + +from dust3r.utils.geometry import geotrf, get_med_dist_between_poses, depthmap_to_absolute_camera_coordinates, depthmap_to_pts3d +from dust3r.utils.device import to_numpy +from dust3r.utils.image import rgb, img_to_arr + +try: + import trimesh +except ImportError: + print('/!\\ module trimesh is not installed, cannot visualize results /!\\') + + + +def cat_3d(vecs): + if isinstance(vecs, (np.ndarray, torch.Tensor)): + vecs = [vecs] + return np.concatenate([p.reshape(-1, 3) for p in to_numpy(vecs)]) + + +def show_raw_pointcloud(pts3d, colors, point_size=2): + scene = trimesh.Scene() + + pct = trimesh.PointCloud(cat_3d(pts3d), colors=cat_3d(colors)) + scene.add_geometry(pct) + + scene.show(line_settings={'point_size': point_size}) + + +def pts3d_to_trimesh(img, pts3d, valid=None): + H, W, THREE = img.shape + assert THREE == 3 + assert img.shape == pts3d.shape + + vertices = pts3d.reshape(-1, 3) + + # make squares: each pixel == 2 triangles + idx = np.arange(len(vertices)).reshape(H, W) + idx1 = idx[:-1, :-1].ravel() # top-left corner + idx2 = idx[:-1, +1:].ravel() # right-left corner + idx3 = idx[+1:, :-1].ravel() # bottom-left corner + idx4 = idx[+1:, +1:].ravel() # bottom-right corner + faces = np.concatenate(( + np.c_[idx1, idx2, idx3], + np.c_[idx3, idx2, idx1], # same triangle, but backward (cheap solution to cancel face culling) + np.c_[idx2, idx3, idx4], + np.c_[idx4, idx3, idx2], # same triangle, but backward (cheap solution to cancel face culling) + ), axis=0) + + # prepare triangle colors + face_colors = np.concatenate(( + img[:-1, :-1].reshape(-1, 3), + img[:-1, :-1].reshape(-1, 3), + img[+1:, +1:].reshape(-1, 3), + img[+1:, +1:].reshape(-1, 3) + ), axis=0) + + # remove invalid faces + if valid is not None: 
+ assert valid.shape == (H, W) + valid_idxs = valid.ravel() + valid_faces = valid_idxs[faces].all(axis=-1) + faces = faces[valid_faces] + face_colors = face_colors[valid_faces] + + assert len(faces) == len(face_colors) + return dict(vertices=vertices, face_colors=face_colors, faces=faces) + + +def cat_meshes(meshes): + vertices, faces, colors = zip(*[(m['vertices'], m['faces'], m['face_colors']) for m in meshes]) + n_vertices = np.cumsum([0]+[len(v) for v in vertices]) + for i in range(len(faces)): + faces[i][:] += n_vertices[i] + + vertices = np.concatenate(vertices) + colors = np.concatenate(colors) + faces = np.concatenate(faces) + return dict(vertices=vertices, face_colors=colors, faces=faces) + + +def show_duster_pairs(view1, view2, pred1, pred2): + import matplotlib.pyplot as pl + pl.ion() + + for e in range(len(view1['instance'])): + i = view1['idx'][e] + j = view2['idx'][e] + img1 = rgb(view1['img'][e]) + img2 = rgb(view2['img'][e]) + conf1 = pred1['conf'][e].squeeze() + conf2 = pred2['conf'][e].squeeze() + score = conf1.mean()*conf2.mean() + print(f">> Showing pair #{e} {i}-{j} {score=:g}") + pl.clf() + pl.subplot(221).imshow(img1) + pl.subplot(223).imshow(img2) + pl.subplot(222).imshow(conf1, vmin=1, vmax=30) + pl.subplot(224).imshow(conf2, vmin=1, vmax=30) + pts1 = pred1['pts3d'][e] + pts2 = pred2['pts3d_in_other_view'][e] + pl.subplots_adjust(0, 0, 1, 1, 0, 0) + if input('show pointcloud? (y/n) ') == 'y': + show_raw_pointcloud(cat(pts1, pts2), cat(img1, img2), point_size=5) + + +def auto_cam_size(im_poses): + return 0.1 * get_med_dist_between_poses(im_poses) + + +class SceneViz: + def __init__(self): + self.scene = trimesh.Scene() + + def add_rgbd(self, image, depth, intrinsics=None, cam2world=None, zfar=np.inf, mask=None): + image = img_to_arr(image) + + # make up some intrinsics + if intrinsics is None: + H, W, THREE = image.shape + focal = max(H, W) + intrinsics = np.float32([[focal, 0, W/2], [0, focal, H/2], [0, 0, 1]]) + + # compute 3d points + pts3d = depthmap_to_pts3d(depth, intrinsics, cam2world=cam2world) + + return self.add_pointcloud(pts3d, image, mask=(depth 150) + mask |= (hsv[:, :, 1] < 30) & (hsv[:, :, 2] > 180) + mask |= (hsv[:, :, 1] < 50) & (hsv[:, :, 2] > 220) + + # Morphological operations + kernel = np.ones((5, 5), np.uint8) + mask2 = ndimage.binary_opening(mask, structure=kernel) + + # keep only largest CC + _, labels, stats, _ = cv2.connectedComponentsWithStats(mask2.view(np.uint8), connectivity=8) + cc_sizes = stats[1:, cv2.CC_STAT_AREA] + order = cc_sizes.argsort()[::-1] # bigger first + i = 0 + selection = [] + while i < len(order) and cc_sizes[order[i]] > cc_sizes[order[0]] / 2: + selection.append(1 + order[i]) + i += 1 + mask3 = np.in1d(labels, selection).reshape(labels.shape) + + # Apply mask + return torch.from_numpy(mask3) diff --git a/dynamic_predictor/launch.py b/dynamic_predictor/launch.py new file mode 100644 index 0000000000000000000000000000000000000000..15980136c8840aaa33019170af93a09e183e124b --- /dev/null +++ b/dynamic_predictor/launch.py @@ -0,0 +1,41 @@ +# -------------------------------------------------------- +# training executable for DUSt3R +# -------------------------------------------------------- +from dust3r.training import get_args_parser, train, load_model +from dust3r.pose_eval import eval_pose_estimation, pose_estimation_custom +from dust3r.depth_eval import eval_mono_depth_estimation +import croco.utils.misc as misc # noqa +import torch +import torch.backends.cudnn as cudnn +import numpy as np +import os + +if __name__ == 
'__main__': + args = get_args_parser() + args = args.parse_args() + if args.mode.startswith('eval'): + misc.init_distributed_mode(args) + global_rank = misc.get_rank() + world_size = misc.get_world_size() + device = "cuda" if torch.cuda.is_available() else "cpu" + device = torch.device(device) + + # fix the seed + seed = args.seed + misc.get_rank() + torch.manual_seed(seed) + np.random.seed(seed) + cudnn.benchmark = args.cudnn_benchmark + model, _ = load_model(args, device) + os.makedirs(args.output_dir, exist_ok=True) + + if args.mode == 'eval_pose': + ate_mean, rpe_trans_mean, rpe_rot_mean, outfile_list, bug = eval_pose_estimation(args, model, device, save_dir=args.output_dir) + print(f'ATE mean: {ate_mean}, RPE trans mean: {rpe_trans_mean}, RPE rot mean: {rpe_rot_mean}') + if args.mode == 'eval_pose_custom': + pose_estimation_custom(args, model, device, save_dir=args.output_dir) + + if args.mode == 'eval_depth': + eval_mono_depth_estimation(args, model, device) + + exit(0) + train(args) diff --git a/dynamic_predictor/third_party/RAFT/LICENSE b/dynamic_predictor/third_party/RAFT/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..ed13d8404f0f1315ee323b2c8d1b2d8f77b5c82f --- /dev/null +++ b/dynamic_predictor/third_party/RAFT/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2020, princeton-vl +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/dynamic_predictor/third_party/RAFT/README.md b/dynamic_predictor/third_party/RAFT/README.md new file mode 100644 index 0000000000000000000000000000000000000000..650275ed7c4cda12822587c6a4358f057fffe494 --- /dev/null +++ b/dynamic_predictor/third_party/RAFT/README.md @@ -0,0 +1,80 @@ +# RAFT +This repository contains the source code for our paper: + +[RAFT: Recurrent All Pairs Field Transforms for Optical Flow](https://arxiv.org/pdf/2003.12039.pdf)
+ECCV 2020
+Zachary Teed and Jia Deng
+ + + +## Requirements +The code has been tested with PyTorch 1.6 and Cuda 10.1. +```Shell +conda create --name raft +conda activate raft +conda install pytorch=1.6.0 torchvision=0.7.0 cudatoolkit=10.1 matplotlib tensorboard scipy opencv -c pytorch +``` + +## Demos +Pretrained models can be downloaded by running +```Shell +./download_models.sh +``` +or downloaded from [google drive](https://drive.google.com/drive/folders/1sWDsfuZ3Up38EUQt7-JDTT1HcGHuJgvT?usp=sharing) + +You can demo a trained model on a sequence of frames +```Shell +python demo.py --model=models/raft-things.pth --path=demo-frames +``` + +## Required Data +To evaluate/train RAFT, you will need to download the required datasets. +* [FlyingChairs](https://lmb.informatik.uni-freiburg.de/resources/datasets/FlyingChairs.en.html#flyingchairs) +* [FlyingThings3D](https://lmb.informatik.uni-freiburg.de/resources/datasets/SceneFlowDatasets.en.html) +* [Sintel](http://sintel.is.tue.mpg.de/) +* [KITTI](http://www.cvlibs.net/datasets/kitti/eval_scene_flow.php?benchmark=flow) +* [HD1K](http://hci-benchmark.iwr.uni-heidelberg.de/) (optional) + + +By default `datasets.py` will search for the datasets in these locations. You can create symbolic links to wherever the datasets were downloaded in the `datasets` folder + +```Shell +├── datasets + ├── Sintel + ├── test + ├── training + ├── KITTI + ├── testing + ├── training + ├── devkit + ├── FlyingChairs_release + ├── data + ├── FlyingThings3D + ├── frames_cleanpass + ├── frames_finalpass + ├── optical_flow +``` + +## Evaluation +You can evaluate a trained model using `evaluate.py` +```Shell +python evaluate.py --model=models/raft-things.pth --dataset=sintel --mixed_precision +``` + +## Training +We used the following training schedule in our paper (2 GPUs). Training logs will be written to the `runs` directory, which can be visualized using tensorboard +```Shell +./train_standard.sh +``` + +If you have an RTX GPU, training can be accelerated using mixed precision. You can expect similar results in this setting (1 GPU) +```Shell +./train_mixed.sh +``` + +## (Optional) Efficient Implementation +You can optionally use our alternate (efficient) implementation by compiling the provided cuda extension +```Shell +cd alt_cuda_corr && python setup.py install && cd .. +``` +and running `demo.py` and `evaluate.py` with the `--alternate_corr` flag. Note, this implementation is somewhat slower than all-pairs, but uses significantly less GPU memory during the forward pass. 
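The efficient implementation mentioned above is the `alt_cuda_corr` extension whose sources follow (correlation.cpp, correlation_kernel.cu, setup.py). As a minimal sketch of the binding's calling convention, inferred from the pybind11 signatures in correlation.cpp and the shape logic in corr_cuda_forward below; the concrete sizes B, H, W, C and the radius here are illustrative, not taken from the source:

```python
import torch
import alt_cuda_corr  # module name from setup.py; built with `cd alt_cuda_corr && python setup.py install`

B, H, W, C, r = 1, 46, 62, 256, 4  # hypothetical sizes for illustration

# the kernel accessors index the feature maps as (B, H, W, C), i.e. channels-last
fmap1 = torch.randn(B, H, W, C, device="cuda").contiguous()
fmap2 = torch.randn(B, H, W, C, device="cuda").contiguous()

# one lookup center per pixel of fmap1, given in (x, y) pixel coordinates of fmap2
coords = torch.rand(B, 1, H, W, 2, device="cuda")
coords[..., 0] *= W - 1
coords[..., 1] *= H - 1

# CHECK_INPUT in correlation.cpp requires contiguous CUDA tensors
(corr,) = alt_cuda_corr.forward(fmap1, fmap2, coords.contiguous(), r)
print(corr.shape)  # (B, N, (2r+1)**2, H, W) = (1, 1, 81, 46, 62), per the allocation in corr_cuda_forward
```

In RAFT itself this path is only taken with `--alternate_corr`; the default all-pairs correlation trades the extra GPU memory for speed, as the README notes.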
diff --git a/dynamic_predictor/third_party/RAFT/alt_cuda_corr/correlation.cpp b/dynamic_predictor/third_party/RAFT/alt_cuda_corr/correlation.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b01584d19edb99e7feec5f2e4c51169a1ed208db --- /dev/null +++ b/dynamic_predictor/third_party/RAFT/alt_cuda_corr/correlation.cpp @@ -0,0 +1,54 @@ +#include +#include + +// CUDA forward declarations +std::vector corr_cuda_forward( + torch::Tensor fmap1, + torch::Tensor fmap2, + torch::Tensor coords, + int radius); + +std::vector corr_cuda_backward( + torch::Tensor fmap1, + torch::Tensor fmap2, + torch::Tensor coords, + torch::Tensor corr_grad, + int radius); + +// C++ interface +#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") +#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") +#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) + +std::vector corr_forward( + torch::Tensor fmap1, + torch::Tensor fmap2, + torch::Tensor coords, + int radius) { + CHECK_INPUT(fmap1); + CHECK_INPUT(fmap2); + CHECK_INPUT(coords); + + return corr_cuda_forward(fmap1, fmap2, coords, radius); +} + + +std::vector corr_backward( + torch::Tensor fmap1, + torch::Tensor fmap2, + torch::Tensor coords, + torch::Tensor corr_grad, + int radius) { + CHECK_INPUT(fmap1); + CHECK_INPUT(fmap2); + CHECK_INPUT(coords); + CHECK_INPUT(corr_grad); + + return corr_cuda_backward(fmap1, fmap2, coords, corr_grad, radius); +} + + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward", &corr_forward, "CORR forward"); + m.def("backward", &corr_backward, "CORR backward"); +} \ No newline at end of file diff --git a/dynamic_predictor/third_party/RAFT/alt_cuda_corr/correlation_kernel.cu b/dynamic_predictor/third_party/RAFT/alt_cuda_corr/correlation_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..145e5804a16ece51b8ff5f1cb61ae8dab4fc3bb7 --- /dev/null +++ b/dynamic_predictor/third_party/RAFT/alt_cuda_corr/correlation_kernel.cu @@ -0,0 +1,324 @@ +#include +#include +#include +#include + + +#define BLOCK_H 4 +#define BLOCK_W 8 +#define BLOCK_HW BLOCK_H * BLOCK_W +#define CHANNEL_STRIDE 32 + + +__forceinline__ __device__ +bool within_bounds(int h, int w, int H, int W) { + return h >= 0 && h < H && w >= 0 && w < W; +} + +template +__global__ void corr_forward_kernel( + const torch::PackedTensorAccessor32 fmap1, + const torch::PackedTensorAccessor32 fmap2, + const torch::PackedTensorAccessor32 coords, + torch::PackedTensorAccessor32 corr, + int r) +{ + const int b = blockIdx.x; + const int h0 = blockIdx.y * blockDim.x; + const int w0 = blockIdx.z * blockDim.y; + const int tid = threadIdx.x * blockDim.y + threadIdx.y; + + const int H1 = fmap1.size(1); + const int W1 = fmap1.size(2); + const int H2 = fmap2.size(1); + const int W2 = fmap2.size(2); + const int N = coords.size(1); + const int C = fmap1.size(3); + + __shared__ scalar_t f1[CHANNEL_STRIDE][BLOCK_HW+1]; + __shared__ scalar_t f2[CHANNEL_STRIDE][BLOCK_HW+1]; + __shared__ scalar_t x2s[BLOCK_HW]; + __shared__ scalar_t y2s[BLOCK_HW]; + + for (int c=0; c(floor(y2s[k1]))-r+iy; + int w2 = static_cast(floor(x2s[k1]))-r+ix; + int c2 = tid % CHANNEL_STRIDE; + + auto fptr = fmap2[b][h2][w2]; + if (within_bounds(h2, w2, H2, W2)) + f2[c2][k1] = fptr[c+c2]; + else + f2[c2][k1] = 0.0; + } + + __syncthreads(); + + scalar_t s = 0.0; + for (int k=0; k 0 && ix > 0 && within_bounds(h1, w1, H1, W1)) + *(corr_ptr + ix_nw) += nw; + + if (iy > 0 && ix < rd && within_bounds(h1, w1, H1, W1)) + 
*(corr_ptr + ix_ne) += ne; + + if (iy < rd && ix > 0 && within_bounds(h1, w1, H1, W1)) + *(corr_ptr + ix_sw) += sw; + + if (iy < rd && ix < rd && within_bounds(h1, w1, H1, W1)) + *(corr_ptr + ix_se) += se; + } + } + } + } +} + + +template +__global__ void corr_backward_kernel( + const torch::PackedTensorAccessor32 fmap1, + const torch::PackedTensorAccessor32 fmap2, + const torch::PackedTensorAccessor32 coords, + const torch::PackedTensorAccessor32 corr_grad, + torch::PackedTensorAccessor32 fmap1_grad, + torch::PackedTensorAccessor32 fmap2_grad, + torch::PackedTensorAccessor32 coords_grad, + int r) +{ + + const int b = blockIdx.x; + const int h0 = blockIdx.y * blockDim.x; + const int w0 = blockIdx.z * blockDim.y; + const int tid = threadIdx.x * blockDim.y + threadIdx.y; + + const int H1 = fmap1.size(1); + const int W1 = fmap1.size(2); + const int H2 = fmap2.size(1); + const int W2 = fmap2.size(2); + const int N = coords.size(1); + const int C = fmap1.size(3); + + __shared__ scalar_t f1[CHANNEL_STRIDE][BLOCK_HW+1]; + __shared__ scalar_t f2[CHANNEL_STRIDE][BLOCK_HW+1]; + + __shared__ scalar_t f1_grad[CHANNEL_STRIDE][BLOCK_HW+1]; + __shared__ scalar_t f2_grad[CHANNEL_STRIDE][BLOCK_HW+1]; + + __shared__ scalar_t x2s[BLOCK_HW]; + __shared__ scalar_t y2s[BLOCK_HW]; + + for (int c=0; c(floor(y2s[k1]))-r+iy; + int w2 = static_cast(floor(x2s[k1]))-r+ix; + int c2 = tid % CHANNEL_STRIDE; + + auto fptr = fmap2[b][h2][w2]; + if (within_bounds(h2, w2, H2, W2)) + f2[c2][k1] = fptr[c+c2]; + else + f2[c2][k1] = 0.0; + + f2_grad[c2][k1] = 0.0; + } + + __syncthreads(); + + const scalar_t* grad_ptr = &corr_grad[b][n][0][h1][w1]; + scalar_t g = 0.0; + + int ix_nw = H1*W1*((iy-1) + rd*(ix-1)); + int ix_ne = H1*W1*((iy-1) + rd*ix); + int ix_sw = H1*W1*(iy + rd*(ix-1)); + int ix_se = H1*W1*(iy + rd*ix); + + if (iy > 0 && ix > 0 && within_bounds(h1, w1, H1, W1)) + g += *(grad_ptr + ix_nw) * dy * dx; + + if (iy > 0 && ix < rd && within_bounds(h1, w1, H1, W1)) + g += *(grad_ptr + ix_ne) * dy * (1-dx); + + if (iy < rd && ix > 0 && within_bounds(h1, w1, H1, W1)) + g += *(grad_ptr + ix_sw) * (1-dy) * dx; + + if (iy < rd && ix < rd && within_bounds(h1, w1, H1, W1)) + g += *(grad_ptr + ix_se) * (1-dy) * (1-dx); + + for (int k=0; k(floor(y2s[k1]))-r+iy; + int w2 = static_cast(floor(x2s[k1]))-r+ix; + int c2 = tid % CHANNEL_STRIDE; + + scalar_t* fptr = &fmap2_grad[b][h2][w2][0]; + if (within_bounds(h2, w2, H2, W2)) + atomicAdd(fptr+c+c2, f2_grad[c2][k1]); + } + } + } + } + __syncthreads(); + + + for (int k=0; k corr_cuda_forward( + torch::Tensor fmap1, + torch::Tensor fmap2, + torch::Tensor coords, + int radius) +{ + const auto B = coords.size(0); + const auto N = coords.size(1); + const auto H = coords.size(2); + const auto W = coords.size(3); + + const auto rd = 2 * radius + 1; + auto opts = fmap1.options(); + auto corr = torch::zeros({B, N, rd*rd, H, W}, opts); + + const dim3 blocks(B, (H+BLOCK_H-1)/BLOCK_H, (W+BLOCK_W-1)/BLOCK_W); + const dim3 threads(BLOCK_H, BLOCK_W); + + corr_forward_kernel<<>>( + fmap1.packed_accessor32(), + fmap2.packed_accessor32(), + coords.packed_accessor32(), + corr.packed_accessor32(), + radius); + + return {corr}; +} + +std::vector corr_cuda_backward( + torch::Tensor fmap1, + torch::Tensor fmap2, + torch::Tensor coords, + torch::Tensor corr_grad, + int radius) +{ + const auto B = coords.size(0); + const auto N = coords.size(1); + + const auto H1 = fmap1.size(1); + const auto W1 = fmap1.size(2); + const auto H2 = fmap2.size(1); + const auto W2 = fmap2.size(2); + const auto C = fmap1.size(3); + 
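+ // Gradient buffers, allocated below: fmap1_grad/fmap2_grad are channels-last, (B, H1, W1, C) and (B, H2, W2, C), matching the forward feature maps, and coords_grad is (B, N, H1, W1, 2); all are zero-initialized via torch::zeros before the backward kernel launches.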
+ auto opts = fmap1.options(); + auto fmap1_grad = torch::zeros({B, H1, W1, C}, opts); + auto fmap2_grad = torch::zeros({B, H2, W2, C}, opts); + auto coords_grad = torch::zeros({B, N, H1, W1, 2}, opts); + + const dim3 blocks(B, (H1+BLOCK_H-1)/BLOCK_H, (W1+BLOCK_W-1)/BLOCK_W); + const dim3 threads(BLOCK_H, BLOCK_W); + + + corr_backward_kernel<<>>( + fmap1.packed_accessor32(), + fmap2.packed_accessor32(), + coords.packed_accessor32(), + corr_grad.packed_accessor32(), + fmap1_grad.packed_accessor32(), + fmap2_grad.packed_accessor32(), + coords_grad.packed_accessor32(), + radius); + + return {fmap1_grad, fmap2_grad, coords_grad}; +} \ No newline at end of file diff --git a/dynamic_predictor/third_party/RAFT/alt_cuda_corr/setup.py b/dynamic_predictor/third_party/RAFT/alt_cuda_corr/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..c0207ff285ffac4c8146c79d154f12416dbef48c --- /dev/null +++ b/dynamic_predictor/third_party/RAFT/alt_cuda_corr/setup.py @@ -0,0 +1,15 @@ +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + + +setup( + name='correlation', + ext_modules=[ + CUDAExtension('alt_cuda_corr', + sources=['correlation.cpp', 'correlation_kernel.cu'], + extra_compile_args={'cxx': [], 'nvcc': ['-O3']}), + ], + cmdclass={ + 'build_ext': BuildExtension + }) + diff --git a/dynamic_predictor/third_party/RAFT/chairs_split.txt b/dynamic_predictor/third_party/RAFT/chairs_split.txt new file mode 100644 index 0000000000000000000000000000000000000000..6ae8f0b72a22fc061552604c94664e3a0287914e --- /dev/null +++ b/dynamic_predictor/third_party/RAFT/chairs_split.txt @@ -0,0 +1,22872 @@ +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +2 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +2 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +2 +1 +1 +2 +2 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +1 +1 +1 +1 +1 +1 +1 +2 +1 +1 +1 +1 +1 \ No newline at end of file diff --git a/dynamic_predictor/third_party/RAFT/core/__init__.py b/dynamic_predictor/third_party/RAFT/core/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/dynamic_predictor/third_party/RAFT/core/configs/congif_spring_M.json b/dynamic_predictor/third_party/RAFT/core/configs/congif_spring_M.json new file mode 100644 index 0000000000000000000000000000000000000000..7e9c8a4c4b2f6c93c65d5f62692606102c0aa842 --- /dev/null +++ b/dynamic_predictor/third_party/RAFT/core/configs/congif_spring_M.json @@ -0,0 +1,30 @@ +{ + "name": "spring-M", + "dataset": "spring", + "gpus": [0, 1, 2, 3, 4, 5, 6, 7], + + "use_var": true, + "var_min": 0, + "var_max": 10, + "pretrain": "resnet34", + "initial_dim": 64, + 
"block_dims": [64, 128, 256], + "radius": 4, + "dim": 128, + "num_blocks": 2, + "iters": 4, + + "image_size": [540, 960], + "scale": -1, + "batch_size": 32, + "epsilon": 1e-8, + "lr": 4e-4, + "wdecay": 1e-5, + "dropout": 0, + "clip": 1.0, + "gamma": 0.85, + "num_steps": 120000, + + "restore_ckpt": null, + "coarse_config": null +} \ No newline at end of file diff --git a/dynamic_predictor/third_party/RAFT/core/corr.py b/dynamic_predictor/third_party/RAFT/core/corr.py new file mode 100644 index 0000000000000000000000000000000000000000..c977addc5350f75c44b33912abe7d276aa80b690 --- /dev/null +++ b/dynamic_predictor/third_party/RAFT/core/corr.py @@ -0,0 +1,142 @@ +import torch +import torch.nn.functional as F +from utils.utils import bilinear_sampler, coords_grid + +try: + import alt_cuda_corr +except: + # alt_cuda_corr is not compiled + pass + +class CorrBlock2: + def __init__(self, fmap1, fmap2, args): + self.num_levels = args.corr_levels + self.radius = args.corr_radius + self.args = args + self.corr_pyramid = [] + # all pairs correlation + for i in range(self.num_levels): + corr = CorrBlock2.corr(fmap1, fmap2, 1) + batch, h1, w1, dim, h2, w2 = corr.shape + corr = corr.reshape(batch*h1*w1, dim, h2, w2) + fmap2 = F.interpolate(fmap2, scale_factor=0.5, mode='bilinear', align_corners=False) + self.corr_pyramid.append(corr) + + def __call__(self, coords, dilation=None): + r = self.radius + coords = coords.permute(0, 2, 3, 1) + batch, h1, w1, _ = coords.shape + + if dilation is None: + dilation = torch.ones(batch, 1, h1, w1, device=coords.device) + + # print(dilation.max(), dilation.mean(), dilation.min()) + out_pyramid = [] + for i in range(self.num_levels): + corr = self.corr_pyramid[i] + device = coords.device + dx = torch.linspace(-r, r, 2*r+1, device=device) + dy = torch.linspace(-r, r, 2*r+1, device=device) + delta = torch.stack(torch.meshgrid(dy, dx), axis=-1) + delta_lvl = delta.view(1, 2*r+1, 2*r+1, 2) + delta_lvl = delta_lvl * dilation.view(batch * h1 * w1, 1, 1, 1) + centroid_lvl = coords.reshape(batch*h1*w1, 1, 1, 2) / 2**i + coords_lvl = centroid_lvl + delta_lvl + corr = bilinear_sampler(corr, coords_lvl) + corr = corr.view(batch, h1, w1, -1) + out_pyramid.append(corr) + + out = torch.cat(out_pyramid, dim=-1) + out = out.permute(0, 3, 1, 2).contiguous().float() + return out + + @staticmethod + def corr(fmap1, fmap2, num_head): + batch, dim, h1, w1 = fmap1.shape + h2, w2 = fmap2.shape[2:] + fmap1 = fmap1.view(batch, num_head, dim // num_head, h1*w1) + fmap2 = fmap2.view(batch, num_head, dim // num_head, h2*w2) + corr = fmap1.transpose(2, 3) @ fmap2 + corr = corr.reshape(batch, num_head, h1, w1, h2, w2).permute(0, 2, 3, 1, 4, 5) + return corr / torch.sqrt(torch.tensor(dim).float()) + +class CorrBlock: + def __init__(self, fmap1, fmap2, num_levels=4, radius=4): + self.num_levels = num_levels + self.radius = radius + self.corr_pyramid = [] + + # all pairs correlation + corr = CorrBlock.corr(fmap1, fmap2) + + batch, h1, w1, dim, h2, w2 = corr.shape + corr = corr.reshape(batch*h1*w1, dim, h2, w2) + + self.corr_pyramid.append(corr) + for i in range(self.num_levels-1): + corr = F.avg_pool2d(corr, 2, stride=2) + self.corr_pyramid.append(corr) + + def __call__(self, coords): + r = self.radius + coords = coords.permute(0, 2, 3, 1) + batch, h1, w1, _ = coords.shape + + out_pyramid = [] + for i in range(self.num_levels): + corr = self.corr_pyramid[i] + dx = torch.linspace(-r, r, 2*r+1) + dy = torch.linspace(-r, r, 2*r+1) + delta = torch.stack(torch.meshgrid(dy, dx), axis=-1).to(coords.device) + + 
centroid_lvl = coords.reshape(batch*h1*w1, 1, 1, 2) / 2**i + delta_lvl = delta.view(1, 2*r+1, 2*r+1, 2) + coords_lvl = centroid_lvl + delta_lvl + + corr = bilinear_sampler(corr, coords_lvl) + corr = corr.view(batch, h1, w1, -1) + out_pyramid.append(corr) + + out = torch.cat(out_pyramid, dim=-1) + return out.permute(0, 3, 1, 2).contiguous().float() + + @staticmethod + def corr(fmap1, fmap2): + batch, dim, ht, wd = fmap1.shape + fmap1 = fmap1.view(batch, dim, ht*wd) + fmap2 = fmap2.view(batch, dim, ht*wd) + + corr = torch.matmul(fmap1.transpose(1,2), fmap2) + corr = corr.view(batch, ht, wd, 1, ht, wd) + return corr / torch.sqrt(torch.tensor(dim).float()) + + +class AlternateCorrBlock: + def __init__(self, fmap1, fmap2, num_levels=4, radius=4): + self.num_levels = num_levels + self.radius = radius + + self.pyramid = [(fmap1, fmap2)] + for i in range(self.num_levels): + fmap1 = F.avg_pool2d(fmap1, 2, stride=2) + fmap2 = F.avg_pool2d(fmap2, 2, stride=2) + self.pyramid.append((fmap1, fmap2)) + + def __call__(self, coords): + coords = coords.permute(0, 2, 3, 1) + B, H, W, _ = coords.shape + dim = self.pyramid[0][0].shape[1] + + corr_list = [] + for i in range(self.num_levels): + r = self.radius + fmap1_i = self.pyramid[0][0].permute(0, 2, 3, 1).contiguous() + fmap2_i = self.pyramid[i][1].permute(0, 2, 3, 1).contiguous() + + coords_i = (coords / 2**i).reshape(B, 1, H, W, 2).contiguous() + corr, = alt_cuda_corr.forward(fmap1_i, fmap2_i, coords_i, r) + corr_list.append(corr.squeeze(1)) + + corr = torch.stack(corr_list, dim=1) + corr = corr.reshape(B, -1, H, W) + return corr / torch.sqrt(torch.tensor(dim).float()) diff --git a/dynamic_predictor/third_party/RAFT/core/datasets.py b/dynamic_predictor/third_party/RAFT/core/datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..3411fdacfb900024005e8997d07c600e963a95ca --- /dev/null +++ b/dynamic_predictor/third_party/RAFT/core/datasets.py @@ -0,0 +1,235 @@ +# Data loading based on https://github.com/NVIDIA/flownet2-pytorch + +import numpy as np +import torch +import torch.utils.data as data +import torch.nn.functional as F + +import os +import math +import random +from glob import glob +import os.path as osp + +from utils import frame_utils +from utils.augmentor import FlowAugmentor, SparseFlowAugmentor + + +class FlowDataset(data.Dataset): + def __init__(self, aug_params=None, sparse=False): + self.augmentor = None + self.sparse = sparse + if aug_params is not None: + if sparse: + self.augmentor = SparseFlowAugmentor(**aug_params) + else: + self.augmentor = FlowAugmentor(**aug_params) + + self.is_test = False + self.init_seed = False + self.flow_list = [] + self.image_list = [] + self.extra_info = [] + + def __getitem__(self, index): + + if self.is_test: + img1 = frame_utils.read_gen(self.image_list[index][0]) + img2 = frame_utils.read_gen(self.image_list[index][1]) + img1 = np.array(img1).astype(np.uint8)[..., :3] + img2 = np.array(img2).astype(np.uint8)[..., :3] + img1 = torch.from_numpy(img1).permute(2, 0, 1).float() + img2 = torch.from_numpy(img2).permute(2, 0, 1).float() + return img1, img2, self.extra_info[index] + + if not self.init_seed: + worker_info = torch.utils.data.get_worker_info() + if worker_info is not None: + torch.manual_seed(worker_info.id) + np.random.seed(worker_info.id) + random.seed(worker_info.id) + self.init_seed = True + + index = index % len(self.image_list) + valid = None + if self.sparse: + flow, valid = frame_utils.readFlowKITTI(self.flow_list[index]) + else: + flow = 
frame_utils.read_gen(self.flow_list[index]) + + img1 = frame_utils.read_gen(self.image_list[index][0]) + img2 = frame_utils.read_gen(self.image_list[index][1]) + + flow = np.array(flow).astype(np.float32) + img1 = np.array(img1).astype(np.uint8) + img2 = np.array(img2).astype(np.uint8) + + # grayscale images + if len(img1.shape) == 2: + img1 = np.tile(img1[...,None], (1, 1, 3)) + img2 = np.tile(img2[...,None], (1, 1, 3)) + else: + img1 = img1[..., :3] + img2 = img2[..., :3] + + if self.augmentor is not None: + if self.sparse: + img1, img2, flow, valid = self.augmentor(img1, img2, flow, valid) + else: + img1, img2, flow = self.augmentor(img1, img2, flow) + + img1 = torch.from_numpy(img1).permute(2, 0, 1).float() + img2 = torch.from_numpy(img2).permute(2, 0, 1).float() + flow = torch.from_numpy(flow).permute(2, 0, 1).float() + + if valid is not None: + valid = torch.from_numpy(valid) + else: + valid = (flow[0].abs() < 1000) & (flow[1].abs() < 1000) + + return img1, img2, flow, valid.float() + + + def __rmul__(self, v): + self.flow_list = v * self.flow_list + self.image_list = v * self.image_list + return self + + def __len__(self): + return len(self.image_list) + + +class MpiSintel(FlowDataset): + def __init__(self, aug_params=None, split='training', root='datasets/Sintel', dstype='clean'): + super(MpiSintel, self).__init__(aug_params) + flow_root = osp.join(root, split, 'flow') + image_root = osp.join(root, split, dstype) + + if split == 'test': + self.is_test = True + + for scene in os.listdir(image_root): + image_list = sorted(glob(osp.join(image_root, scene, '*.png'))) + for i in range(len(image_list)-1): + self.image_list += [ [image_list[i], image_list[i+1]] ] + self.extra_info += [ (scene, i) ] # scene and frame_id + + if split != 'test': + self.flow_list += sorted(glob(osp.join(flow_root, scene, '*.flo'))) + + +class FlyingChairs(FlowDataset): + def __init__(self, aug_params=None, split='train', root='datasets/FlyingChairs_release/data'): + super(FlyingChairs, self).__init__(aug_params) + + images = sorted(glob(osp.join(root, '*.ppm'))) + flows = sorted(glob(osp.join(root, '*.flo'))) + assert (len(images)//2 == len(flows)) + + split_list = np.loadtxt('chairs_split.txt', dtype=np.int32) + for i in range(len(flows)): + xid = split_list[i] + if (split=='training' and xid==1) or (split=='validation' and xid==2): + self.flow_list += [ flows[i] ] + self.image_list += [ [images[2*i], images[2*i+1]] ] + + +class FlyingThings3D(FlowDataset): + def __init__(self, aug_params=None, root='datasets/FlyingThings3D', dstype='frames_cleanpass'): + super(FlyingThings3D, self).__init__(aug_params) + + for cam in ['left']: + for direction in ['into_future', 'into_past']: + image_dirs = sorted(glob(osp.join(root, dstype, 'TRAIN/*/*'))) + image_dirs = sorted([osp.join(f, cam) for f in image_dirs]) + + flow_dirs = sorted(glob(osp.join(root, 'optical_flow/TRAIN/*/*'))) + flow_dirs = sorted([osp.join(f, direction, cam) for f in flow_dirs]) + + for idir, fdir in zip(image_dirs, flow_dirs): + images = sorted(glob(osp.join(idir, '*.png')) ) + flows = sorted(glob(osp.join(fdir, '*.pfm')) ) + for i in range(len(flows)-1): + if direction == 'into_future': + self.image_list += [ [images[i], images[i+1]] ] + self.flow_list += [ flows[i] ] + elif direction == 'into_past': + self.image_list += [ [images[i+1], images[i]] ] + self.flow_list += [ flows[i+1] ] + + +class KITTI(FlowDataset): + def __init__(self, aug_params=None, split='training', root='datasets/KITTI'): + super(KITTI, self).__init__(aug_params, 
sparse=True) + if split == 'testing': + self.is_test = True + + root = osp.join(root, split) + images1 = sorted(glob(osp.join(root, 'image_2/*_10.png'))) + images2 = sorted(glob(osp.join(root, 'image_2/*_11.png'))) + + for img1, img2 in zip(images1, images2): + frame_id = img1.split('/')[-1] + self.extra_info += [ [frame_id] ] + self.image_list += [ [img1, img2] ] + + if split == 'training': + self.flow_list = sorted(glob(osp.join(root, 'flow_occ/*_10.png'))) + + +class HD1K(FlowDataset): + def __init__(self, aug_params=None, root='datasets/HD1k'): + super(HD1K, self).__init__(aug_params, sparse=True) + + seq_ix = 0 + while 1: + flows = sorted(glob(os.path.join(root, 'hd1k_flow_gt', 'flow_occ/%06d_*.png' % seq_ix))) + images = sorted(glob(os.path.join(root, 'hd1k_input', 'image_2/%06d_*.png' % seq_ix))) + + if len(flows) == 0: + break + + for i in range(len(flows)-1): + self.flow_list += [flows[i]] + self.image_list += [ [images[i], images[i+1]] ] + + seq_ix += 1 + + +def fetch_dataloader(args, TRAIN_DS='C+T+K+S+H'): + """ Create the data loader for the corresponding training set """ + + if args.stage == 'chairs': + aug_params = {'crop_size': args.image_size, 'min_scale': -0.1, 'max_scale': 1.0, 'do_flip': True} + train_dataset = FlyingChairs(aug_params, split='training') + + elif args.stage == 'things': + aug_params = {'crop_size': args.image_size, 'min_scale': -0.4, 'max_scale': 0.8, 'do_flip': True} + clean_dataset = FlyingThings3D(aug_params, dstype='frames_cleanpass') + final_dataset = FlyingThings3D(aug_params, dstype='frames_finalpass') + train_dataset = clean_dataset + final_dataset + + elif args.stage == 'sintel': + aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.6, 'do_flip': True} + things = FlyingThings3D(aug_params, dstype='frames_cleanpass') + sintel_clean = MpiSintel(aug_params, split='training', dstype='clean') + sintel_final = MpiSintel(aug_params, split='training', dstype='final') + + if TRAIN_DS == 'C+T+K+S+H': + kitti = KITTI({'crop_size': args.image_size, 'min_scale': -0.3, 'max_scale': 0.5, 'do_flip': True}) + hd1k = HD1K({'crop_size': args.image_size, 'min_scale': -0.5, 'max_scale': 0.2, 'do_flip': True}) + train_dataset = 100*sintel_clean + 100*sintel_final + 200*kitti + 5*hd1k + things + + elif TRAIN_DS == 'C+T+K/S': + train_dataset = 100*sintel_clean + 100*sintel_final + things + + elif args.stage == 'kitti': + aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.4, 'do_flip': False} + train_dataset = KITTI(aug_params, split='training') + + train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size, + pin_memory=False, shuffle=True, num_workers=4, drop_last=True) + + print('Training with %d image pairs' % len(train_dataset)) + return train_loader + diff --git a/dynamic_predictor/third_party/RAFT/core/extractor.py b/dynamic_predictor/third_party/RAFT/core/extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..9c1799c0c17325ed8a10e8283fdae5f70c852818 --- /dev/null +++ b/dynamic_predictor/third_party/RAFT/core/extractor.py @@ -0,0 +1,351 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from layer import conv1x1, conv3x3, BasicBlock + + +class ResidualBlock(nn.Module): + def __init__(self, in_planes, planes, norm_fn='group', stride=1): + super(ResidualBlock, self).__init__() + + self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1) + self.relu = 
nn.ReLU(inplace=True) + + num_groups = planes // 8 + + if norm_fn == 'group': + self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + if not stride == 1: + self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + + elif norm_fn == 'batch': + self.norm1 = nn.BatchNorm2d(planes) + self.norm2 = nn.BatchNorm2d(planes) + if not stride == 1: + self.norm3 = nn.BatchNorm2d(planes) + + elif norm_fn == 'instance': + self.norm1 = nn.InstanceNorm2d(planes) + self.norm2 = nn.InstanceNorm2d(planes) + if not stride == 1: + self.norm3 = nn.InstanceNorm2d(planes) + + elif norm_fn == 'none': + self.norm1 = nn.Sequential() + self.norm2 = nn.Sequential() + if not stride == 1: + self.norm3 = nn.Sequential() + + if stride == 1: + self.downsample = None + + else: + self.downsample = nn.Sequential( + nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3) + + + def forward(self, x): + y = x + y = self.relu(self.norm1(self.conv1(y))) + y = self.relu(self.norm2(self.conv2(y))) + + if self.downsample is not None: + x = self.downsample(x) + + return self.relu(x+y) + + + +class BottleneckBlock(nn.Module): + def __init__(self, in_planes, planes, norm_fn='group', stride=1): + super(BottleneckBlock, self).__init__() + + self.conv1 = nn.Conv2d(in_planes, planes//4, kernel_size=1, padding=0) + self.conv2 = nn.Conv2d(planes//4, planes//4, kernel_size=3, padding=1, stride=stride) + self.conv3 = nn.Conv2d(planes//4, planes, kernel_size=1, padding=0) + self.relu = nn.ReLU(inplace=True) + + num_groups = planes // 8 + + if norm_fn == 'group': + self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4) + self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4) + self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + if not stride == 1: + self.norm4 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + + elif norm_fn == 'batch': + self.norm1 = nn.BatchNorm2d(planes//4) + self.norm2 = nn.BatchNorm2d(planes//4) + self.norm3 = nn.BatchNorm2d(planes) + if not stride == 1: + self.norm4 = nn.BatchNorm2d(planes) + + elif norm_fn == 'instance': + self.norm1 = nn.InstanceNorm2d(planes//4) + self.norm2 = nn.InstanceNorm2d(planes//4) + self.norm3 = nn.InstanceNorm2d(planes) + if not stride == 1: + self.norm4 = nn.InstanceNorm2d(planes) + + elif norm_fn == 'none': + self.norm1 = nn.Sequential() + self.norm2 = nn.Sequential() + self.norm3 = nn.Sequential() + if not stride == 1: + self.norm4 = nn.Sequential() + + if stride == 1: + self.downsample = None + + else: + self.downsample = nn.Sequential( + nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm4) + + + def forward(self, x): + y = x + y = self.relu(self.norm1(self.conv1(y))) + y = self.relu(self.norm2(self.conv2(y))) + y = self.relu(self.norm3(self.conv3(y))) + + if self.downsample is not None: + x = self.downsample(x) + + return self.relu(x+y) + +class BasicEncoder(nn.Module): + def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0): + super(BasicEncoder, self).__init__() + self.norm_fn = norm_fn + + if self.norm_fn == 'group': + self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64) + + elif self.norm_fn == 'batch': + self.norm1 = nn.BatchNorm2d(64) + + elif self.norm_fn == 'instance': + self.norm1 = nn.InstanceNorm2d(64) + + elif self.norm_fn == 'none': + self.norm1 = nn.Sequential() + + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3) + self.relu1 = 
nn.ReLU(inplace=True) + + self.in_planes = 64 + self.layer1 = self._make_layer(64, stride=1) + self.layer2 = self._make_layer(96, stride=2) + self.layer3 = self._make_layer(128, stride=2) + + # output convolution + self.conv2 = nn.Conv2d(128, output_dim, kernel_size=1) + + self.dropout = None + if dropout > 0: + self.dropout = nn.Dropout2d(p=dropout) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def _make_layer(self, dim, stride=1): + layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride) + layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1) + layers = (layer1, layer2) + + self.in_planes = dim + return nn.Sequential(*layers) + + + def forward(self, x): + + # if input is list, combine batch dimension + is_list = isinstance(x, tuple) or isinstance(x, list) + if is_list: + batch_dim = x[0].shape[0] + x = torch.cat(x, dim=0) + + x = self.conv1(x) + x = self.norm1(x) + x = self.relu1(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + + x = self.conv2(x) + + if self.training and self.dropout is not None: + x = self.dropout(x) + + if is_list: + x = torch.split(x, [batch_dim, batch_dim], dim=0) + + return x + + +class SmallEncoder(nn.Module): + def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0): + super(SmallEncoder, self).__init__() + self.norm_fn = norm_fn + + if self.norm_fn == 'group': + self.norm1 = nn.GroupNorm(num_groups=8, num_channels=32) + + elif self.norm_fn == 'batch': + self.norm1 = nn.BatchNorm2d(32) + + elif self.norm_fn == 'instance': + self.norm1 = nn.InstanceNorm2d(32) + + elif self.norm_fn == 'none': + self.norm1 = nn.Sequential() + + self.conv1 = nn.Conv2d(3, 32, kernel_size=7, stride=2, padding=3) + self.relu1 = nn.ReLU(inplace=True) + + self.in_planes = 32 + self.layer1 = self._make_layer(32, stride=1) + self.layer2 = self._make_layer(64, stride=2) + self.layer3 = self._make_layer(96, stride=2) + + self.dropout = None + if dropout > 0: + self.dropout = nn.Dropout2d(p=dropout) + + self.conv2 = nn.Conv2d(96, output_dim, kernel_size=1) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def _make_layer(self, dim, stride=1): + layer1 = BottleneckBlock(self.in_planes, dim, self.norm_fn, stride=stride) + layer2 = BottleneckBlock(dim, dim, self.norm_fn, stride=1) + layers = (layer1, layer2) + + self.in_planes = dim + return nn.Sequential(*layers) + + + def forward(self, x): + + # if input is list, combine batch dimension + is_list = isinstance(x, tuple) or isinstance(x, list) + if is_list: + batch_dim = x[0].shape[0] + x = torch.cat(x, dim=0) + + x = self.conv1(x) + x = self.norm1(x) + x = self.relu1(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.conv2(x) + + if self.training and self.dropout is not None: + x = self.dropout(x) + + if is_list: + x = torch.split(x, [batch_dim, batch_dim], dim=0) + + return x + +class ResNetFPN(nn.Module): + """ + ResNet18, output resolution is 1/8. + Each block has 2 layers. 
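+    (With args.pretrain == 'resnet34', the three stages use 3, 4, and 6 blocks instead; see n_block in __init__.)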
+ """ + def __init__(self, args, input_dim=3, output_dim=256, ratio=1.0, norm_layer=nn.BatchNorm2d, init_weight=False): + super().__init__() + # Config + block = BasicBlock + block_dims = args.block_dims + initial_dim = args.initial_dim + self.init_weight = init_weight + self.input_dim = input_dim + # Class Variable + self.in_planes = initial_dim + for i in range(len(block_dims)): + block_dims[i] = int(block_dims[i] * ratio) + # Networks + self.conv1 = nn.Conv2d(input_dim, initial_dim, kernel_size=7, stride=2, padding=3) + self.bn1 = norm_layer(initial_dim) + self.relu = nn.ReLU(inplace=True) + if args.pretrain == 'resnet34': + n_block = [3, 4, 6] + elif args.pretrain == 'resnet18': + n_block = [2, 2, 2] + else: + raise NotImplementedError + self.layer1 = self._make_layer(block, block_dims[0], stride=1, norm_layer=norm_layer, num=n_block[0]) # 1/2 + self.layer2 = self._make_layer(block, block_dims[1], stride=2, norm_layer=norm_layer, num=n_block[1]) # 1/4 + self.layer3 = self._make_layer(block, block_dims[2], stride=2, norm_layer=norm_layer, num=n_block[2]) # 1/8 + self.final_conv = conv1x1(block_dims[2], output_dim) + self._init_weights(args) + + def _init_weights(self, args): + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + if self.init_weight: + from torchvision.models import resnet18, ResNet18_Weights, resnet34, ResNet34_Weights + if args.pretrain == 'resnet18': + pretrained_dict = resnet18(weights=ResNet18_Weights.IMAGENET1K_V1).state_dict() + else: + pretrained_dict = resnet34(weights=ResNet34_Weights.IMAGENET1K_V1).state_dict() + model_dict = self.state_dict() + pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict} + if self.input_dim == 6: + for k, v in pretrained_dict.items(): + if k == 'conv1.weight': + pretrained_dict[k] = torch.cat((v, v), dim=1) + model_dict.update(pretrained_dict) + self.load_state_dict(model_dict, strict=False) + + + def _make_layer(self, block, dim, stride=1, norm_layer=nn.BatchNorm2d, num=2): + layers = [] + layers.append(block(self.in_planes, dim, stride=stride, norm_layer=norm_layer)) + for i in range(num - 1): + layers.append(block(dim, dim, stride=1, norm_layer=norm_layer)) + self.in_planes = dim + return nn.Sequential(*layers) + + def forward(self, x): + # ResNet Backbone + x = self.relu(self.bn1(self.conv1(x))) + for i in range(len(self.layer1)): + x = self.layer1[i](x) + for i in range(len(self.layer2)): + x = self.layer2[i](x) + for i in range(len(self.layer3)): + x = self.layer3[i](x) + # Output + output = self.final_conv(x) + return output \ No newline at end of file diff --git a/dynamic_predictor/third_party/RAFT/core/layer.py b/dynamic_predictor/third_party/RAFT/core/layer.py new file mode 100644 index 0000000000000000000000000000000000000000..ecf1e6484eed341f0dc2d8f46e87420754c9cf8e --- /dev/null +++ b/dynamic_predictor/third_party/RAFT/core/layer.py @@ -0,0 +1,135 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +import torch +import math +from torch.nn import Module, Dropout + +### Gradient Clipping and Zeroing Operations ### + +GRAD_CLIP = 0.1 + +class GradClip(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + return x + + @staticmethod + def 
backward(ctx, grad_x): + grad_x = torch.where(torch.isnan(grad_x), torch.zeros_like(grad_x), grad_x) + return grad_x.clamp(min=-0.01, max=0.01) + +class GradientClip(nn.Module): + def __init__(self): + super(GradientClip, self).__init__() + + def forward(self, x): + return GradClip.apply(x) + +def _make_divisible(v, divisor, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. + if new_v < 0.9 * v: + new_v += divisor + return new_v + +class ConvNextBlock(nn.Module): + r""" ConvNeXt Block. There are two equivalent implementations: + (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W) + (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back + We use (2) as we find it slightly faster in PyTorch + + Args: + dim (int): Number of input channels. + output_dim (int): Number of output channels. + layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6. + """ + def __init__(self, dim, output_dim, layer_scale_init_value=1e-6): + super().__init__() + self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv + self.norm = LayerNorm(dim, eps=1e-6) + self.pwconv1 = nn.Linear(dim, 4 * output_dim) # pointwise/1x1 convs, implemented with linear layers + self.act = nn.GELU() + self.pwconv2 = nn.Linear(4 * output_dim, dim) + self.gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)), + requires_grad=True) if layer_scale_init_value > 0 else None + self.final = nn.Conv2d(dim, output_dim, kernel_size=1, padding=0) + + def forward(self, x): + input = x + x = self.dwconv(x) + x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C) + x = self.norm(x) + x = self.pwconv1(x) + x = self.act(x) + x = self.pwconv2(x) + if self.gamma is not None: + x = self.gamma * x + x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) + x = self.final(input + x) + return x + +class LayerNorm(nn.Module): + r""" LayerNorm that supports two data formats: channels_last (default) or channels_first. + The ordering of the dimensions in the inputs. channels_last corresponds to inputs with + shape (batch_size, height, width, channels) while channels_first corresponds to inputs + with shape (batch_size, channels, height, width). 
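+    ConvNextBlock above permutes its activations to (N, H, W, C) before calling this module, so the default channels_last path applies there.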
+ """ + def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"): + super().__init__() + self.weight = nn.Parameter(torch.ones(normalized_shape)) + self.bias = nn.Parameter(torch.zeros(normalized_shape)) + self.eps = eps + self.data_format = data_format + if self.data_format not in ["channels_last", "channels_first"]: + raise NotImplementedError + self.normalized_shape = (normalized_shape, ) + + def forward(self, x): + if self.data_format == "channels_last": + return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) + elif self.data_format == "channels_first": + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x + +def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution without padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0) + + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1) + +class BasicBlock(nn.Module): + def __init__(self, in_planes, planes, stride=1, norm_layer=nn.BatchNorm2d): + super().__init__() + + # self.sparse = sparse + self.conv1 = conv3x3(in_planes, planes, stride) + self.conv2 = conv3x3(planes, planes) + self.bn1 = norm_layer(planes) + self.bn2 = norm_layer(planes) + self.relu = nn.ReLU(inplace=True) + if stride == 1 and in_planes == planes: + self.downsample = None + else: + self.bn3 = norm_layer(planes) + self.downsample = nn.Sequential( + conv1x1(in_planes, planes, stride=stride), + self.bn3 + ) + + def forward(self, x): + y = x + y = self.relu(self.bn1(self.conv1(y))) + y = self.relu(self.bn2(self.conv2(y))) + if self.downsample is not None: + x = self.downsample(x) + return self.relu(x+y) \ No newline at end of file diff --git a/dynamic_predictor/third_party/RAFT/core/raft.py b/dynamic_predictor/third_party/RAFT/core/raft.py new file mode 100644 index 0000000000000000000000000000000000000000..d1a5504f1f8dac4e618e598e8c6a0cafb698b13d --- /dev/null +++ b/dynamic_predictor/third_party/RAFT/core/raft.py @@ -0,0 +1,291 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +from update import BasicUpdateBlock, SmallUpdateBlock, BasicUpdateBlock2 +from extractor import BasicEncoder, SmallEncoder, ResNetFPN +from corr import CorrBlock, AlternateCorrBlock, CorrBlock2 +from utils.utils import bilinear_sampler, coords_grid, upflow8, InputPadder, coords_grid2 +from layer import conv3x3 +import math + +try: + autocast = torch.amp.autocast +except: + # dummy autocast for PyTorch < 1.6 + class autocast: + def __init__(self, enabled): + pass + def __enter__(self): + pass + def __exit__(self, *args): + pass + + +class RAFT(nn.Module): + def __init__(self, args): + super(RAFT, self).__init__() + self.args = args + + if args.small: + self.hidden_dim = hdim = 96 + self.context_dim = cdim = 64 + args.corr_levels = 4 + args.corr_radius = 3 + + else: + self.hidden_dim = hdim = 128 + self.context_dim = cdim = 128 + args.corr_levels = 4 + args.corr_radius = 4 + + if 'dropout' not in self.args: + self.args.dropout = 0 + + if 'alternate_corr' not in self.args: + self.args.alternate_corr = False + + # feature network, context network, and update block + if args.small: + self.fnet = SmallEncoder(output_dim=128, norm_fn='instance', dropout=args.dropout) + self.cnet = SmallEncoder(output_dim=hdim+cdim, 
norm_fn='none', dropout=args.dropout) + self.update_block = SmallUpdateBlock(self.args, hidden_dim=hdim) + + else: + self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=args.dropout) + self.cnet = BasicEncoder(output_dim=hdim+cdim, norm_fn='batch', dropout=args.dropout) + self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim) + + def freeze_bn(self): + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() + + def initialize_flow(self, img): + """ Flow is represented as difference between two coordinate grids flow = coords1 - coords0""" + N, C, H, W = img.shape + coords0 = coords_grid(N, H//8, W//8).to(img.device) + coords1 = coords_grid(N, H//8, W//8).to(img.device) + + # optical flow computed as difference: flow = coords1 - coords0 + return coords0, coords1 + + def upsample_flow(self, flow, mask): + """ Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination """ + N, _, H, W = flow.shape + mask = mask.view(N, 1, 9, 8, 8, H, W) + mask = torch.softmax(mask, dim=2) + + up_flow = F.unfold(8 * flow, [3,3], padding=1) + up_flow = up_flow.view(N, 2, 9, 1, 1, H, W) + + up_flow = torch.sum(mask * up_flow, dim=2) + up_flow = up_flow.permute(0, 1, 4, 2, 5, 3) + return up_flow.reshape(N, 2, 8*H, 8*W) + + + def forward(self, image1, image2, iters=12, flow_init=None, upsample=True, test_mode=False): + """ Estimate optical flow between pair of frames """ + + image1 = 2 * (image1 / 255.0) - 1.0 + image2 = 2 * (image2 / 255.0) - 1.0 + + image1 = image1.contiguous() + image2 = image2.contiguous() + + hdim = self.hidden_dim + cdim = self.context_dim + + # run the feature network + with autocast(enabled=self.args.mixed_precision, device_type="cuda"): + fmap1, fmap2 = self.fnet([image1, image2]) + + fmap1 = fmap1.float() + fmap2 = fmap2.float() + if self.args.alternate_corr: + corr_fn = AlternateCorrBlock(fmap1, fmap2, radius=self.args.corr_radius) + else: + corr_fn = CorrBlock(fmap1, fmap2, radius=self.args.corr_radius) + + # run the context network + with autocast(enabled=self.args.mixed_precision, device_type="cuda"): + cnet = self.cnet(image1) + net, inp = torch.split(cnet, [hdim, cdim], dim=1) + net = torch.tanh(net) + inp = torch.relu(inp) + + coords0, coords1 = self.initialize_flow(image1) + + if flow_init is not None: + coords1 = coords1 + flow_init + + flow_predictions = [] + for itr in range(iters): + coords1 = coords1.detach() + corr = corr_fn(coords1) # index correlation volume + + flow = coords1 - coords0 + with autocast(enabled=self.args.mixed_precision, device_type="cuda"): + net, up_mask, delta_flow = self.update_block(net, inp, corr, flow) + + # F(t+1) = F(t) + \Delta(t) + coords1 = coords1 + delta_flow + + # upsample predictions + if up_mask is None: + flow_up = upflow8(coords1 - coords0) + else: + flow_up = self.upsample_flow(coords1 - coords0, up_mask) + + flow_predictions.append(flow_up) + + if test_mode: + return coords1 - coords0, flow_up + + return flow_predictions +## +# given depth, warp according to camera params. 
+ +# given flow+depth, warp in 2D + +class RAFT2(nn.Module): + def __init__(self, args): + super(RAFT2, self).__init__() + self.args = args + self.output_dim = args.dim * 2 + + self.args.corr_levels = 4 + self.args.corr_radius = args.radius + self.args.corr_channel = args.corr_levels * (args.radius * 2 + 1) ** 2 + self.cnet = ResNetFPN(args, input_dim=6, output_dim=2 * self.args.dim, norm_layer=nn.BatchNorm2d, init_weight=True) + + # conv for iter 0 results + self.init_conv = conv3x3(2 * args.dim, 2 * args.dim) + self.upsample_weight = nn.Sequential( + # convex combination of 3x3 patches + nn.Conv2d(args.dim, args.dim * 2, 3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(args.dim * 2, 64 * 9, 1, padding=0) + ) + self.flow_head = nn.Sequential( + # flow(2) + weight(2) + log_b(2) + nn.Conv2d(args.dim, 2 * args.dim, 3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(2 * args.dim, 6, 3, padding=1) + ) + if args.iters > 0: + self.fnet = ResNetFPN(args, input_dim=3, output_dim=self.output_dim, norm_layer=nn.BatchNorm2d, init_weight=True) + self.update_block = BasicUpdateBlock2(args, hdim=args.dim, cdim=args.dim) + + def initialize_flow(self, img): + """ Flow is represented as difference between two coordinate grids flow = coords2 - coords1""" + N, C, H, W = img.shape + coords1 = coords_grid(N, H//8, W//8, device=img.device) + coords2 = coords_grid(N, H//8, W//8, device=img.device) + return coords1, coords2 + + def upsample_data(self, flow, info, mask): + """ Upsample [H/8, W/8, C] -> [H, W, C] using convex combination """ + N, C, H, W = info.shape + mask = mask.view(N, 1, 9, 8, 8, H, W) + mask = torch.softmax(mask, dim=2) + + up_flow = F.unfold(8 * flow, [3,3], padding=1) + up_flow = up_flow.view(N, 2, 9, 1, 1, H, W) + up_info = F.unfold(info, [3, 3], padding=1) + up_info = up_info.view(N, C, 9, 1, 1, H, W) + + up_flow = torch.sum(mask * up_flow, dim=2) + up_flow = up_flow.permute(0, 1, 4, 2, 5, 3) + up_info = torch.sum(mask * up_info, dim=2) + up_info = up_info.permute(0, 1, 4, 2, 5, 3) + + return up_flow.reshape(N, 2, 8*H, 8*W), up_info.reshape(N, C, 8*H, 8*W) + + def forward(self, image1, image2, iters=None, flow_gt=None, test_mode=False): + """ Estimate optical flow between pair of frames """ + N, _, H, W = image1.shape + if iters is None: + iters = self.args.iters + if flow_gt is None: + flow_gt = torch.zeros(N, 2, H, W, device=image1.device) + + image1 = 2 * (image1 / 255.0) - 1.0 + image2 = 2 * (image2 / 255.0) - 1.0 + image1 = image1.contiguous() + image2 = image2.contiguous() + flow_predictions = [] + info_predictions = [] + + # padding + padder = InputPadder(image1.shape) + image1, image2 = padder.pad(image1, image2) + N, _, H, W = image1.shape + dilation = torch.ones(N, 1, H//8, W//8, device=image1.device) + # run the context network + cnet = self.cnet(torch.cat([image1, image2], dim=1)) + cnet = self.init_conv(cnet) + net, context = torch.split(cnet, [self.args.dim, self.args.dim], dim=1) + + # init flow + flow_update = self.flow_head(net) + weight_update = .25 * self.upsample_weight(net) + flow_8x = flow_update[:, :2] + info_8x = flow_update[:, 2:] + flow_up, info_up = self.upsample_data(flow_8x, info_8x, weight_update) + flow_predictions.append(flow_up) + info_predictions.append(info_up) + + if self.args.iters > 0: + # run the feature network + fmap1_8x = self.fnet(image1) + fmap2_8x = self.fnet(image2) + corr_fn = CorrBlock2(fmap1_8x, fmap2_8x, self.args) + + for itr in range(iters): + N, _, H, W = flow_8x.shape + flow_8x = flow_8x.detach() + coords2 = (coords_grid2(N, H, W, 
device=image1.device) + flow_8x).detach() + corr = corr_fn(coords2, dilation=dilation) + net = self.update_block(net, context, corr, flow_8x) + flow_update = self.flow_head(net) + weight_update = .25 * self.upsample_weight(net) + flow_8x = flow_8x + flow_update[:, :2] + info_8x = flow_update[:, 2:] + # upsample predictions + flow_up, info_up = self.upsample_data(flow_8x, info_8x, weight_update) + flow_predictions.append(flow_up) + info_predictions.append(info_up) + + for i in range(len(info_predictions)): + flow_predictions[i] = padder.unpad(flow_predictions[i]) + info_predictions[i] = padder.unpad(info_predictions[i]) + + if test_mode == False: + # exclude invalid pixels and extremely large displacements + nf_predictions = [] + for i in range(len(info_predictions)): + if not self.args.use_var: + var_max = var_min = 0 + else: + var_max = self.args.var_max + var_min = self.args.var_min + + raw_b = info_predictions[i][:, 2:] + log_b = torch.zeros_like(raw_b) + weight = info_predictions[i][:, :2] + # Large b Component + log_b[:, 0] = torch.clamp(raw_b[:, 0], min=0, max=var_max) + # Small b Component + log_b[:, 1] = torch.clamp(raw_b[:, 1], min=var_min, max=0) + # term2: [N, 2, m, H, W] + term2 = ((flow_gt - flow_predictions[i]).abs().unsqueeze(2)) * (torch.exp(-log_b).unsqueeze(1)) + # term1: [N, m, H, W] + term1 = weight - math.log(2) - log_b + nf_loss = torch.logsumexp(weight, dim=1, keepdim=True) - torch.logsumexp(term1.unsqueeze(1) - term2, dim=2) + nf_predictions.append(nf_loss) + + return {'final': flow_predictions[-1], 'flow': flow_predictions, 'info': info_predictions, 'nf': nf_predictions} + else: + return [flow_predictions,flow_predictions[-1]] \ No newline at end of file diff --git a/dynamic_predictor/third_party/RAFT/core/update.py b/dynamic_predictor/third_party/RAFT/core/update.py new file mode 100644 index 0000000000000000000000000000000000000000..d9a023d6fb3eaad5f2acd5f279b589c734418b9f --- /dev/null +++ b/dynamic_predictor/third_party/RAFT/core/update.py @@ -0,0 +1,174 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from layer import ConvNextBlock + +class FlowHead(nn.Module): + def __init__(self, input_dim=128, hidden_dim=256): + super(FlowHead, self).__init__() + self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1) + self.conv2 = nn.Conv2d(hidden_dim, 2, 3, padding=1) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + return self.conv2(self.relu(self.conv1(x))) + +class ConvGRU(nn.Module): + def __init__(self, hidden_dim=128, input_dim=192+128): + super(ConvGRU, self).__init__() + self.convz = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1) + self.convr = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1) + self.convq = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1) + + def forward(self, h, x): + hx = torch.cat([h, x], dim=1) + + z = torch.sigmoid(self.convz(hx)) + r = torch.sigmoid(self.convr(hx)) + q = torch.tanh(self.convq(torch.cat([r*h, x], dim=1))) + + h = (1-z) * h + z * q + return h + +class SepConvGRU(nn.Module): + def __init__(self, hidden_dim=128, input_dim=192+128): + super(SepConvGRU, self).__init__() + self.convz1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2)) + self.convr1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2)) + self.convq1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2)) + + self.convz2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0)) + self.convr2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), 
padding=(2,0)) + self.convq2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0)) + + + def forward(self, h, x): + # horizontal + hx = torch.cat([h, x], dim=1) + z = torch.sigmoid(self.convz1(hx)) + r = torch.sigmoid(self.convr1(hx)) + q = torch.tanh(self.convq1(torch.cat([r*h, x], dim=1))) + h = (1-z) * h + z * q + + # vertical + hx = torch.cat([h, x], dim=1) + z = torch.sigmoid(self.convz2(hx)) + r = torch.sigmoid(self.convr2(hx)) + q = torch.tanh(self.convq2(torch.cat([r*h, x], dim=1))) + h = (1-z) * h + z * q + + return h + +class SmallMotionEncoder(nn.Module): + def __init__(self, args): + super(SmallMotionEncoder, self).__init__() + cor_planes = args.corr_levels * (2*args.corr_radius + 1)**2 + self.convc1 = nn.Conv2d(cor_planes, 96, 1, padding=0) + self.convf1 = nn.Conv2d(2, 64, 7, padding=3) + self.convf2 = nn.Conv2d(64, 32, 3, padding=1) + self.conv = nn.Conv2d(128, 80, 3, padding=1) + + def forward(self, flow, corr): + cor = F.relu(self.convc1(corr)) + flo = F.relu(self.convf1(flow)) + flo = F.relu(self.convf2(flo)) + cor_flo = torch.cat([cor, flo], dim=1) + out = F.relu(self.conv(cor_flo)) + return torch.cat([out, flow], dim=1) + +class BasicMotionEncoder(nn.Module): + def __init__(self, args): + super(BasicMotionEncoder, self).__init__() + cor_planes = args.corr_levels * (2*args.corr_radius + 1)**2 + self.convc1 = nn.Conv2d(cor_planes, 256, 1, padding=0) + self.convc2 = nn.Conv2d(256, 192, 3, padding=1) + self.convf1 = nn.Conv2d(2, 128, 7, padding=3) + self.convf2 = nn.Conv2d(128, 64, 3, padding=1) + self.conv = nn.Conv2d(64+192, 128-2, 3, padding=1) + + def forward(self, flow, corr): + cor = F.relu(self.convc1(corr)) + cor = F.relu(self.convc2(cor)) + flo = F.relu(self.convf1(flow)) + flo = F.relu(self.convf2(flo)) + + cor_flo = torch.cat([cor, flo], dim=1) + out = F.relu(self.conv(cor_flo)) + return torch.cat([out, flow], dim=1) + +class BasicMotionEncoder2(nn.Module): + def __init__(self, args, dim=128): + super(BasicMotionEncoder2, self).__init__() + cor_planes = args.corr_channel + self.convc1 = nn.Conv2d(cor_planes, dim*2, 1, padding=0) + self.convc2 = nn.Conv2d(dim*2, dim+dim//2, 3, padding=1) + self.convf1 = nn.Conv2d(2, dim, 7, padding=3) + self.convf2 = nn.Conv2d(dim, dim//2, 3, padding=1) + self.conv = nn.Conv2d(dim*2, dim-2, 3, padding=1) + + def forward(self, flow, corr): + cor = F.relu(self.convc1(corr)) + cor = F.relu(self.convc2(cor)) + flo = F.relu(self.convf1(flow)) + flo = F.relu(self.convf2(flo)) + + cor_flo = torch.cat([cor, flo], dim=1) + out = F.relu(self.conv(cor_flo)) + return torch.cat([out, flow], dim=1) + +class SmallUpdateBlock(nn.Module): + def __init__(self, args, hidden_dim=96): + super(SmallUpdateBlock, self).__init__() + self.encoder = SmallMotionEncoder(args) + self.gru = ConvGRU(hidden_dim=hidden_dim, input_dim=82+64) + self.flow_head = FlowHead(hidden_dim, hidden_dim=128) + + def forward(self, net, inp, corr, flow): + motion_features = self.encoder(flow, corr) + inp = torch.cat([inp, motion_features], dim=1) + net = self.gru(net, inp) + delta_flow = self.flow_head(net) + + return net, None, delta_flow + +class BasicUpdateBlock(nn.Module): + def __init__(self, args, hidden_dim=128, input_dim=128): + super(BasicUpdateBlock, self).__init__() + self.args = args + self.encoder = BasicMotionEncoder(args) + self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128+hidden_dim) + self.flow_head = FlowHead(hidden_dim, hidden_dim=256) + + self.mask = nn.Sequential( + nn.Conv2d(128, 256, 3, padding=1), + nn.ReLU(inplace=True), + 
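# 64*9 outputs: 9 convex-combination weights for each pixel of the 8x8 upsampling patch (consumed by upsample_flow) + 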
nn.Conv2d(256, 64*9, 1, padding=0)) + + def forward(self, net, inp, corr, flow, upsample=True): + motion_features = self.encoder(flow, corr) + inp = torch.cat([inp, motion_features], dim=1) + + net = self.gru(net, inp) + delta_flow = self.flow_head(net) + + # scale mask to balance gradients + mask = .25 * self.mask(net) + return net, mask, delta_flow + +class BasicUpdateBlock2(nn.Module): + def __init__(self, args, hdim=128, cdim=128): + #net: hdim, inp: cdim + super(BasicUpdateBlock2, self).__init__() + self.args = args + self.encoder = BasicMotionEncoder2(args, dim=cdim) + self.refine = [] + for i in range(args.num_blocks): + self.refine.append(ConvNextBlock(2*cdim+hdim, hdim)) + self.refine = nn.ModuleList(self.refine) + + def forward(self, net, inp, corr, flow, upsample=True): + motion_features = self.encoder(flow, corr) + inp = torch.cat([inp, motion_features], dim=1) + for blk in self.refine: + net = blk(torch.cat([net, inp], dim=1)) + return net diff --git a/dynamic_predictor/third_party/RAFT/core/utils/__init__.py b/dynamic_predictor/third_party/RAFT/core/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/dynamic_predictor/third_party/RAFT/core/utils/augmentor.py b/dynamic_predictor/third_party/RAFT/core/utils/augmentor.py new file mode 100644 index 0000000000000000000000000000000000000000..e81c4f2b5c16c31c0ae236d744f299d430228a04 --- /dev/null +++ b/dynamic_predictor/third_party/RAFT/core/utils/augmentor.py @@ -0,0 +1,246 @@ +import numpy as np +import random +import math +from PIL import Image + +import cv2 +cv2.setNumThreads(0) +cv2.ocl.setUseOpenCL(False) + +import torch +from torchvision.transforms import ColorJitter +import torch.nn.functional as F + + +class FlowAugmentor: + def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=True): + + # spatial augmentation params + self.crop_size = crop_size + self.min_scale = min_scale + self.max_scale = max_scale + self.spatial_aug_prob = 0.8 + self.stretch_prob = 0.8 + self.max_stretch = 0.2 + + # flip augmentation params + self.do_flip = do_flip + self.h_flip_prob = 0.5 + self.v_flip_prob = 0.1 + + # photometric augmentation params + self.photo_aug = ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.5/3.14) + self.asymmetric_color_aug_prob = 0.2 + self.eraser_aug_prob = 0.5 + + def color_transform(self, img1, img2): + """ Photometric augmentation """ + + # asymmetric + if np.random.rand() < self.asymmetric_color_aug_prob: + img1 = np.array(self.photo_aug(Image.fromarray(img1)), dtype=np.uint8) + img2 = np.array(self.photo_aug(Image.fromarray(img2)), dtype=np.uint8) + + # symmetric + else: + image_stack = np.concatenate([img1, img2], axis=0) + image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8) + img1, img2 = np.split(image_stack, 2, axis=0) + + return img1, img2 + + def eraser_transform(self, img1, img2, bounds=[50, 100]): + """ Occlusion augmentation """ + + ht, wd = img1.shape[:2] + if np.random.rand() < self.eraser_aug_prob: + mean_color = np.mean(img2.reshape(-1, 3), axis=0) + for _ in range(np.random.randint(1, 3)): + x0 = np.random.randint(0, wd) + y0 = np.random.randint(0, ht) + dx = np.random.randint(bounds[0], bounds[1]) + dy = np.random.randint(bounds[0], bounds[1]) + img2[y0:y0+dy, x0:x0+dx, :] = mean_color + + return img1, img2 + + def spatial_transform(self, img1, img2, flow): + # randomly sample scale + ht, wd = img1.shape[:2] + min_scale = np.maximum( + 
(self.crop_size[0] + 8) / float(ht), + (self.crop_size[1] + 8) / float(wd)) + + scale = 2 ** np.random.uniform(self.min_scale, self.max_scale) + scale_x = scale + scale_y = scale + if np.random.rand() < self.stretch_prob: + scale_x *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch) + scale_y *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch) + + scale_x = np.clip(scale_x, min_scale, None) + scale_y = np.clip(scale_y, min_scale, None) + + if np.random.rand() < self.spatial_aug_prob: + # rescale the images + img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR) + img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR) + flow = cv2.resize(flow, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR) + flow = flow * [scale_x, scale_y] + + if self.do_flip: + if np.random.rand() < self.h_flip_prob: # h-flip + img1 = img1[:, ::-1] + img2 = img2[:, ::-1] + flow = flow[:, ::-1] * [-1.0, 1.0] + + if np.random.rand() < self.v_flip_prob: # v-flip + img1 = img1[::-1, :] + img2 = img2[::-1, :] + flow = flow[::-1, :] * [1.0, -1.0] + + y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0]) + x0 = np.random.randint(0, img1.shape[1] - self.crop_size[1]) + + img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + + return img1, img2, flow + + def __call__(self, img1, img2, flow): + img1, img2 = self.color_transform(img1, img2) + img1, img2 = self.eraser_transform(img1, img2) + img1, img2, flow = self.spatial_transform(img1, img2, flow) + + img1 = np.ascontiguousarray(img1) + img2 = np.ascontiguousarray(img2) + flow = np.ascontiguousarray(flow) + + return img1, img2, flow + +class SparseFlowAugmentor: + def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=False): + # spatial augmentation params + self.crop_size = crop_size + self.min_scale = min_scale + self.max_scale = max_scale + self.spatial_aug_prob = 0.8 + self.stretch_prob = 0.8 + self.max_stretch = 0.2 + + # flip augmentation params + self.do_flip = do_flip + self.h_flip_prob = 0.5 + self.v_flip_prob = 0.1 + + # photometric augmentation params + self.photo_aug = ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.3/3.14) + self.asymmetric_color_aug_prob = 0.2 + self.eraser_aug_prob = 0.5 + + def color_transform(self, img1, img2): + image_stack = np.concatenate([img1, img2], axis=0) + image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8) + img1, img2 = np.split(image_stack, 2, axis=0) + return img1, img2 + + def eraser_transform(self, img1, img2): + ht, wd = img1.shape[:2] + if np.random.rand() < self.eraser_aug_prob: + mean_color = np.mean(img2.reshape(-1, 3), axis=0) + for _ in range(np.random.randint(1, 3)): + x0 = np.random.randint(0, wd) + y0 = np.random.randint(0, ht) + dx = np.random.randint(50, 100) + dy = np.random.randint(50, 100) + img2[y0:y0+dy, x0:x0+dx, :] = mean_color + + return img1, img2 + + def resize_sparse_flow_map(self, flow, valid, fx=1.0, fy=1.0): + ht, wd = flow.shape[:2] + coords = np.meshgrid(np.arange(wd), np.arange(ht)) + coords = np.stack(coords, axis=-1) + + coords = coords.reshape(-1, 2).astype(np.float32) + flow = flow.reshape(-1, 2).astype(np.float32) + valid = valid.reshape(-1).astype(np.float32) + + coords0 = coords[valid>=1] + flow0 = flow[valid>=1] + + ht1 = int(round(ht * fy)) + wd1 = int(round(wd * fx)) + + coords1 = coords0 * 
[fx, fy] + flow1 = flow0 * [fx, fy] + + xx = np.round(coords1[:,0]).astype(np.int32) + yy = np.round(coords1[:,1]).astype(np.int32) + + v = (xx > 0) & (xx < wd1) & (yy > 0) & (yy < ht1) + xx = xx[v] + yy = yy[v] + flow1 = flow1[v] + + flow_img = np.zeros([ht1, wd1, 2], dtype=np.float32) + valid_img = np.zeros([ht1, wd1], dtype=np.int32) + + flow_img[yy, xx] = flow1 + valid_img[yy, xx] = 1 + + return flow_img, valid_img + + def spatial_transform(self, img1, img2, flow, valid): + # randomly sample scale + + ht, wd = img1.shape[:2] + min_scale = np.maximum( + (self.crop_size[0] + 1) / float(ht), + (self.crop_size[1] + 1) / float(wd)) + + scale = 2 ** np.random.uniform(self.min_scale, self.max_scale) + scale_x = np.clip(scale, min_scale, None) + scale_y = np.clip(scale, min_scale, None) + + if np.random.rand() < self.spatial_aug_prob: + # rescale the images + img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR) + img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR) + flow, valid = self.resize_sparse_flow_map(flow, valid, fx=scale_x, fy=scale_y) + + if self.do_flip: + if np.random.rand() < 0.5: # h-flip + img1 = img1[:, ::-1] + img2 = img2[:, ::-1] + flow = flow[:, ::-1] * [-1.0, 1.0] + valid = valid[:, ::-1] + + margin_y = 20 + margin_x = 50 + + y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0] + margin_y) + x0 = np.random.randint(-margin_x, img1.shape[1] - self.crop_size[1] + margin_x) + + y0 = np.clip(y0, 0, img1.shape[0] - self.crop_size[0]) + x0 = np.clip(x0, 0, img1.shape[1] - self.crop_size[1]) + + img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + valid = valid[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + return img1, img2, flow, valid + + + def __call__(self, img1, img2, flow, valid): + img1, img2 = self.color_transform(img1, img2) + img1, img2 = self.eraser_transform(img1, img2) + img1, img2, flow, valid = self.spatial_transform(img1, img2, flow, valid) + + img1 = np.ascontiguousarray(img1) + img2 = np.ascontiguousarray(img2) + flow = np.ascontiguousarray(flow) + valid = np.ascontiguousarray(valid) + + return img1, img2, flow, valid diff --git a/dynamic_predictor/third_party/RAFT/core/utils/flow_viz.py b/dynamic_predictor/third_party/RAFT/core/utils/flow_viz.py new file mode 100644 index 0000000000000000000000000000000000000000..dcee65e89b91b07ee0496aeb4c7e7436abf99641 --- /dev/null +++ b/dynamic_predictor/third_party/RAFT/core/utils/flow_viz.py @@ -0,0 +1,132 @@ +# Flow visualization code used from https://github.com/tomrunia/OpticalFlow_Visualization + + +# MIT License +# +# Copyright (c) 2018 Tom Runia +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to conditions. +# +# Author: Tom Runia +# Date Created: 2018-08-03 + +import numpy as np + +def make_colorwheel(): + """ + Generates a color wheel for optical flow visualization as presented in: + Baker et al. 
"A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007) + URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf + + Code follows the original C++ source code of Daniel Scharstein. + Code follows the the Matlab source code of Deqing Sun. + + Returns: + np.ndarray: Color wheel + """ + + RY = 15 + YG = 6 + GC = 4 + CB = 11 + BM = 13 + MR = 6 + + ncols = RY + YG + GC + CB + BM + MR + colorwheel = np.zeros((ncols, 3)) + col = 0 + + # RY + colorwheel[0:RY, 0] = 255 + colorwheel[0:RY, 1] = np.floor(255*np.arange(0,RY)/RY) + col = col+RY + # YG + colorwheel[col:col+YG, 0] = 255 - np.floor(255*np.arange(0,YG)/YG) + colorwheel[col:col+YG, 1] = 255 + col = col+YG + # GC + colorwheel[col:col+GC, 1] = 255 + colorwheel[col:col+GC, 2] = np.floor(255*np.arange(0,GC)/GC) + col = col+GC + # CB + colorwheel[col:col+CB, 1] = 255 - np.floor(255*np.arange(CB)/CB) + colorwheel[col:col+CB, 2] = 255 + col = col+CB + # BM + colorwheel[col:col+BM, 2] = 255 + colorwheel[col:col+BM, 0] = np.floor(255*np.arange(0,BM)/BM) + col = col+BM + # MR + colorwheel[col:col+MR, 2] = 255 - np.floor(255*np.arange(MR)/MR) + colorwheel[col:col+MR, 0] = 255 + return colorwheel + + +def flow_uv_to_colors(u, v, convert_to_bgr=False): + """ + Applies the flow color wheel to (possibly clipped) flow components u and v. + + According to the C++ source code of Daniel Scharstein + According to the Matlab source code of Deqing Sun + + Args: + u (np.ndarray): Input horizontal flow of shape [H,W] + v (np.ndarray): Input vertical flow of shape [H,W] + convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False. + + Returns: + np.ndarray: Flow visualization image of shape [H,W,3] + """ + flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8) + colorwheel = make_colorwheel() # shape [55x3] + ncols = colorwheel.shape[0] + rad = np.sqrt(np.square(u) + np.square(v)) + a = np.arctan2(-v, -u)/np.pi + fk = (a+1) / 2*(ncols-1) + k0 = np.floor(fk).astype(np.int32) + k1 = k0 + 1 + k1[k1 == ncols] = 0 + f = fk - k0 + for i in range(colorwheel.shape[1]): + tmp = colorwheel[:,i] + col0 = tmp[k0] / 255.0 + col1 = tmp[k1] / 255.0 + col = (1-f)*col0 + f*col1 + idx = (rad <= 1) + col[idx] = 1 - rad[idx] * (1-col[idx]) + col[~idx] = col[~idx] * 0.75 # out of range + # Note the 2-i => BGR instead of RGB + ch_idx = 2-i if convert_to_bgr else i + flow_image[:,:,ch_idx] = np.floor(255 * col) + return flow_image + + +def flow_to_image(flow_uv, clip_flow=None, convert_to_bgr=False): + """ + Expects a two dimensional flow image of shape. + + Args: + flow_uv (np.ndarray): Flow UV image of shape [H,W,2] + clip_flow (float, optional): Clip maximum of flow values. Defaults to None. + convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False. 
+ + Returns: + np.ndarray: Flow visualization image of shape [H,W,3] + """ + assert flow_uv.ndim == 3, 'input flow must have three dimensions' + assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]' + if clip_flow is not None: + flow_uv = np.clip(flow_uv, 0, clip_flow) + u = flow_uv[:,:,0] + v = flow_uv[:,:,1] + rad = np.sqrt(np.square(u) + np.square(v)) + rad_max = np.max(rad) + epsilon = 1e-5 + u = u / (rad_max + epsilon) + v = v / (rad_max + epsilon) + return flow_uv_to_colors(u, v, convert_to_bgr) \ No newline at end of file diff --git a/dynamic_predictor/third_party/RAFT/core/utils/frame_utils.py b/dynamic_predictor/third_party/RAFT/core/utils/frame_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6c491135efaffc25bd61ec3ecde99d236f5deb12 --- /dev/null +++ b/dynamic_predictor/third_party/RAFT/core/utils/frame_utils.py @@ -0,0 +1,137 @@ +import numpy as np +from PIL import Image +from os.path import * +import re + +import cv2 +cv2.setNumThreads(0) +cv2.ocl.setUseOpenCL(False) + +TAG_CHAR = np.array([202021.25], np.float32) + +def readFlow(fn): + """ Read .flo file in Middlebury format""" + # Code adapted from: + # http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy + + # WARNING: this will work on little-endian architectures (eg Intel x86) only! + # print 'fn = %s'%(fn) + with open(fn, 'rb') as f: + magic = np.fromfile(f, np.float32, count=1) + if 202021.25 != magic: + print('Magic number incorrect. Invalid .flo file') + return None + else: + w = np.fromfile(f, np.int32, count=1) + h = np.fromfile(f, np.int32, count=1) + # print 'Reading %d x %d flo file\n' % (w, h) + data = np.fromfile(f, np.float32, count=2*int(w)*int(h)) + # Reshape data into 3D array (columns, rows, bands) + # The reshape here is for visualization, the original code is (w,h,2) + return np.resize(data, (int(h), int(w), 2)) + +def readPFM(file): + file = open(file, 'rb') + + color = None + width = None + height = None + scale = None + endian = None + + header = file.readline().rstrip() + if header == b'PF': + color = True + elif header == b'Pf': + color = False + else: + raise Exception('Not a PFM file.') + + dim_match = re.match(rb'^(\d+)\s(\d+)\s$', file.readline()) + if dim_match: + width, height = map(int, dim_match.groups()) + else: + raise Exception('Malformed PFM header.') + + scale = float(file.readline().rstrip()) + if scale < 0: # little-endian + endian = '<' + scale = -scale + else: + endian = '>' # big-endian + + data = np.fromfile(file, endian + 'f') + shape = (height, width, 3) if color else (height, width) + + data = np.reshape(data, shape) + data = np.flipud(data) + return data + +def writeFlow(filename,uv,v=None): + """ Write optical flow to file. + + If v is None, uv is assumed to contain both u and v channels, + stacked in depth. + Original code by Deqing Sun, adapted from Daniel Scharstein. 
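+
+    The layout written below is the Middlebury .flo format: a float32 tag
+    (202021.25), int32 width and height, then row-major interleaved (u, v)
+    float32 values.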
+ """ + nBands = 2 + + if v is None: + assert(uv.ndim == 3) + assert(uv.shape[2] == 2) + u = uv[:,:,0] + v = uv[:,:,1] + else: + u = uv + + assert(u.shape == v.shape) + height,width = u.shape + f = open(filename,'wb') + # write the header + f.write(TAG_CHAR) + np.array(width).astype(np.int32).tofile(f) + np.array(height).astype(np.int32).tofile(f) + # arrange into matrix form + tmp = np.zeros((height, width*nBands)) + tmp[:,np.arange(width)*2] = u + tmp[:,np.arange(width)*2 + 1] = v + tmp.astype(np.float32).tofile(f) + f.close() + + +def readFlowKITTI(filename): + flow = cv2.imread(filename, cv2.IMREAD_ANYDEPTH|cv2.IMREAD_COLOR) + flow = flow[:,:,::-1].astype(np.float32) + flow, valid = flow[:, :, :2], flow[:, :, 2] + flow = (flow - 2**15) / 64.0 + return flow, valid + +def readDispKITTI(filename): + disp = cv2.imread(filename, cv2.IMREAD_ANYDEPTH) / 256.0 + valid = disp > 0.0 + flow = np.stack([-disp, np.zeros_like(disp)], -1) + return flow, valid + + +def writeFlowKITTI(filename, uv): + uv = 64.0 * uv + 2**15 + valid = np.ones([uv.shape[0], uv.shape[1], 1]) + uv = np.concatenate([uv, valid], axis=-1).astype(np.uint16) + cv2.imwrite(filename, uv[..., ::-1]) + + +def read_gen(file_name, pil=False): + ext = splitext(file_name)[-1] + if ext == '.png' or ext == '.jpeg' or ext == '.ppm' or ext == '.jpg': + return Image.open(file_name) + elif ext == '.bin' or ext == '.raw': + return np.load(file_name) + elif ext == '.flo': + return readFlow(file_name).astype(np.float32) + elif ext == '.pfm': + flow = readPFM(file_name).astype(np.float32) + if len(flow.shape) == 2: + return flow + else: + return flow[:, :, :-1] + return [] \ No newline at end of file diff --git a/dynamic_predictor/third_party/RAFT/core/utils/utils.py b/dynamic_predictor/third_party/RAFT/core/utils/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..29b75f9c2792a86439870a49f2223bf7b7c9e877 --- /dev/null +++ b/dynamic_predictor/third_party/RAFT/core/utils/utils.py @@ -0,0 +1,86 @@ +import torch +import torch.nn.functional as F +import numpy as np +from scipy import interpolate + +def coords_grid2(batch, ht, wd, device): + coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) + coords = torch.stack(coords[::-1], dim=0).float() + return coords[None].repeat(batch, 1, 1, 1) + +class InputPadder: + """ Pads images such that dimensions are divisible by 8 """ + def __init__(self, dims, mode='sintel'): + self.ht, self.wd = dims[-2:] + pad_ht = (((self.ht // 8) + 1) * 8 - self.ht) % 8 + pad_wd = (((self.wd // 8) + 1) * 8 - self.wd) % 8 + if mode == 'sintel': + self._pad = [pad_wd//2, pad_wd - pad_wd//2, pad_ht//2, pad_ht - pad_ht//2] + else: + self._pad = [pad_wd//2, pad_wd - pad_wd//2, 0, pad_ht] + + def pad(self, *inputs): + return [F.pad(x, self._pad, mode='replicate') for x in inputs] + + def unpad(self, x): + ht, wd = x.shape[-2:] + c = [self._pad[2], ht-self._pad[3], self._pad[0], wd-self._pad[1]] + return x[..., c[0]:c[1], c[2]:c[3]] + +def forward_interpolate(flow): + flow = flow.detach().cpu().numpy() + dx, dy = flow[0], flow[1] + + ht, wd = dx.shape + x0, y0 = np.meshgrid(np.arange(wd), np.arange(ht)) + + x1 = x0 + dx + y1 = y0 + dy + + x1 = x1.reshape(-1) + y1 = y1.reshape(-1) + dx = dx.reshape(-1) + dy = dy.reshape(-1) + + valid = (x1 > 0) & (x1 < wd) & (y1 > 0) & (y1 < ht) + x1 = x1[valid] + y1 = y1[valid] + dx = dx[valid] + dy = dy[valid] + + flow_x = interpolate.griddata( + (x1, y1), dx, (x0, y0), method='nearest', fill_value=0) + + flow_y = interpolate.griddata( 
+        (x1, y1), dy, (x0, y0), method='nearest', fill_value=0)
+
+    flow = np.stack([flow_x, flow_y], axis=0)
+    return torch.from_numpy(flow).float()
+
+
+def bilinear_sampler(img, coords, mode='bilinear', mask=False):
+    """ Wrapper for grid_sample, uses pixel coordinates """
+    H, W = img.shape[-2:]
+    xgrid, ygrid = coords.split([1,1], dim=-1)
+    xgrid = 2*xgrid/(W-1) - 1
+    ygrid = 2*ygrid/(H-1) - 1
+
+    grid = torch.cat([xgrid, ygrid], dim=-1)
+    img = F.grid_sample(img, grid, align_corners=True)
+
+    if mask:
+        mask = (xgrid > -1) & (ygrid > -1) & (xgrid < 1) & (ygrid < 1)
+        return img, mask.float()
+
+    return img
+
+
+def coords_grid(batch, ht, wd):
+    coords = torch.meshgrid(torch.arange(ht), torch.arange(wd))
+    coords = torch.stack(coords[::-1], dim=0).float()
+    return coords[None].repeat(batch, 1, 1, 1)
+
+
+def upflow8(flow, mode='bilinear'):
+    new_size = (8 * flow.shape[2], 8 * flow.shape[3])
+    return 8 * F.interpolate(flow, size=new_size, mode=mode, align_corners=True)
diff --git a/dynamic_predictor/third_party/RAFT/demo.py b/dynamic_predictor/third_party/RAFT/demo.py
new file mode 100644
index 0000000000000000000000000000000000000000..5abc1da863f1231af1247209739402b05fa8bf85
--- /dev/null
+++ b/dynamic_predictor/third_party/RAFT/demo.py
@@ -0,0 +1,75 @@
+import sys
+sys.path.append('core')
+
+import argparse
+import os
+import cv2
+import glob
+import numpy as np
+import torch
+from PIL import Image
+
+from raft import RAFT
+from utils import flow_viz
+from utils.utils import InputPadder
+
+
+
+DEVICE = 'cuda'
+
+def load_image(imfile):
+    img = np.array(Image.open(imfile)).astype(np.uint8)
+    img = torch.from_numpy(img).permute(2, 0, 1).float()
+    return img[None].to(DEVICE)
+
+
+def viz(img, flo):
+    img = img[0].permute(1,2,0).cpu().numpy()
+    flo = flo[0].permute(1,2,0).cpu().numpy()
+
+    # map flow to rgb image
+    flo = flow_viz.flow_to_image(flo)
+    img_flo = np.concatenate([img, flo], axis=0)
+
+    # import matplotlib.pyplot as plt
+    # plt.imshow(img_flo / 255.0)
+    # plt.show()
+
+    cv2.imshow('image', img_flo[:, :, [2,1,0]]/255.0)
+    cv2.waitKey()
+
+
+def demo(args):
+    model = torch.nn.DataParallel(RAFT(args))
+    model.load_state_dict(torch.load(args.model))
+
+    model = model.module
+    model.to(DEVICE)
+    model.eval()
+
+    with torch.no_grad():
+        images = glob.glob(os.path.join(args.path, '*.png')) + \
+                 glob.glob(os.path.join(args.path, '*.jpg'))
+
+        images = sorted(images)
+        for imfile1, imfile2 in zip(images[:-1], images[1:]):
+            image1 = load_image(imfile1)
+            image2 = load_image(imfile2)
+
+            padder = InputPadder(image1.shape)
+            image1, image2 = padder.pad(image1, image2)
+
+            flow_low, flow_up = model(image1, image2, iters=20, test_mode=True)
+            viz(image1, flow_up)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--model', help="restore checkpoint")
+    parser.add_argument('--path', help="dataset for evaluation")
+    parser.add_argument('--small', action='store_true', help='use small model')
+    parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
+    parser.add_argument('--alternate_corr', action='store_true', help='use efficient correlation implementation')
+    args = parser.parse_args()
+
+    demo(args)
diff --git a/dynamic_predictor/third_party/RAFT/download_models.sh b/dynamic_predictor/third_party/RAFT/download_models.sh
new file mode 100755
index 0000000000000000000000000000000000000000..7b6ed7e478b74699d3c8db3bd744643c35f7da76
--- /dev/null
+++ b/dynamic_predictor/third_party/RAFT/download_models.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+wget https://www.dropbox.com/s/4j4z58wuv8o0mfz/models.zip
+unzip models.zip
diff --git a/dynamic_predictor/third_party/RAFT/evaluate.py b/dynamic_predictor/third_party/RAFT/evaluate.py
new file mode 100644
index 0000000000000000000000000000000000000000..431a0f58891bede2804454fa7f28e9434c4c8746
--- /dev/null
+++ b/dynamic_predictor/third_party/RAFT/evaluate.py
@@ -0,0 +1,197 @@
+import sys
+sys.path.append('core')
+
+from PIL import Image
+import argparse
+import os
+import time
+import numpy as np
+import torch
+import torch.nn.functional as F
+import matplotlib.pyplot as plt
+
+import datasets
+from utils import flow_viz
+from utils import frame_utils
+
+from raft import RAFT
+from utils.utils import InputPadder, forward_interpolate
+
+
+@torch.no_grad()
+def create_sintel_submission(model, iters=32, warm_start=False, output_path='sintel_submission'):
+    """ Create submission for the Sintel leaderboard """
+    model.eval()
+    for dstype in ['clean', 'final']:
+        test_dataset = datasets.MpiSintel(split='test', aug_params=None, dstype=dstype)
+
+        flow_prev, sequence_prev = None, None
+        for test_id in range(len(test_dataset)):
+            image1, image2, (sequence, frame) = test_dataset[test_id]
+            if sequence != sequence_prev:
+                flow_prev = None
+
+            padder = InputPadder(image1.shape)
+            image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda())
+
+            flow_low, flow_pr = model(image1, image2, iters=iters, flow_init=flow_prev, test_mode=True)
+            flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy()
+
+            if warm_start:
+                flow_prev = forward_interpolate(flow_low[0])[None].cuda()
+
+            output_dir = os.path.join(output_path, dstype, sequence)
+            output_file = os.path.join(output_dir, 'frame%04d.flo' % (frame+1))
+
+            if not os.path.exists(output_dir):
+                os.makedirs(output_dir)
+
+            frame_utils.writeFlow(output_file, flow)
+            sequence_prev = sequence
+
+
+@torch.no_grad()
+def create_kitti_submission(model, iters=24, output_path='kitti_submission'):
+    """ Create submission for the KITTI leaderboard """
+    model.eval()
+    test_dataset = datasets.KITTI(split='testing', aug_params=None)
+
+    if not os.path.exists(output_path):
+        os.makedirs(output_path)
+
+    for test_id in range(len(test_dataset)):
+        image1, image2, (frame_id, ) = test_dataset[test_id]
+        padder = InputPadder(image1.shape, mode='kitti')
+        image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda())
+
+        _, flow_pr = model(image1, image2, iters=iters, test_mode=True)
+        flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy()
+
+        output_filename = os.path.join(output_path, frame_id)
+        frame_utils.writeFlowKITTI(output_filename, flow)
+
+
+@torch.no_grad()
+def validate_chairs(model, iters=24):
+    """ Perform evaluation on the FlyingChairs (test) split """
+    model.eval()
+    epe_list = []
+
+    val_dataset = datasets.FlyingChairs(split='validation')
+    for val_id in range(len(val_dataset)):
+        image1, image2, flow_gt, _ = val_dataset[val_id]
+        image1 = image1[None].cuda()
+        image2 = image2[None].cuda()
+
+        _, flow_pr = model(image1, image2, iters=iters, test_mode=True)
+        epe = torch.sum((flow_pr[0].cpu() - flow_gt)**2, dim=0).sqrt()
+        epe_list.append(epe.view(-1).numpy())
+
+    epe = np.mean(np.concatenate(epe_list))
+    print("Validation Chairs EPE: %f" % epe)
+    return {'chairs': epe}
+
+
+@torch.no_grad()
+def validate_sintel(model, iters=32):
+    """ Perform validation using the Sintel (train) split """
+    model.eval()
+    results = {}
+    for dstype in ['clean', 'final']:
+        val_dataset = datasets.MpiSintel(split='training',
dstype=dstype)
+        epe_list = []
+
+        for val_id in range(len(val_dataset)):
+            image1, image2, flow_gt, _ = val_dataset[val_id]
+            image1 = image1[None].cuda()
+            image2 = image2[None].cuda()
+
+            padder = InputPadder(image1.shape)
+            image1, image2 = padder.pad(image1, image2)
+
+            flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True)
+            flow = padder.unpad(flow_pr[0]).cpu()
+
+            epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt()
+            epe_list.append(epe.view(-1).numpy())
+
+        epe_all = np.concatenate(epe_list)
+        epe = np.mean(epe_all)
+        px1 = np.mean(epe_all<1)
+        px3 = np.mean(epe_all<3)
+        px5 = np.mean(epe_all<5)
+
+        print("Validation (%s) EPE: %f, 1px: %f, 3px: %f, 5px: %f" % (dstype, epe, px1, px3, px5))
+        results[dstype] = np.mean(epe_list)
+
+    return results
+
+
+@torch.no_grad()
+def validate_kitti(model, iters=24):
+    """ Perform validation using the KITTI-2015 (train) split """
+    model.eval()
+    val_dataset = datasets.KITTI(split='training')
+
+    out_list, epe_list = [], []
+    for val_id in range(len(val_dataset)):
+        image1, image2, flow_gt, valid_gt = val_dataset[val_id]
+        image1 = image1[None].cuda()
+        image2 = image2[None].cuda()
+
+        padder = InputPadder(image1.shape, mode='kitti')
+        image1, image2 = padder.pad(image1, image2)
+
+        flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True)
+        flow = padder.unpad(flow_pr[0]).cpu()
+
+        epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt()
+        mag = torch.sum(flow_gt**2, dim=0).sqrt()
+
+        epe = epe.view(-1)
+        mag = mag.view(-1)
+        val = valid_gt.view(-1) >= 0.5
+
+        out = ((epe > 3.0) & ((epe/mag) > 0.05)).float()
+        epe_list.append(epe[val].mean().item())
+        out_list.append(out[val].cpu().numpy())
+
+    epe_list = np.array(epe_list)
+    out_list = np.concatenate(out_list)
+
+    epe = np.mean(epe_list)
+    f1 = 100 * np.mean(out_list)
+
+    print("Validation KITTI: %f, %f" % (epe, f1))
+    return {'kitti-epe': epe, 'kitti-f1': f1}
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--model', help="restore checkpoint")
+    parser.add_argument('--dataset', help="dataset for evaluation")
+    parser.add_argument('--small', action='store_true', help='use small model')
+    parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
+    parser.add_argument('--alternate_corr', action='store_true', help='use efficient correlation implementation')
+    args = parser.parse_args()
+
+    model = torch.nn.DataParallel(RAFT(args))
+    model.load_state_dict(torch.load(args.model))
+
+    model.cuda()
+    model.eval()
+
+    # create_sintel_submission(model.module, warm_start=True)
+    # create_kitti_submission(model.module)
+
+    with torch.no_grad():
+        if args.dataset == 'chairs':
+            validate_chairs(model.module)
+
+        elif args.dataset == 'sintel':
+            validate_sintel(model.module)
+
+        elif args.dataset == 'kitti':
+            validate_kitti(model.module)
+
+
diff --git a/dynamic_predictor/third_party/RAFT/models/Tartan-C-T-TSKH-spring540x960-M.pth b/dynamic_predictor/third_party/RAFT/models/Tartan-C-T-TSKH-spring540x960-M.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b5466b61993e9bb9c9934433633886bca8b0737a
--- /dev/null
+++ b/dynamic_predictor/third_party/RAFT/models/Tartan-C-T-TSKH-spring540x960-M.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:adcc169244e99d4e6fe645b60aa8eaf3e4263698a3e870b8fbae618e3d2acc28
+size 78899562
diff --git a/dynamic_predictor/third_party/RAFT/models/raft-chairs.pth b/dynamic_predictor/third_party/RAFT/models/raft-chairs.pth
new file mode 100644
index 0000000000000000000000000000000000000000..47a3a2c420bee48a5642b00e0f3d2a6276e2e40c
--- /dev/null
+++ b/dynamic_predictor/third_party/RAFT/models/raft-chairs.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6c75465cf995d137f89ca2e2d08594ed390befbb8859f7f65c48bcc8feb0fd7
+size 21108000
diff --git a/dynamic_predictor/third_party/RAFT/models/raft-kitti.pth b/dynamic_predictor/third_party/RAFT/models/raft-kitti.pth
new file mode 100644
index 0000000000000000000000000000000000000000..c4ded38c16a8db5a3409d2714051124843511b82
--- /dev/null
+++ b/dynamic_predictor/third_party/RAFT/models/raft-kitti.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9d170362415e1a27bd8402ee966a3ddf0d60df9b2df2c0b4949f5ced490a9e6
+size 21108000
diff --git a/dynamic_predictor/third_party/RAFT/models/raft-sintel.pth b/dynamic_predictor/third_party/RAFT/models/raft-sintel.pth
new file mode 100644
index 0000000000000000000000000000000000000000..054ab495809cac00bb9290514ae141e034f8332c
--- /dev/null
+++ b/dynamic_predictor/third_party/RAFT/models/raft-sintel.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90630d2e7d488a0d3ccb5e8194524850c4c05c732ea4ff99799822c7fa5c5cbf
+size 21108000
diff --git a/dynamic_predictor/third_party/RAFT/models/raft-small.pth b/dynamic_predictor/third_party/RAFT/models/raft-small.pth
new file mode 100644
index 0000000000000000000000000000000000000000..2b0a4800f74d758ca52fa7a5af70c3caff467e1c
--- /dev/null
+++ b/dynamic_predictor/third_party/RAFT/models/raft-small.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7d41b9cc88442bb8aa911dbb33086dac55a226394b142937ff22d5578717332
+size 3984814
diff --git a/dynamic_predictor/third_party/RAFT/models/raft-things.pth b/dynamic_predictor/third_party/RAFT/models/raft-things.pth
new file mode 100644
index 0000000000000000000000000000000000000000..1e206ac8a2f660bc7620b0806a9278ddb3fc594d
--- /dev/null
+++ b/dynamic_predictor/third_party/RAFT/models/raft-things.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fcfa4125d6418f4de95d84aec20a3c5f4e205101715a79f193243c186ac9a7e1
+size 21108000
diff --git a/dynamic_predictor/third_party/RAFT/train.py b/dynamic_predictor/third_party/RAFT/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..307573097f13ee30c67bbe11658f457fdf1ead3c
--- /dev/null
+++ b/dynamic_predictor/third_party/RAFT/train.py
@@ -0,0 +1,247 @@
+from __future__ import print_function, division
+import sys
+sys.path.append('core')
+
+import argparse
+import os
+import cv2
+import time
+import numpy as np
+import matplotlib.pyplot as plt
+
+import torch
+import torch.nn as nn
+import torch.optim as optim
+import torch.nn.functional as F
+
+from torch.utils.data import DataLoader
+from raft import RAFT
+import evaluate
+import datasets
+
+from torch.utils.tensorboard import SummaryWriter
+
+try:
+    from torch.cuda.amp import GradScaler
+except ImportError:
+    # dummy GradScaler for PyTorch < 1.6
+    class GradScaler:
+        def __init__(self, enabled=False):  # accept the same signature as the real GradScaler
+            pass
+        def scale(self, loss):
+            return loss
+        def unscale_(self, optimizer):
+            pass
+        def step(self, optimizer):
+            optimizer.step()
+        def update(self):
+            pass
+
+
+# exclude extremely large displacements
+MAX_FLOW = 400
+SUM_FREQ = 100
+VAL_FREQ = 5000
+
+
+def sequence_loss(flow_preds, flow_gt, valid, gamma=0.8, max_flow=MAX_FLOW):
+    """ Loss function defined over sequence of flow predictions """
+
+    n_predictions = len(flow_preds)
+    flow_loss = 0.0
+
+    # exclude invalid
pixels and extremely large displacements
+    mag = torch.sum(flow_gt**2, dim=1).sqrt()
+    valid = (valid >= 0.5) & (mag < max_flow)
+
+    for i in range(n_predictions):
+        i_weight = gamma**(n_predictions - i - 1)
+        i_loss = (flow_preds[i] - flow_gt).abs()
+        flow_loss += i_weight * (valid[:, None] * i_loss).mean()
+
+    epe = torch.sum((flow_preds[-1] - flow_gt)**2, dim=1).sqrt()
+    epe = epe.view(-1)[valid.view(-1)]
+
+    metrics = {
+        'epe': epe.mean().item(),
+        '1px': (epe < 1).float().mean().item(),
+        '3px': (epe < 3).float().mean().item(),
+        '5px': (epe < 5).float().mean().item(),
+    }
+
+    return flow_loss, metrics
+
+
+def count_parameters(model):
+    return sum(p.numel() for p in model.parameters() if p.requires_grad)
+
+
+def fetch_optimizer(args, model):
+    """ Create the optimizer and learning rate scheduler """
+    optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.wdecay, eps=args.epsilon)
+
+    scheduler = optim.lr_scheduler.OneCycleLR(optimizer, args.lr, args.num_steps+100,
+        pct_start=0.05, cycle_momentum=False, anneal_strategy='linear')
+
+    return optimizer, scheduler
+
+
+class Logger:
+    def __init__(self, model, scheduler):
+        self.model = model
+        self.scheduler = scheduler
+        self.total_steps = 0
+        self.running_loss = {}
+        self.writer = None
+
+    def _print_training_status(self):
+        metrics_data = [self.running_loss[k]/SUM_FREQ for k in sorted(self.running_loss.keys())]
+        training_str = "[{:6d}, {:10.7f}] ".format(self.total_steps+1, self.scheduler.get_last_lr()[0])
+        metrics_str = ("{:10.4f}, "*len(metrics_data)).format(*metrics_data)
+
+        # print the training status
+        print(training_str + metrics_str)
+
+        if self.writer is None:
+            self.writer = SummaryWriter()
+
+        for k in self.running_loss:
+            self.writer.add_scalar(k, self.running_loss[k]/SUM_FREQ, self.total_steps)
+            self.running_loss[k] = 0.0
+
+    def push(self, metrics):
+        self.total_steps += 1
+
+        for key in metrics:
+            if key not in self.running_loss:
+                self.running_loss[key] = 0.0
+
+            self.running_loss[key] += metrics[key]
+
+        if self.total_steps % SUM_FREQ == SUM_FREQ-1:
+            self._print_training_status()
+            self.running_loss = {}
+
+    def write_dict(self, results):
+        if self.writer is None:
+            self.writer = SummaryWriter()
+
+        for key in results:
+            self.writer.add_scalar(key, results[key], self.total_steps)
+
+    def close(self):
+        self.writer.close()
+
+
+def train(args):
+
+    model = nn.DataParallel(RAFT(args), device_ids=args.gpus)
+    print("Parameter Count: %d" % count_parameters(model))
+
+    if args.restore_ckpt is not None:
+        model.load_state_dict(torch.load(args.restore_ckpt), strict=False)
+
+    model.cuda()
+    model.train()
+
+    if args.stage != 'chairs':
+        model.module.freeze_bn()
+
+    train_loader = datasets.fetch_dataloader(args)
+    optimizer, scheduler = fetch_optimizer(args, model)
+
+    total_steps = 0
+    scaler = GradScaler(enabled=args.mixed_precision)
+    logger = Logger(model, scheduler)
+
+    VAL_FREQ = 5000
+    add_noise = True
+
+    should_keep_training = True
+    while should_keep_training:
+
+        for i_batch, data_blob in enumerate(train_loader):
+            optimizer.zero_grad()
+            image1, image2, flow, valid = [x.cuda() for x in data_blob]
+
+            if args.add_noise:
+                stdv = np.random.uniform(0.0, 5.0)
+                image1 = (image1 + stdv * torch.randn(*image1.shape).cuda()).clamp(0.0, 255.0)
+                image2 = (image2 + stdv * torch.randn(*image2.shape).cuda()).clamp(0.0, 255.0)
+
+            flow_predictions = model(image1, image2, iters=args.iters)
+
+            loss, metrics = sequence_loss(flow_predictions, flow, valid, args.gamma)
+            scaler.scale(loss).backward()
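+            # Unscale the gradients before clipping so the clip threshold
+            # applies to their true magnitudes; scaler.step() then skips the
+            # optimizer update if any unscaled gradient contains infs/NaNs,
+            # and scaler.update() adjusts the loss scale accordingly.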
+            scaler.unscale_(optimizer)
+            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
+
+            scaler.step(optimizer)
+            scheduler.step()
+            scaler.update()
+
+            logger.push(metrics)
+
+            if total_steps % VAL_FREQ == VAL_FREQ - 1:
+                PATH = 'checkpoints/%d_%s.pth' % (total_steps+1, args.name)
+                torch.save(model.state_dict(), PATH)
+
+                results = {}
+                for val_dataset in args.validation:
+                    if val_dataset == 'chairs':
+                        results.update(evaluate.validate_chairs(model.module))
+                    elif val_dataset == 'sintel':
+                        results.update(evaluate.validate_sintel(model.module))
+                    elif val_dataset == 'kitti':
+                        results.update(evaluate.validate_kitti(model.module))
+
+                logger.write_dict(results)
+
+                model.train()
+                if args.stage != 'chairs':
+                    model.module.freeze_bn()
+
+            total_steps += 1
+
+            if total_steps > args.num_steps:
+                should_keep_training = False
+                break
+
+    logger.close()
+    PATH = 'checkpoints/%s.pth' % args.name
+    torch.save(model.state_dict(), PATH)
+
+    return PATH
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--name', default='raft', help="name your experiment")
+    parser.add_argument('--stage', help="determines which dataset to use for training")
+    parser.add_argument('--restore_ckpt', help="restore checkpoint")
+    parser.add_argument('--small', action='store_true', help='use small model')
+    parser.add_argument('--validation', type=str, nargs='+')
+
+    parser.add_argument('--lr', type=float, default=0.00002)
+    parser.add_argument('--num_steps', type=int, default=100000)
+    parser.add_argument('--batch_size', type=int, default=6)
+    parser.add_argument('--image_size', type=int, nargs='+', default=[384, 512])
+    parser.add_argument('--gpus', type=int, nargs='+', default=[0,1])
+    parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
+
+    parser.add_argument('--iters', type=int, default=12)
+    parser.add_argument('--wdecay', type=float, default=.00005)
+    parser.add_argument('--epsilon', type=float, default=1e-8)
+    parser.add_argument('--clip', type=float, default=1.0)
+    parser.add_argument('--dropout', type=float, default=0.0)
+    parser.add_argument('--gamma', type=float, default=0.8, help='exponential weighting')
+    parser.add_argument('--add_noise', action='store_true')
+    args = parser.parse_args()
+
+    torch.manual_seed(1234)
+    np.random.seed(1234)
+
+    if not os.path.isdir('checkpoints'):
+        os.mkdir('checkpoints')
+
+    train(args)
\ No newline at end of file
diff --git a/dynamic_predictor/third_party/RAFT/train_mixed.sh b/dynamic_predictor/third_party/RAFT/train_mixed.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d9b979f143902a17a0ba7b0a8f960598b7096e0b
--- /dev/null
+++ b/dynamic_predictor/third_party/RAFT/train_mixed.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+mkdir -p checkpoints
+python -u train.py --name raft-chairs --stage chairs --validation chairs --gpus 0 --num_steps 120000 --batch_size 8 --lr 0.00025 --image_size 368 496 --wdecay 0.0001 --mixed_precision
+python -u train.py --name raft-things --stage things --validation sintel --restore_ckpt checkpoints/raft-chairs.pth --gpus 0 --num_steps 120000 --batch_size 5 --lr 0.0001 --image_size 400 720 --wdecay 0.0001 --mixed_precision
+python -u train.py --name raft-sintel --stage sintel --validation sintel --restore_ckpt checkpoints/raft-things.pth --gpus 0 --num_steps 120000 --batch_size 5 --lr 0.0001 --image_size 368 768 --wdecay 0.00001 --gamma=0.85 --mixed_precision
+python -u train.py --name raft-kitti --stage kitti --validation kitti --restore_ckpt checkpoints/raft-sintel.pth --gpus 0 --num_steps 50000 --batch_size 5 --lr 0.0001 --image_size 288 960 --wdecay 0.00001 --gamma=0.85 --mixed_precision
diff --git a/dynamic_predictor/third_party/RAFT/train_standard.sh b/dynamic_predictor/third_party/RAFT/train_standard.sh
new file mode 100755
index 0000000000000000000000000000000000000000..7f559b386b6b596ec14a94f0d8c13974309b7d80
--- /dev/null
+++ b/dynamic_predictor/third_party/RAFT/train_standard.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+mkdir -p checkpoints
+python -u train.py --name raft-chairs --stage chairs --validation chairs --gpus 0 1 --num_steps 100000 --batch_size 10 --lr 0.0004 --image_size 368 496 --wdecay 0.0001
+python -u train.py --name raft-things --stage things --validation sintel --restore_ckpt checkpoints/raft-chairs.pth --gpus 0 1 --num_steps 100000 --batch_size 6 --lr 0.000125 --image_size 400 720 --wdecay 0.0001
+python -u train.py --name raft-sintel --stage sintel --validation sintel --restore_ckpt checkpoints/raft-things.pth --gpus 0 1 --num_steps 100000 --batch_size 6 --lr 0.000125 --image_size 368 768 --wdecay 0.00001 --gamma=0.85
+python -u train.py --name raft-kitti --stage kitti --validation kitti --restore_ckpt checkpoints/raft-sintel.pth --gpus 0 1 --num_steps 50000 --batch_size 6 --lr 0.0001 --image_size 288 960 --wdecay 0.00001 --gamma=0.85
diff --git a/dynamic_predictor/third_party/__init__.py b/dynamic_predictor/third_party/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/dynamic_predictor/third_party/raft.py b/dynamic_predictor/third_party/raft.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea19e91399e74452870d2758466d2e941d02a034
--- /dev/null
+++ b/dynamic_predictor/third_party/raft.py
@@ -0,0 +1,77 @@
+
+import sys
+import argparse
+import torch
+import json
+from os.path import dirname, join
+RAFT_PATH_ROOT = join(dirname(__file__), 'RAFT')
+RAFT_PATH_CORE = join(RAFT_PATH_ROOT, 'core')
+sys.path.append(RAFT_PATH_CORE)
+from raft import RAFT, RAFT2  # nopep8
+from utils.utils import InputPadder  # nopep8
+
+# %%
+# utility functions
+
+def json_to_args(json_path):
+    # return an argparse.Namespace object
+    with open(json_path, 'r') as f:
+        data = json.load(f)
+    args = argparse.Namespace()
+    args_dict = args.__dict__
+    for key, value in data.items():
+        args_dict[key] = value
+    return args
+
+def parse_args(parser):
+    entry = parser.parse_args(args=[])
+    json_path = entry.cfg
+    args = json_to_args(json_path)
+    args_dict = args.__dict__
+    for index, (key, value) in enumerate(vars(entry).items()):
+        args_dict[key] = value
+    return args
+
+def get_input_padder(shape):
+    return InputPadder(shape, mode='sintel')
+
+
+def load_RAFT(model_path=None):
+    if model_path is None or 'M' not in model_path: # RAFT1
+        parser = argparse.ArgumentParser()
+        parser.add_argument('--model', help="restore checkpoint", default=model_path)
+        parser.add_argument('--path', help="dataset for evaluation")
+        parser.add_argument('--small', action='store_true', help='use small model')
+        parser.add_argument('--mixed_precision',
+                            action='store_true', help='use mixed precision')
+        parser.add_argument('--alternate_corr', action='store_true',
+                            help='use efficient correlation implementation')
+
+        # Set default value for --model if model_path is provided
+        args = parser.parse_args(
+            ['--model', model_path if model_path else join(RAFT_PATH_ROOT, 'models', 'raft-sintel.pth'), '--path', './'])
+
+        net = RAFT(args)
+    else: # RAFT2
+        parser =
argparse.ArgumentParser() + parser.add_argument('--cfg', help='experiment configure file name', default="third_party/RAFT/core/configs/congif_spring_M.json") + parser.add_argument('--model', help='checkpoint path', default=model_path) + parser.add_argument('--device', help='inference device', type=str, default='cpu') + args = parse_args(parser) + net = RAFT2(args) + + state_dict = torch.load(args.model, weights_only=False) + # print('Loaded pretrained RAFT model from', args.model) + new_state_dict = {} + for k in state_dict: + if 'module' in k: + name = k[7:] + else: + name = k + new_state_dict[name] = state_dict[k] + net.load_state_dict(new_state_dict) + return net.eval() + +if __name__ == "__main__": + net = load_RAFT(model_path='third_party/RAFT/models/Tartan-C-T432x960-M.pth') + print(net) \ No newline at end of file diff --git a/gaussian_renderer/__init__.py b/gaussian_renderer/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..ca33dc0033c6416fa715049f9fbaea362b5e5be4 --- /dev/null +++ b/gaussian_renderer/__init__.py @@ -0,0 +1,510 @@ +# +# Copyright (C) 2023, Inria +# GRAPHDECO research group, https://team.inria.fr/graphdeco +# All rights reserved. +# +# This software is free for non-commercial, research and evaluation use +# under the terms of the LICENSE.md file. +# +# For inquiries contact george.drettakis@inria.fr +# + +import torch +import math +from diff_gaussian_rasterization import ( + GaussianRasterizationSettings, + GaussianRasterizer, +) +from scene.gaussian_model import GaussianModel +from utils.sh_utils import eval_sh +from utils.pose_utils import get_camera_from_tensor, quadmultiply + + +def render( + viewpoint_camera, + pc: GaussianModel, + pipe, + bg_color: torch.Tensor, + scaling_modifier=1.0, + override_color=None, + camera_pose=None, + filtering=None +): + """ + Render the scene. + + Background tensor (bg_color) must be on GPU! + """ + if filtering is None: + filtering = torch.ones(pc.get_xyz.shape[0], dtype=torch.bool, device="cuda") + # Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means + screenspace_points = ( + torch.zeros_like( + pc.get_xyz[filtering], dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda" + ) + + 0 + ) + try: + screenspace_points.retain_grad() + except: + pass + + # Set up rasterization configuration + tanfovx = math.tan(viewpoint_camera.FoVx * 0.5) + tanfovy = math.tan(viewpoint_camera.FoVy * 0.5) + + # Set camera pose as identity. 
Then, we will transform the Gaussians around camera_pose + w2c = torch.eye(4).cuda() + projmatrix = ( + w2c.unsqueeze(0).bmm(viewpoint_camera.projection_matrix.unsqueeze(0)) + ).squeeze(0) + camera_pos = w2c.inverse()[3, :3] + raster_settings = GaussianRasterizationSettings( + image_height=int(viewpoint_camera.image_height), + image_width=int(viewpoint_camera.image_width), + tanfovx=tanfovx, + tanfovy=tanfovy, + bg=bg_color, + scale_modifier=scaling_modifier, + # viewmatrix=viewpoint_camera.world_view_transform, + # projmatrix=viewpoint_camera.full_proj_transform, + viewmatrix=w2c, + projmatrix=projmatrix, + sh_degree=pc.active_sh_degree, + # campos=viewpoint_camera.camera_center, + campos=camera_pos, + prefiltered=False, + debug=pipe.debug, + ) + + rasterizer = GaussianRasterizer(raster_settings=raster_settings) + + # means3D = pc.get_xyz + rel_w2c = get_camera_from_tensor(camera_pose) + # Transform mean and rot of Gaussians to camera frame + gaussians_xyz = pc._xyz.clone()[filtering] + gaussians_rot = pc._rotation.clone()[filtering] + + xyz_ones = torch.ones(gaussians_xyz.shape[0], 1).cuda().float() + xyz_homo = torch.cat((gaussians_xyz, xyz_ones), dim=1) + gaussians_xyz_trans = (rel_w2c @ xyz_homo.T).T[:, :3] + gaussians_rot_trans = quadmultiply(camera_pose[:4], gaussians_rot) + means3D = gaussians_xyz_trans + means2D = screenspace_points + + opacity = pc.get_opacity[filtering] + + opacity = opacity * pc._conf_static.reshape(-1, 1)[pc.aggregated_mask] + + # If precomputed 3d covariance is provided, use it. If not, then it will be computed from + # scaling / rotation by the rasterizer. + scales = None + rotations = None + cov3D_precomp = None + if pipe.compute_cov3D_python: + cov3D_precomp = pc.get_covariance(scaling_modifier) + else: + scales = pc.get_scaling[filtering] + rotations = gaussians_rot_trans # pc.get_rotation + + # If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors + # from SHs in Python, do it. If not, then SH -> RGB conversion will be done by rasterizer. + shs = None + colors_precomp = None + if override_color is None: + if pipe.convert_SHs_python: + shs_view = pc.get_features.transpose(1, 2).view( + -1, 3, (pc.max_sh_degree + 1) ** 2 + ) + dir_pp = pc.get_xyz - viewpoint_camera.camera_center.repeat( + pc.get_features.shape[0], 1 + ) + dir_pp_normalized = dir_pp / dir_pp.norm(dim=1, keepdim=True) + sh2rgb = eval_sh(pc.active_sh_degree, shs_view, dir_pp_normalized) + colors_precomp = torch.clamp_min(sh2rgb + 0.5, 0.0) + else: + shs = pc.get_features[filtering] + else: + colors_precomp = override_color + + # Rasterize visible Gaussians to image, obtain their radii (on screen). + rendered_image, radii = rasterizer( + means3D=means3D, + means2D=means2D, + shs=shs, + colors_precomp=colors_precomp, + opacities=opacity, + scales=scales, + rotations=rotations, + cov3D_precomp=cov3D_precomp, + ) + + # Those Gaussians that were frustum culled or had a radius of 0 were not visible. + # They will be excluded from value updates used in the splitting criteria. + return { + "render": rendered_image, + "viewspace_points": screenspace_points, + "visibility_filter": radii > 0, + "radii": radii, + } + + +def render_test( + viewpoint_camera, + pc: GaussianModel, + pipe, + bg_color: torch.Tensor, + scaling_modifier=1.0, + override_color=None, + camera_pose=None, + filtering=None +): + """ + Render the scene. + + Background tensor (bg_color) must be on GPU! 
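+
+    Same as render(), except that the opacity is modulated by
+    pc._conf_static directly, rather than by
+    pc._conf_static.reshape(-1, 1)[pc.aggregated_mask] as in render().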
+ """ + if filtering is None: + filtering = torch.ones(pc.get_xyz.shape[0], dtype=torch.bool, device="cuda") + # Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means + screenspace_points = ( + torch.zeros_like( + pc.get_xyz[filtering], dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda" + ) + + 0 + ) + try: + screenspace_points.retain_grad() + except: + pass + + # Set up rasterization configuration + tanfovx = math.tan(viewpoint_camera.FoVx * 0.5) + tanfovy = math.tan(viewpoint_camera.FoVy * 0.5) + + # Set camera pose as identity. Then, we will transform the Gaussians around camera_pose + w2c = torch.eye(4).cuda() + projmatrix = ( + w2c.unsqueeze(0).bmm(viewpoint_camera.projection_matrix.unsqueeze(0)) + ).squeeze(0) + camera_pos = w2c.inverse()[3, :3] + raster_settings = GaussianRasterizationSettings( + image_height=int(viewpoint_camera.image_height), + image_width=int(viewpoint_camera.image_width), + tanfovx=tanfovx, + tanfovy=tanfovy, + bg=bg_color, + scale_modifier=scaling_modifier, + # viewmatrix=viewpoint_camera.world_view_transform, + # projmatrix=viewpoint_camera.full_proj_transform, + viewmatrix=w2c, + projmatrix=projmatrix, + sh_degree=pc.active_sh_degree, + # campos=viewpoint_camera.camera_center, + campos=camera_pos, + prefiltered=False, + debug=pipe.debug, + ) + + rasterizer = GaussianRasterizer(raster_settings=raster_settings) + + # means3D = pc.get_xyz + rel_w2c = get_camera_from_tensor(camera_pose) + # Transform mean and rot of Gaussians to camera frame + gaussians_xyz = pc._xyz.clone()[filtering] + gaussians_rot = pc._rotation.clone()[filtering] + + xyz_ones = torch.ones(gaussians_xyz.shape[0], 1).cuda().float() + xyz_homo = torch.cat((gaussians_xyz, xyz_ones), dim=1) + gaussians_xyz_trans = (rel_w2c @ xyz_homo.T).T[:, :3] + gaussians_rot_trans = quadmultiply(camera_pose[:4], gaussians_rot) + means3D = gaussians_xyz_trans + means2D = screenspace_points + + opacity = pc.get_opacity[filtering] + + opacity = opacity * pc._conf_static + # If precomputed 3d covariance is provided, use it. If not, then it will be computed from + # scaling / rotation by the rasterizer. + scales = None + rotations = None + cov3D_precomp = None + if pipe.compute_cov3D_python: + cov3D_precomp = pc.get_covariance(scaling_modifier) + else: + scales = pc.get_scaling[filtering] + rotations = gaussians_rot_trans # pc.get_rotation + + # If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors + # from SHs in Python, do it. If not, then SH -> RGB conversion will be done by rasterizer. + shs = None + colors_precomp = None + if override_color is None: + if pipe.convert_SHs_python: + shs_view = pc.get_features.transpose(1, 2).view( + -1, 3, (pc.max_sh_degree + 1) ** 2 + ) + dir_pp = pc.get_xyz - viewpoint_camera.camera_center.repeat( + pc.get_features.shape[0], 1 + ) + dir_pp_normalized = dir_pp / dir_pp.norm(dim=1, keepdim=True) + sh2rgb = eval_sh(pc.active_sh_degree, shs_view, dir_pp_normalized) + colors_precomp = torch.clamp_min(sh2rgb + 0.5, 0.0) + else: + shs = pc.get_features[filtering] + else: + colors_precomp = override_color + + # Rasterize visible Gaussians to image, obtain their radii (on screen). + rendered_image, radii = rasterizer( + means3D=means3D, + means2D=means2D, + shs=shs, + colors_precomp=colors_precomp, + opacities=opacity, + scales=scales, + rotations=rotations, + cov3D_precomp=cov3D_precomp, + ) + + # Those Gaussians that were frustum culled or had a radius of 0 were not visible. 
+ # They will be excluded from value updates used in the splitting criteria. + return { + "render": rendered_image, + "viewspace_points": screenspace_points, + "visibility_filter": radii > 0, + "radii": radii, + } + +def render_no_soft( + viewpoint_camera, + pc: GaussianModel, + pipe, + bg_color: torch.Tensor, + scaling_modifier=1.0, + override_color=None, + camera_pose=None, + filtering=None +): + """ + Render the scene. + + Background tensor (bg_color) must be on GPU! + """ + if filtering is None: + filtering = torch.ones(pc.get_xyz.shape[0], dtype=torch.bool, device="cuda") + # Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means + screenspace_points = ( + torch.zeros_like( + pc.get_xyz[filtering], dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda" + ) + + 0 + ) + try: + screenspace_points.retain_grad() + except: + pass + + FoVx = pc.FoVx + FoVy = pc.FoVy + # print(f"Rendering with FoVx: {FoVx}, FoVy: {FoVy}") + # Set up rasterization configuration + tanfovx = math.tan(FoVx * 0.5) + tanfovy = math.tan(FoVy * 0.5) + + # Set camera pose as identity. Then, we will transform the Gaussians around camera_pose + w2c = torch.eye(4).cuda() + projmatrix = ( + w2c.unsqueeze(0).bmm(viewpoint_camera.get_projection_matrix(FoVx, FoVy).unsqueeze(0)) + ).squeeze(0) + camera_pos = w2c.inverse()[3, :3] + raster_settings = GaussianRasterizationSettings( + image_height=int(viewpoint_camera.image_height), + image_width=int(viewpoint_camera.image_width), + tanfovx=tanfovx, + tanfovy=tanfovy, + bg=bg_color, + scale_modifier=scaling_modifier, + # viewmatrix=viewpoint_camera.world_view_transform, + # projmatrix=viewpoint_camera.full_proj_transform, + viewmatrix=w2c, + projmatrix=projmatrix, + sh_degree=pc.active_sh_degree, + # campos=viewpoint_camera.camera_center, + campos=camera_pos, + prefiltered=False, + debug=pipe.debug, + ) + + rasterizer = GaussianRasterizer(raster_settings=raster_settings) + + # means3D = pc.get_xyz + rel_w2c = get_camera_from_tensor(camera_pose) + # Transform mean and rot of Gaussians to camera frame + gaussians_xyz = pc._xyz.clone()[filtering] + gaussians_rot = pc._rotation.clone()[filtering] + + xyz_ones = torch.ones(gaussians_xyz.shape[0], 1).cuda().float() + xyz_homo = torch.cat((gaussians_xyz, xyz_ones), dim=1) + gaussians_xyz_trans = (rel_w2c @ xyz_homo.T).T[:, :3] + gaussians_rot_trans = quadmultiply(camera_pose[:4], gaussians_rot) + means3D = gaussians_xyz_trans + means2D = screenspace_points + + opacity = pc.get_opacity[filtering] + + # opacity = opacity * pc._conf_static.reshape(-1, 1)[pc.aggregated_mask] + + # If precomputed 3d covariance is provided, use it. If not, then it will be computed from + # scaling / rotation by the rasterizer. + scales = None + rotations = None + cov3D_precomp = None + if pipe.compute_cov3D_python: + cov3D_precomp = pc.get_covariance(scaling_modifier) + else: + scales = pc.get_scaling[filtering] + rotations = gaussians_rot_trans # pc.get_rotation + + # If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors + # from SHs in Python, do it. If not, then SH -> RGB conversion will be done by rasterizer. 
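+    # A degree-d SH expansion uses (d+1)**2 coefficients per color channel,
+    # which is why the Python path below reshapes the features to
+    # (-1, 3, (pc.max_sh_degree + 1) ** 2) before calling eval_sh.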
+ shs = None + colors_precomp = None + if override_color is None: + if pipe.convert_SHs_python: + shs_view = pc.get_features.transpose(1, 2).view( + -1, 3, (pc.max_sh_degree + 1) ** 2 + ) + dir_pp = pc.get_xyz - viewpoint_camera.camera_center.repeat( + pc.get_features.shape[0], 1 + ) + dir_pp_normalized = dir_pp / dir_pp.norm(dim=1, keepdim=True) + sh2rgb = eval_sh(pc.active_sh_degree, shs_view, dir_pp_normalized) + colors_precomp = torch.clamp_min(sh2rgb + 0.5, 0.0) + else: + shs = pc.get_features[filtering] + else: + colors_precomp = override_color + + # Rasterize visible Gaussians to image, obtain their radii (on screen). + rendered_image, radii = rasterizer( + means3D=means3D, + means2D=means2D, + shs=shs, + colors_precomp=colors_precomp, + opacities=opacity, + scales=scales, + rotations=rotations, + cov3D_precomp=cov3D_precomp, + ) + + # Those Gaussians that were frustum culled or had a radius of 0 were not visible. + # They will be excluded from value updates used in the splitting criteria. + return { + "render": rendered_image, + "viewspace_points": screenspace_points, + "visibility_filter": radii > 0, + "radii": radii, + } + +def render_confidence( + viewpoint_camera, + pc: GaussianModel, + pipe, + bg_color: torch.Tensor, + scaling_modifier=1.0, + override_color=None, + camera_pose=None, + filtering=None +): + """ + Render the confidence. + + Background tensor (bg_color) must be on GPU! + """ + if filtering is None: + filtering = torch.ones(pc.get_xyz.shape[0], dtype=torch.bool, device="cuda") + # Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means + screenspace_points = ( + torch.zeros_like( + pc.get_xyz[filtering], dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda" + ) + + 0 + ) + try: + screenspace_points.retain_grad() + except: + pass + + # Set up rasterization configuration + tanfovx = math.tan(viewpoint_camera.FoVx * 0.5) + tanfovy = math.tan(viewpoint_camera.FoVy * 0.5) + + # Set camera pose as identity. Then, we will transform the Gaussians around camera_pose + w2c = torch.eye(4).cuda() + projmatrix = ( + w2c.unsqueeze(0).bmm(viewpoint_camera.projection_matrix.unsqueeze(0)) + ).squeeze(0) + camera_pos = w2c.inverse()[3, :3] + raster_settings = GaussianRasterizationSettings( + image_height=int(viewpoint_camera.image_height), + image_width=int(viewpoint_camera.image_width), + tanfovx=tanfovx, + tanfovy=tanfovy, + bg=bg_color, + scale_modifier=scaling_modifier, + # viewmatrix=viewpoint_camera.world_view_transform, + # projmatrix=viewpoint_camera.full_proj_transform, + viewmatrix=w2c, + projmatrix=projmatrix, + sh_degree=pc.active_sh_degree, + # campos=viewpoint_camera.camera_center, + campos=camera_pos, + prefiltered=False, + debug=pipe.debug, + ) + + rasterizer = GaussianRasterizer(raster_settings=raster_settings) + + # means3D = pc.get_xyz + rel_w2c = get_camera_from_tensor(camera_pose) + # Transform mean and rot of Gaussians to camera frame + gaussians_xyz = pc._xyz.clone()[filtering] + gaussians_rot = pc._rotation.clone()[filtering] + + xyz_ones = torch.ones(gaussians_xyz.shape[0], 1).cuda().float() + xyz_homo = torch.cat((gaussians_xyz, xyz_ones), dim=1) + gaussians_xyz_trans = (rel_w2c @ xyz_homo.T).T[:, :3] + gaussians_rot_trans = quadmultiply(camera_pose[:4], gaussians_rot) + means3D = gaussians_xyz_trans + means2D = screenspace_points + opacity = pc.get_opacity[filtering] + opacity = torch.ones_like(opacity) + + # If precomputed 3d covariance is provided, use it. 
If not, then it will be computed from + # scaling / rotation by the rasterizer. + scales = None + rotations = None + cov3D_precomp = None + if pipe.compute_cov3D_python: + cov3D_precomp = pc.get_covariance(scaling_modifier) + else: + scales = pc.get_scaling[filtering] + rotations = gaussians_rot_trans # pc.get_rotation + + shs = None + colors_precomp = pc._conf[filtering].unsqueeze(1).repeat(1, 3) + + # Rasterize visible Gaussians to image, obtain their radii (on screen). + rendered_image, radii = rasterizer( + means3D=means3D, + means2D=means2D, + shs=shs, + colors_precomp=colors_precomp, + opacities=opacity, + scales=scales, + rotations=rotations, + cov3D_precomp=cov3D_precomp, + ) + + return rendered_image diff --git a/gaussian_renderer/__init__3dgs.py b/gaussian_renderer/__init__3dgs.py new file mode 100755 index 0000000000000000000000000000000000000000..65cf9e9df4653797d7955b781c685b085d5d7bd1 --- /dev/null +++ b/gaussian_renderer/__init__3dgs.py @@ -0,0 +1,100 @@ +# +# Copyright (C) 2023, Inria +# GRAPHDECO research group, https://team.inria.fr/graphdeco +# All rights reserved. +# +# This software is free for non-commercial, research and evaluation use +# under the terms of the LICENSE.md file. +# +# For inquiries contact george.drettakis@inria.fr +# + +import torch +import math +from diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianRasterizer +from scene.gaussian_model import GaussianModel +from utils.sh_utils import eval_sh + +def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor, scaling_modifier = 1.0, override_color = None): + """ + Render the scene. + + Background tensor (bg_color) must be on GPU! + """ + + # Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means + screenspace_points = torch.zeros_like(pc.get_xyz, dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda") + 0 + try: + screenspace_points.retain_grad() + except: + pass + + # Set up rasterization configuration + tanfovx = math.tan(viewpoint_camera.FoVx * 0.5) + tanfovy = math.tan(viewpoint_camera.FoVy * 0.5) + + raster_settings = GaussianRasterizationSettings( + image_height=int(viewpoint_camera.image_height), + image_width=int(viewpoint_camera.image_width), + tanfovx=tanfovx, + tanfovy=tanfovy, + bg=bg_color, + scale_modifier=scaling_modifier, + viewmatrix=viewpoint_camera.world_view_transform, + projmatrix=viewpoint_camera.full_proj_transform, + sh_degree=pc.active_sh_degree, + campos=viewpoint_camera.camera_center, + prefiltered=False, + debug=pipe.debug + ) + + rasterizer = GaussianRasterizer(raster_settings=raster_settings) + + means3D = pc.get_xyz + means2D = screenspace_points + opacity = pc.get_opacity + + # If precomputed 3d covariance is provided, use it. If not, then it will be computed from + # scaling / rotation by the rasterizer. + scales = None + rotations = None + cov3D_precomp = None + if pipe.compute_cov3D_python: + cov3D_precomp = pc.get_covariance(scaling_modifier) + else: + scales = pc.get_scaling + rotations = pc.get_rotation + + # If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors + # from SHs in Python, do it. If not, then SH -> RGB conversion will be done by rasterizer. 
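+    # eval_sh returns colors centered around zero; the + 0.5 offset and
+    # clamp_min below shift them into non-negative RGB, matching what the
+    # rasterizer's built-in SH stage produces.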
+ shs = None + colors_precomp = None + if override_color is None: + if pipe.convert_SHs_python: + shs_view = pc.get_features.transpose(1, 2).view(-1, 3, (pc.max_sh_degree+1)**2) + dir_pp = (pc.get_xyz - viewpoint_camera.camera_center.repeat(pc.get_features.shape[0], 1)) + dir_pp_normalized = dir_pp/dir_pp.norm(dim=1, keepdim=True) + sh2rgb = eval_sh(pc.active_sh_degree, shs_view, dir_pp_normalized) + colors_precomp = torch.clamp_min(sh2rgb + 0.5, 0.0) + else: + shs = pc.get_features + else: + colors_precomp = override_color + + # Rasterize visible Gaussians to image, obtain their radii (on screen). + rendered_image, radii = rasterizer( + means3D = means3D, + means2D = means2D, + shs = shs, + colors_precomp = colors_precomp, + opacities = opacity, + scales = scales, + rotations = rotations, + cov3D_precomp = cov3D_precomp) + + # Those Gaussians that were frustum culled or had a radius of 0 were not visible. + # They will be excluded from value updates used in the splitting criteria. + return {"render": rendered_image, + "viewspace_points": screenspace_points, + "visibility_filter" : radii > 0, + "radii": radii} \ No newline at end of file diff --git a/gaussian_renderer/network_gui.py b/gaussian_renderer/network_gui.py new file mode 100755 index 0000000000000000000000000000000000000000..df2f9dae782b24527ae5b09f91ca4009361de53f --- /dev/null +++ b/gaussian_renderer/network_gui.py @@ -0,0 +1,86 @@ +# +# Copyright (C) 2023, Inria +# GRAPHDECO research group, https://team.inria.fr/graphdeco +# All rights reserved. +# +# This software is free for non-commercial, research and evaluation use +# under the terms of the LICENSE.md file. +# +# For inquiries contact george.drettakis@inria.fr +# + +import torch +import traceback +import socket +import json +from scene.cameras import MiniCam + +host = "127.0.0.1" +port = 6009 + +conn = None +addr = None + +listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + +def init(wish_host, wish_port): + global host, port, listener + host = wish_host + port = wish_port + listener.bind((host, port)) + listener.listen() + listener.settimeout(0) + +def try_connect(): + global conn, addr, listener + try: + conn, addr = listener.accept() + print(f"\nConnected by {addr}") + conn.settimeout(None) + except Exception as inst: + pass + +def read(): + global conn + messageLength = conn.recv(4) + messageLength = int.from_bytes(messageLength, 'little') + message = conn.recv(messageLength) + return json.loads(message.decode("utf-8")) + +def send(message_bytes, verify): + global conn + if message_bytes != None: + conn.sendall(message_bytes) + conn.sendall(len(verify).to_bytes(4, 'little')) + conn.sendall(bytes(verify, 'ascii')) + +def receive(): + message = read() + + width = message["resolution_x"] + height = message["resolution_y"] + + if width != 0 and height != 0: + try: + do_training = bool(message["train"]) + fovy = message["fov_y"] + fovx = message["fov_x"] + znear = message["z_near"] + zfar = message["z_far"] + do_shs_python = bool(message["shs_python"]) + do_rot_scale_python = bool(message["rot_scale_python"]) + keep_alive = bool(message["keep_alive"]) + scaling_modifier = message["scaling_modifier"] + world_view_transform = torch.reshape(torch.tensor(message["view_matrix"]), (4, 4)).cuda() + world_view_transform[:,1] = -world_view_transform[:,1] + world_view_transform[:,2] = -world_view_transform[:,2] + full_proj_transform = torch.reshape(torch.tensor(message["view_projection_matrix"]), (4, 4)).cuda() + full_proj_transform[:,1] = -full_proj_transform[:,1] + 
custom_cam = MiniCam(width, height, fovy, fovx, znear, zfar, world_view_transform, full_proj_transform) + except Exception as e: + print("") + traceback.print_exc() + raise e + return custom_cam, do_training, do_shs_python, do_rot_scale_python, keep_alive, scaling_modifier + else: + return None, None, None, None, None, None \ No newline at end of file diff --git a/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0000.jpg b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0000.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a80ffa8856d21e0e0ce248aaa28f1d0c0b34e552 Binary files /dev/null and b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0000.jpg differ diff --git a/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0001.jpg b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0001.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3a03a653b661b3642cae3252d5cff8ce34d62424 Binary files /dev/null and b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0001.jpg differ diff --git a/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0002.jpg b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0002.jpg new file mode 100644 index 0000000000000000000000000000000000000000..45d4cee6499a17d42e2cb802a072f7dcf56484d3 Binary files /dev/null and b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0002.jpg differ diff --git a/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0003.jpg b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0003.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8d104333700949e4158ca6214124b66a5294a3e3 Binary files /dev/null and b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0003.jpg differ diff --git a/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0004.jpg b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0004.jpg new file mode 100644 index 0000000000000000000000000000000000000000..53a8905ae8238221eee9de488da84f85ae5e1149 Binary files /dev/null and b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0004.jpg differ diff --git a/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0005.jpg b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0005.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a01fcfed9eaa6896fa256cbd8e0f9b7a130a3ab Binary files /dev/null and b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0005.jpg differ diff --git a/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0006.jpg b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0006.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dc590cbba10f2351aad3ca13b749e632e258197e Binary files /dev/null and b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0006.jpg differ diff --git a/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0007.jpg b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0007.jpg new file mode 100644 index 0000000000000000000000000000000000000000..016945e123b82ae86187e1494dfa31e420ba90ec Binary files /dev/null and b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0007.jpg differ diff --git a/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0008.jpg b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0008.jpg new file mode 100644 index 0000000000000000000000000000000000000000..890d85970686785162e4d34080ef1c9bb2fb2a9b Binary files /dev/null and 
b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0008.jpg differ diff --git a/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0009.jpg b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0009.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f379d96ad082123aa91298e352488419d7d733b2 Binary files /dev/null and b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0009.jpg differ diff --git a/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0010.jpg b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0010.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e15962042fc33ac86dc98876990ef9e0f9d7d969 Binary files /dev/null and b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0010.jpg differ diff --git a/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0011.jpg b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0011.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9cab6d2f908ef604db3196fd4448857eae8e9688 Binary files /dev/null and b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0011.jpg differ diff --git a/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0012.jpg b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0012.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b9345f5dda0debc8405e30326c987451964fab3f Binary files /dev/null and b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0012.jpg differ diff --git a/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0013.jpg b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0013.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fb2e8d88fefa5a844b69e0591c8f6f9d61967eaf Binary files /dev/null and b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0013.jpg differ diff --git a/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0014.jpg b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0014.jpg new file mode 100644 index 0000000000000000000000000000000000000000..caff6b3feb8cba9a00e036a93b660bc6d28324c6 Binary files /dev/null and b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0014.jpg differ diff --git a/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0015.jpg b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0015.jpg new file mode 100644 index 0000000000000000000000000000000000000000..85ca5a410828b249a508417e3f8a0c1d59904078 Binary files /dev/null and b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0015.jpg differ diff --git a/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0016.jpg b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0016.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8dd6700e13d3cdbdd7947756dc43595ce1e99ac5 Binary files /dev/null and b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0016.jpg differ diff --git a/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0017.jpg b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0017.jpg new file mode 100644 index 0000000000000000000000000000000000000000..974ed8d6bad6a788578f370a1943a491c89c4f4b Binary files /dev/null and b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0017.jpg differ diff --git a/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0018.jpg b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0018.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..e3d517eb86ca001ceee5e40a63c6ea242942800c Binary files /dev/null and b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0018.jpg differ diff --git a/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0019.jpg b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0019.jpg new file mode 100644 index 0000000000000000000000000000000000000000..89deb5393c73c54f05d5b5ae6a625f1cd4ad48d7 Binary files /dev/null and b/gradio_cache_folder/1fd0addf-d42a-4fa4-866d-51457a3458a3/0019.jpg differ diff --git a/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0000.jpg b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0000.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a80ffa8856d21e0e0ce248aaa28f1d0c0b34e552 Binary files /dev/null and b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0000.jpg differ diff --git a/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0001.jpg b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0001.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3a03a653b661b3642cae3252d5cff8ce34d62424 Binary files /dev/null and b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0001.jpg differ diff --git a/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0002.jpg b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0002.jpg new file mode 100644 index 0000000000000000000000000000000000000000..45d4cee6499a17d42e2cb802a072f7dcf56484d3 Binary files /dev/null and b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0002.jpg differ diff --git a/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0003.jpg b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0003.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8d104333700949e4158ca6214124b66a5294a3e3 Binary files /dev/null and b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0003.jpg differ diff --git a/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0004.jpg b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0004.jpg new file mode 100644 index 0000000000000000000000000000000000000000..53a8905ae8238221eee9de488da84f85ae5e1149 Binary files /dev/null and b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0004.jpg differ diff --git a/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0005.jpg b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0005.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a01fcfed9eaa6896fa256cbd8e0f9b7a130a3ab Binary files /dev/null and b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0005.jpg differ diff --git a/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0006.jpg b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0006.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dc590cbba10f2351aad3ca13b749e632e258197e Binary files /dev/null and b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0006.jpg differ diff --git a/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0007.jpg b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0007.jpg new file mode 100644 index 0000000000000000000000000000000000000000..016945e123b82ae86187e1494dfa31e420ba90ec Binary files /dev/null and b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0007.jpg differ diff --git a/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0008.jpg 
b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0008.jpg new file mode 100644 index 0000000000000000000000000000000000000000..890d85970686785162e4d34080ef1c9bb2fb2a9b Binary files /dev/null and b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0008.jpg differ diff --git a/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0009.jpg b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0009.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f379d96ad082123aa91298e352488419d7d733b2 Binary files /dev/null and b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0009.jpg differ diff --git a/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0010.jpg b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0010.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e15962042fc33ac86dc98876990ef9e0f9d7d969 Binary files /dev/null and b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0010.jpg differ diff --git a/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0011.jpg b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0011.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9cab6d2f908ef604db3196fd4448857eae8e9688 Binary files /dev/null and b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0011.jpg differ diff --git a/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0012.jpg b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0012.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b9345f5dda0debc8405e30326c987451964fab3f Binary files /dev/null and b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0012.jpg differ diff --git a/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0013.jpg b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0013.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fb2e8d88fefa5a844b69e0591c8f6f9d61967eaf Binary files /dev/null and b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0013.jpg differ diff --git a/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0014.jpg b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0014.jpg new file mode 100644 index 0000000000000000000000000000000000000000..caff6b3feb8cba9a00e036a93b660bc6d28324c6 Binary files /dev/null and b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0014.jpg differ diff --git a/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0015.jpg b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0015.jpg new file mode 100644 index 0000000000000000000000000000000000000000..85ca5a410828b249a508417e3f8a0c1d59904078 Binary files /dev/null and b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0015.jpg differ diff --git a/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0016.jpg b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0016.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8dd6700e13d3cdbdd7947756dc43595ce1e99ac5 Binary files /dev/null and b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0016.jpg differ diff --git a/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0017.jpg b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0017.jpg new file mode 100644 index 0000000000000000000000000000000000000000..974ed8d6bad6a788578f370a1943a491c89c4f4b Binary files /dev/null and b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0017.jpg differ diff --git 
a/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0018.jpg b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0018.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e3d517eb86ca001ceee5e40a63c6ea242942800c Binary files /dev/null and b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0018.jpg differ diff --git a/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0019.jpg b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0019.jpg new file mode 100644 index 0000000000000000000000000000000000000000..89deb5393c73c54f05d5b5ae6a625f1cd4ad48d7 Binary files /dev/null and b/gradio_cache_folder/fe0d52a7-6668-4a47-b0d7-fe7ad7c7cd4f/0019.jpg differ diff --git a/render.py b/render.py new file mode 100755 index 0000000000000000000000000000000000000000..cbf1a54aae49b27744f6fab3ec2ec8f9e9da2b82 --- /dev/null +++ b/render.py @@ -0,0 +1,152 @@ +# +# Copyright (C) 2023, Inria +# GRAPHDECO research group, https://team.inria.fr/graphdeco +# All rights reserved. +# +# This software is free for non-commercial, research and evaluation use +# under the terms of the LICENSE.md file. +# +# For inquiries contact george.drettakis@inria.fr +# + +import torch +from scene import Scene +import os +from tqdm import tqdm +from os import makedirs +from gaussian_renderer import render_test as render +import torchvision +from utils.general_utils import safe_state +from argparse import ArgumentParser +from arguments import ModelParams, PipelineParams, get_combined_args +from gaussian_renderer import GaussianModel +from utils.pose_utils import get_tensor_from_camera +from utils.camera_utils import generate_interpolated_path +from utils.camera_utils import visualizer +import cv2 +import numpy as np +import imageio + + +def save_interpolate_pose(model_path, iter, n_views=0): + + org_pose = np.load(model_path + f"pose/pose_{iter}.npy") + visualizer(org_pose, ["green" for _ in org_pose], model_path + "pose/poses_optimized.png") + # n_interp = int(10 * 30 / n_views) # 10second, fps=30 + # all_inter_pose = [] + # for i in range(n_views-1): + # tmp_inter_pose = generate_interpolated_path(poses=org_pose[i:i+2], n_interp=n_interp) + # all_inter_pose.append(tmp_inter_pose) + # all_inter_pose = np.array(all_inter_pose).reshape(-1, 3, 4) + + all_inter_pose = org_pose + + inter_pose_list = [] + for p in all_inter_pose: + tmp_view = np.eye(4) + tmp_view[:3, :3] = p[:3, :3] + tmp_view[:3, 3] = p[:3, 3] + inter_pose_list.append(tmp_view) + inter_pose = np.stack(inter_pose_list, 0) + visualizer(inter_pose, ["blue" for _ in inter_pose], model_path + "pose/poses_interpolated.png") + np.save(model_path + "pose/pose_interpolated.npy", inter_pose) + + +def images_to_video(image_folder, output_video_path, fps=30): + """ + Convert images in a folder to a video. + + Args: + - image_folder (str): The path to the folder containing the images. + - output_video_path (str): The path where the output video will be saved. + - fps (int): Frames per second for the output video. 
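+ Note: writing .mp4 output here relies on an FFmpeg-capable imageio backend (imageio-ffmpeg is listed in requirements.txt); fps only sets playback speed, frames are not resampled.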
+ """ + images = [] + + for filename in sorted(os.listdir(image_folder)): + if filename.endswith(('.png', '.jpg', '.jpeg', '.JPG', '.PNG')): + image_path = os.path.join(image_folder, filename) + image = imageio.imread(image_path) + images.append(image) + imageio.mimwrite(output_video_path, images, fps=fps) + + +def render_set(model_path, name, iteration, views, gaussians, pipeline, background): + render_path = os.path.join(model_path, name, "ours_{}".format(iteration), "renders") + makedirs(render_path, exist_ok=True) + + for idx, view in enumerate(tqdm(views, desc="Rendering progress")): + camera_pose = get_tensor_from_camera(view.world_view_transform.transpose(0, 1)) + rendering = render( + view, gaussians, pipeline, background, camera_pose=camera_pose + )["render"] + gt = view.original_image[0:3, :, :] + torchvision.utils.save_image( + rendering, os.path.join(render_path, "{0:05d}".format(idx) + ".png") + ) + + +def render_sets( + dataset: ModelParams, + iteration: int, + pipeline: PipelineParams, + skip_train: bool, + skip_test: bool, + args, +): + + # Applying interpolation + # save_interpolate_pose(dataset.model_path, iteration, args.n_views) + save_interpolate_pose(dataset.model_path, iteration) + + with torch.no_grad(): + gaussians = GaussianModel(dataset.sh_degree) + scene = Scene(dataset, gaussians, load_iteration=iteration, opt=args, shuffle=False) + + bg_color = [1, 1, 1] if dataset.white_background else [0, 0, 0] + background = torch.tensor(bg_color, dtype=torch.float32, device="cuda") + + # render interpolated views + render_set( + dataset.model_path, + "interp", + scene.loaded_iter, + scene.getTrainCameras(), + gaussians, + pipeline, + background, + ) + + if args.get_video: + image_folder = os.path.join(dataset.model_path, f'interp/ours_{args.iteration}/renders') + output_video_file = os.path.join(dataset.model_path, 'rendered.mp4') + images_to_video(image_folder, output_video_file, fps=15) + + +if __name__ == "__main__": + # Set up command line argument parser + parser = ArgumentParser(description="Testing script parameters") + model = ModelParams(parser, sentinel=True) + pipeline = PipelineParams(parser) + parser.add_argument("--iteration", default=-1, type=int) + parser.add_argument("--skip_train", action="store_true") + parser.add_argument("--skip_test", action="store_true") + parser.add_argument("--quiet", action="store_true") + + parser.add_argument("--get_video", action="store_true") + parser.add_argument("--n_views", default=None, type=int) + parser.add_argument("--scene", default=None, type=str) + args = get_combined_args(parser) + print("Rendering " + args.model_path) + + # Initialize system state (RNG) + # safe_state(args.quiet) + args.eval = False + render_sets( + model.extract(args), + args.iteration, + pipeline.extract(args), + args.skip_train, + args.skip_test, + args, + ) diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..21a2020b0503d4b6c051804bd4f74a4d0b929958 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,27 @@ +torch +torchvision +roma +evo +gradio +matplotlib +tqdm +opencv-python +scipy +einops +trimesh +tensorboard +pyglet<2 +huggingface-hub[torch]>=0.22 +gdown +imageio +wandb +scikit-learn +scikit-image +prettytable +pyntcloud +h5py +plyfile +open3d +dearpygui +imageio-ffmpeg +spaces \ No newline at end of file diff --git a/scene/__init__.py b/scene/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..7ee76550c0afdaf8b973ec3fdea8b588cec34b90 ---
/dev/null +++ b/scene/__init__.py @@ -0,0 +1,104 @@ +# +# Copyright (C) 2023, Inria +# GRAPHDECO research group, https://team.inria.fr/graphdeco +# All rights reserved. +# +# This software is free for non-commercial, research and evaluation use +# under the terms of the LICENSE.md file. +# +# For inquiries contact george.drettakis@inria.fr +# + +import os +import random +import json +from utils.system_utils import searchForMaxIteration +from scene.dataset_readers import sceneLoadTypeCallbacks +from scene.gaussian_model import GaussianModel +from arguments import ModelParams +from utils.camera_utils import cameraList_from_camInfos, camera_to_JSON +import open3d as o3d + +class Scene: + + gaussians : GaussianModel + + def __init__(self, args : ModelParams, gaussians : GaussianModel, load_iteration=None, opt=None, shuffle=True, resolution_scales=[1.0]): + """ + :param args: ModelParams; args.source_path is the path to the COLMAP scene main folder. + """ + self.model_path = args.model_path + self.loaded_iter = None + self.gaussians = gaussians + + if load_iteration: + if load_iteration == -1: + self.loaded_iter = searchForMaxIteration(os.path.join(self.model_path, "point_cloud")) + else: + self.loaded_iter = load_iteration + print("Loading trained model at iteration {}".format(self.loaded_iter)) + + self.train_cameras = {} + self.test_cameras = {} + + if os.path.exists(os.path.join(args.source_path, "sparse")): + scene_info = sceneLoadTypeCallbacks["Colmap"](args.source_path, args.images, args.eval, args, opt) + + elif os.path.exists(os.path.join(args.source_path, "transforms_train.json")): + print("Found transforms_train.json file, assuming Blender data set!") + scene_info = sceneLoadTypeCallbacks["Blender"](args.source_path, args.white_background, args.eval) + else: + assert False, "Could not recognize scene type!"
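+ + # Scene-type dispatch (summary): a COLMAP-style export is detected by the presence of <source_path>/sparse, a Blender/NeRF-synthetic set by transforms_train.json; anything else trips the assert above. + # Typical construction, as in render.py (sketch): Scene(dataset, GaussianModel(dataset.sh_degree), load_iteration=iteration, opt=args, shuffle=False).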
+ + # if not self.loaded_iter: + # with open(scene_info.ply_path, 'rb') as src_file, open(os.path.join(self.model_path, "input.ply") , 'wb') as dest_file: + # dest_file.write(src_file.read()) + # json_cams = [] + # camlist = [] + # if scene_info.test_cameras: + # camlist.extend(scene_info.test_cameras) + # if scene_info.train_cameras: + # camlist.extend(scene_info.train_cameras) + # for id, cam in enumerate(camlist): + # json_cams.append(camera_to_JSON(id, cam)) + # with open(os.path.join(self.model_path, "cameras.json"), 'w') as file: + # json.dump(json_cams, file) + + if shuffle: + random.shuffle(scene_info.train_cameras) # Multi-res consistent random shuffling + random.shuffle(scene_info.test_cameras) # Multi-res consistent random shuffling + + self.cameras_extent = scene_info.nerf_normalization["radius"] + + for resolution_scale in resolution_scales: + print("Loading Training Cameras") + self.train_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.train_cameras, resolution_scale, args) + print('train_camera_num: ', len(self.train_cameras[resolution_scale])) + print("Loading Test Cameras") + self.test_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.test_cameras, resolution_scale, args) + print('test_camera_num: ', len(self.test_cameras[resolution_scale])) + + if self.loaded_iter: + self.gaussians.load_ply(os.path.join(self.model_path, + "point_cloud", + "iteration_" + str(self.loaded_iter), + "point_cloud.ply")) + else: + if scene_info.point_cloud is None: + self.gaussians.create_from_cameras(self.train_cameras, self.cameras_extent) + else: + self.gaussians.create_from_pcd(scene_info.point_cloud, self.cameras_extent) + self.gaussians.init_RT_seq(self.train_cameras) + self.gaussians.init_fov(self.train_cameras) + self.gaussians.init_test_RT_seq(self.test_cameras) + + + def save(self, iteration): + point_cloud_path = os.path.join(self.model_path, "point_cloud/iteration_{}".format(iteration)) + self.gaussians.save_ply(os.path.join(point_cloud_path, "point_cloud.ply")) + + def getTrainCameras(self, scale=1.0): + return self.train_cameras[scale] + + def getTestCameras(self, scale=1.0): + return self.test_cameras[scale] \ No newline at end of file diff --git a/scene/cameras.py b/scene/cameras.py new file mode 100755 index 0000000000000000000000000000000000000000..07e7d6b5ec728291670bcf680e9fc3025408bf0f --- /dev/null +++ b/scene/cameras.py @@ -0,0 +1,113 @@ +# +# Copyright (C) 2023, Inria +# GRAPHDECO research group, https://team.inria.fr/graphdeco +# All rights reserved. +# +# This software is free for non-commercial, research and evaluation use +# under the terms of the LICENSE.md file. 
+# +# For inquiries contact george.drettakis@inria.fr +# + +import torch +from torch import nn +import numpy as np +from utils.graphics_utils import getWorld2View2, getProjectionMatrix + +class Camera(nn.Module): + def __init__(self, colmap_id, intr, R, T, original_pose, FoVx, FoVy, image, gt_alpha_mask, dynamic_mask, enlarged_dynamic_mask, + dyna_avg_map, dyna_max_map, gt_dynamic_mask, + conf_map, depth_map, + image_name, uid, + trans=np.array([0.0, 0.0, 0.0]), scale=1.0, data_device = "cuda" + ): + super(Camera, self).__init__() + + self.uid = uid + self.colmap_id = colmap_id + self.R = R + self.T = T + self.FoVx = FoVx + self.FoVy = FoVy + self.image_name = image_name + + try: + self.data_device = torch.device(data_device) + except Exception as e: + print(e) + print(f"[Warning] Custom device {data_device} failed, fallback to default cuda device" ) + self.data_device = torch.device("cuda") + + self.original_image = image.clamp(0.0, 1.0).to(self.data_device) + self.image_width = self.original_image.shape[2] + self.image_height = self.original_image.shape[1] + + if original_pose is not None: + self.original_pose = torch.tensor(original_pose, dtype=torch.float32).to(self.data_device) + + if intr is not None: + self.intr = intr + + if conf_map is not None: + self.conf_map = conf_map.to(self.data_device) + + if depth_map is not None: + self.depth_map = depth_map.to(self.data_device) + + if dynamic_mask is not None: + self.dynamic_mask = dynamic_mask.to(self.data_device) + + if gt_dynamic_mask is not None: + gt_dynamic_mask = gt_dynamic_mask.to(self.data_device) + gt_dynamic_mask = gt_dynamic_mask.unsqueeze(0).repeat(3, 1, 1).unsqueeze(0).float() + self.gt_dynamic_mask = torch.nn.functional.interpolate( + gt_dynamic_mask, + size=(self.image_height, self.image_width), + mode="nearest", + ).squeeze(0) + + + if enlarged_dynamic_mask is not None: + self.enlarged_dynamic_mask = enlarged_dynamic_mask.to(self.data_device) + + if dyna_avg_map is not None: + self.dyna_avg_map = dyna_avg_map.to(self.data_device) + + if dyna_max_map is not None: + self.dyna_max_map = dyna_max_map.to(self.data_device) + + if gt_alpha_mask is not None: + self.original_image *= gt_alpha_mask.to(self.data_device) + else: + self.original_image *= torch.ones((1, self.image_height, self.image_width), device=self.data_device) + + self.zfar = 100.0 + self.znear = 0.01 + + self.trans = trans + self.scale = scale + + self.world_view_transform = torch.tensor(getWorld2View2(R, T, trans, scale)).transpose(0, 1).cuda() + self.projection_matrix = getProjectionMatrix(znear=self.znear, zfar=self.zfar, fovX=self.FoVx, fovY=self.FoVy).transpose(0,1).cuda() + self.full_proj_transform = (self.world_view_transform.unsqueeze(0).bmm(self.projection_matrix.unsqueeze(0))).squeeze(0) + self.camera_center = self.world_view_transform.inverse()[3, :3] + + def get_full_proj_transform(self, FoVx, FoVy): + self.projection_matrix = getProjectionMatrix(znear=self.znear, zfar=self.zfar, fovX=FoVx, fovY=FoVy).transpose(0,1).cuda() + return (self.world_view_transform.unsqueeze(0).bmm(self.projection_matrix.unsqueeze(0))).squeeze(0) + + def get_projection_matrix(self, FoVx, FoVy): + return getProjectionMatrix(znear=self.znear, zfar=self.zfar, fovX=FoVx, fovY=FoVy).transpose(0,1).cuda() +class MiniCam: + def __init__(self, width, height, fovy, fovx, znear, zfar, world_view_transform, full_proj_transform): + self.image_width = width + self.image_height = height + self.FoVy = fovy + self.FoVx = fovx + self.znear = znear + self.zfar = zfar + 
self.world_view_transform = world_view_transform + self.full_proj_transform = full_proj_transform + view_inv = torch.inverse(self.world_view_transform) + self.camera_center = view_inv[3][:3] + diff --git a/scene/colmap_loader.py b/scene/colmap_loader.py new file mode 100755 index 0000000000000000000000000000000000000000..88b24f39303a768eed9513b68f21b75b8fe14227 --- /dev/null +++ b/scene/colmap_loader.py @@ -0,0 +1,296 @@ +# +# Copyright (C) 2023, Inria +# GRAPHDECO research group, https://team.inria.fr/graphdeco +# All rights reserved. +# +# This software is free for non-commercial, research and evaluation use +# under the terms of the LICENSE.md file. +# +# For inquiries contact george.drettakis@inria.fr +# + +import numpy as np +import collections +import struct + +CameraModel = collections.namedtuple( + "CameraModel", ["model_id", "model_name", "num_params"]) +Camera = collections.namedtuple( + "Camera", ["id", "model", "width", "height", "params"]) +BaseImage = collections.namedtuple( + "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"]) +Point3D = collections.namedtuple( + "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"]) +CAMERA_MODELS = { + CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), + CameraModel(model_id=1, model_name="PINHOLE", num_params=4), + CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), + CameraModel(model_id=3, model_name="RADIAL", num_params=5), + CameraModel(model_id=4, model_name="OPENCV", num_params=8), + CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), + CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), + CameraModel(model_id=7, model_name="FOV", num_params=5), + CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), + CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), + CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12) +} +CAMERA_MODEL_IDS = dict([(camera_model.model_id, camera_model) + for camera_model in CAMERA_MODELS]) +CAMERA_MODEL_NAMES = dict([(camera_model.model_name, camera_model) + for camera_model in CAMERA_MODELS]) + + +def qvec2rotmat(qvec): + return np.array([ + [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2, + 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], + 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]], + [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], + 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2, + 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]], + [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], + 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], + 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]]) + +def rotmat2qvec(R): + Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat + K = np.array([ + [Rxx - Ryy - Rzz, 0, 0, 0], + [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0], + [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0], + [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz]]) / 3.0 + eigvals, eigvecs = np.linalg.eigh(K) + qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)] + if qvec[0] < 0: + qvec *= -1 + return qvec + +class Image(BaseImage): + def qvec2rotmat(self): + return qvec2rotmat(self.qvec) + +def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): + """Read and unpack the next bytes from a binary file. + :param fid: + :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. + :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. + :param endian_character: Any of {@, =, <, >, !} + :return: Tuple of read and unpacked values. 
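+ Example: read_next_bytes(fid, 8, "Q")[0] unpacks a single little-endian uint64, e.g. the point count at the head of points3D.bin.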
+ """ + data = fid.read(num_bytes) + return struct.unpack(endian_character + format_char_sequence, data) + +def read_points3D_text(path): + """ + see: src/base/reconstruction.cc + void Reconstruction::ReadPoints3DText(const std::string& path) + void Reconstruction::WritePoints3DText(const std::string& path) + """ + xyzs = None + rgbs = None + errors = None + num_points = 0 + with open(path, "r") as fid: + while True: + line = fid.readline() + if not line: + break + line = line.strip() + if len(line) > 0 and line[0] != "#": + num_points += 1 + + + xyzs = np.empty((num_points, 3)) + rgbs = np.empty((num_points, 3)) + errors = np.empty((num_points, 1)) + count = 0 + with open(path, "r") as fid: + while True: + line = fid.readline() + if not line: + break + line = line.strip() + if len(line) > 0 and line[0] != "#": + elems = line.split() + xyz = np.array(tuple(map(float, elems[1:4]))) + rgb = np.array(tuple(map(int, elems[4:7]))) + error = np.array(float(elems[7])) + xyzs[count] = xyz + rgbs[count] = rgb + errors[count] = error + count += 1 + + return xyzs, rgbs, errors + +def read_points3D_binary(path_to_model_file): + """ + see: src/base/reconstruction.cc + void Reconstruction::ReadPoints3DBinary(const std::string& path) + void Reconstruction::WritePoints3DBinary(const std::string& path) + """ + + + with open(path_to_model_file, "rb") as fid: + num_points = read_next_bytes(fid, 8, "Q")[0] + + xyzs = np.empty((num_points, 3)) + rgbs = np.empty((num_points, 3)) + errors = np.empty((num_points, 1)) + + for p_id in range(num_points): + binary_point_line_properties = read_next_bytes( + fid, num_bytes=43, format_char_sequence="QdddBBBd") + xyz = np.array(binary_point_line_properties[1:4]) + rgb = np.array(binary_point_line_properties[4:7]) + error = np.array(binary_point_line_properties[7]) + track_length = read_next_bytes( + fid, num_bytes=8, format_char_sequence="Q")[0] + track_elems = read_next_bytes( + fid, num_bytes=8*track_length, + format_char_sequence="ii"*track_length) + xyzs[p_id] = xyz + rgbs[p_id] = rgb + errors[p_id] = error + return xyzs, rgbs, errors + +def read_intrinsics_text(path): + """ + Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py + """ + cameras = {} + with open(path, "r") as fid: + while True: + line = fid.readline() + if not line: + break + line = line.strip() + if len(line) > 0 and line[0] != "#": + elems = line.split() + camera_id = int(elems[0]) + model = elems[1] + assert model == "PINHOLE", "While the loader support other types, the rest of the code assumes PINHOLE" + width = int(elems[2]) + height = int(elems[3]) + params = np.array(tuple(map(float, elems[4:]))) + cameras[camera_id] = Camera(id=camera_id, model=model, + width=width, height=height, + params=params) + return cameras + +def read_extrinsics_binary(path_to_model_file): + """ + see: src/base/reconstruction.cc + void Reconstruction::ReadImagesBinary(const std::string& path) + void Reconstruction::WriteImagesBinary(const std::string& path) + """ + images = {} + with open(path_to_model_file, "rb") as fid: + num_reg_images = read_next_bytes(fid, 8, "Q")[0] + for _ in range(num_reg_images): + binary_image_properties = read_next_bytes( + fid, num_bytes=64, format_char_sequence="idddddddi") + image_id = binary_image_properties[0] + qvec = np.array(binary_image_properties[1:5]) + tvec = np.array(binary_image_properties[5:8]) + camera_id = binary_image_properties[8] + image_name = "" + current_char = read_next_bytes(fid, 1, "c")[0] + while current_char != b"\x00": # look 
for the ASCII 0 entry + image_name += current_char.decode("utf-8") + current_char = read_next_bytes(fid, 1, "c")[0] + num_points2D = read_next_bytes(fid, num_bytes=8, + format_char_sequence="Q")[0] + x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D, + format_char_sequence="ddq"*num_points2D) + xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])), + tuple(map(float, x_y_id_s[1::3]))]) + point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) + images[image_id] = Image( + id=image_id, qvec=qvec, tvec=tvec, + camera_id=camera_id, name=image_name, + xys=xys, point3D_ids=point3D_ids) + return images + + +def read_intrinsics_binary(path_to_model_file): + """ + see: src/base/reconstruction.cc + void Reconstruction::WriteCamerasBinary(const std::string& path) + void Reconstruction::ReadCamerasBinary(const std::string& path) + """ + cameras = {} + with open(path_to_model_file, "rb") as fid: + num_cameras = read_next_bytes(fid, 8, "Q")[0] + for _ in range(num_cameras): + camera_properties = read_next_bytes( + fid, num_bytes=24, format_char_sequence="iiQQ") + camera_id = camera_properties[0] + model_id = camera_properties[1] + model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name + width = camera_properties[2] + height = camera_properties[3] + num_params = CAMERA_MODEL_IDS[model_id].num_params + params = read_next_bytes(fid, num_bytes=8*num_params, + format_char_sequence="d"*num_params) + cameras[camera_id] = Camera(id=camera_id, + model=model_name, + width=width, + height=height, + params=np.array(params)) + assert len(cameras) == num_cameras + return cameras + + +def read_extrinsics_text(path): + """ + Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py + """ + images = {} + poses = [] + with open(path, "r") as fid: + while True: + line = fid.readline() + if not line: + break + line = line.strip() + if len(line) > 0 and line[0] != "#": + elems = line.split() + image_id = int(elems[0]) + qvec = np.array(tuple(map(float, elems[1:5]))) + tvec = np.array(tuple(map(float, elems[5:8]))) + camera_id = int(elems[8]) + image_name = elems[9] + elems = fid.readline().split() + xys = np.column_stack([tuple(map(float, elems[0::3])), + tuple(map(float, elems[1::3]))]) + point3D_ids = np.array(tuple(map(int, elems[2::3]))) + images[image_id] = Image( + id=image_id, qvec=qvec, tvec=tvec, + camera_id=camera_id, name=image_name, + xys=xys, point3D_ids=point3D_ids) + poses.append(elems[1:8]) + return images + + +def read_colmap_bin_array(path): + """ + Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_dense.py + + :param path: path to the colmap binary file. + :return: nd array with the floating point values in the value + """ + with open(path, "rb") as fid: + width, height, channels = np.genfromtxt(fid, delimiter="&", max_rows=1, + usecols=(0, 1, 2), dtype=int) + fid.seek(0) + num_delimiter = 0 + byte = fid.read(1) + while True: + if byte == b"&": + num_delimiter += 1 + if num_delimiter >= 3: + break + byte = fid.read(1) + array = np.fromfile(fid, np.float32) + array = array.reshape((width, height, channels), order="F") + return np.transpose(array, (1, 0, 2)).squeeze() diff --git a/scene/dataset_readers.py b/scene/dataset_readers.py new file mode 100755 index 0000000000000000000000000000000000000000..c43d3b77b181cfce205bdde9b5de2db914d19185 --- /dev/null +++ b/scene/dataset_readers.py @@ -0,0 +1,475 @@ +# +# Copyright (C) 2023, Inria +# GRAPHDECO research group, https://team.inria.fr/graphdeco +# All rights reserved. 
+# +# This software is free for non-commercial, research and evaluation use +# under the terms of the LICENSE.md file. +# +# For inquiries contact george.drettakis@inria.fr +# + +import os +import sys +from PIL import Image +from typing import NamedTuple +from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \ + read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text +from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal, to_open3d_point_cloud +import numpy as np +import json +from pathlib import Path +from plyfile import PlyData, PlyElement +from utils.sh_utils import SH2RGB +from scene.gaussian_model import BasicPointCloud +from utils.vo_eval import file_interface +import torch +from utils.pose_utils import quad2rotation + +class CameraInfo(NamedTuple): + uid: int + intr: object + R: np.array + T: np.array + FovY: np.array + FovX: np.array + image: np.array + image_path: str + image_name: str + width: int + height: int + dynamic_mask: np.array + enlarged_dynamic_mask: np.array + conf_map: np.array + depth_map: np.array + dyna_avg_map: np.array + dyna_max_map: np.array + original_pose: np.array + gt_dynamic_mask: np.array + +class SceneInfo(NamedTuple): + point_cloud: BasicPointCloud + train_cameras: list + test_cameras: list + nerf_normalization: dict + ply_path: str + train_poses: list + test_poses: list + +def getNerfppNorm(cam_info): + def get_center_and_diag(cam_centers): + cam_centers = np.hstack(cam_centers) + avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True) + center = avg_cam_center + dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True) + diagonal = np.max(dist) + return center.flatten(), diagonal + + cam_centers = [] + + for cam in cam_info: + W2C = getWorld2View2(cam.R, cam.T) + C2W = np.linalg.inv(W2C) + cam_centers.append(C2W[:3, 3:4]) + + center, diagonal = get_center_and_diag(cam_centers) + radius = diagonal * 1.1 + + translate = -center + + return {"translate": translate, "radius": radius} + +def tumpose_to_c2w(tum_pose): + """ + Convert a TUM pose (translation and quaternion) back to a camera-to-world matrix (4x4).
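+ + Example (assuming the xyzw quaternion convention that quad2rotation expects, as noted below): tumpose_to_c2w(np.array([0., 0., 0., 1., 0., 0., 0.])) returns np.eye(4), i.e. the identity pose.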
+ + input: tum_pose - 7-element array: [x, y, z, qw, qx, qy, qz] + output: c2w - 4x4 camera-to-world matrix + """ + # Extract translation and quaternion from the TUM pose + xyz = tum_pose[:3] + + # quad2rotation expects (qx, qy, qz, qw), so reorder from the stored (qw, qx, qy, qz) + qw, qx, qy, qz = tum_pose[3:] + quat = torch.tensor([qx, qy, qz, qw]) + + # Convert the quaternion to a rotation matrix via quad2rotation (utils.pose_utils) + R = quad2rotation(quat.unsqueeze(0)).squeeze(0).numpy() # 3x3 rotation matrix + + # Create the 4x4 camera-to-world matrix + c2w = np.eye(4) + c2w[:3, :3] = R # Rotation part + c2w[:3, 3] = xyz # Translation part + + return c2w + + +def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder, eval, opt): + + cam_infos = [] + poses = [] + original_poses = [] + extrinsics_path = os.path.join(os.path.dirname(images_folder), "pred_traj.txt") + + traj = file_interface.read_tum_trajectory_file(extrinsics_path) + xyz = traj.positions_xyz + quat = traj.orientations_quat_wxyz + timestamps_mat = traj.timestamps + traj_tum = np.column_stack((xyz, quat)) + for i in range(traj_tum.shape[0]): + pose = tumpose_to_c2w(traj_tum[i]) + original_poses.append(pose) + + for idx, key in enumerate(cam_extrinsics): + sys.stdout.write('\r') + # overwrite the same console line to show reading progress + sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics))) + sys.stdout.flush() + + + extr = cam_extrinsics[key] + intr = cam_intrinsics[extr.camera_id] + uid = intr.id + + height = intr.height + width = intr.width + R = np.transpose(qvec2rotmat(extr.qvec)) + T = np.array(extr.tvec) + pose = np.vstack((np.hstack((R, T.reshape(3,-1))),np.array([[0, 0, 0, 1]]))) + poses.append(pose) + + if intr.model=="SIMPLE_PINHOLE": + focal_length_x = intr.params[0] + FovY = focal2fov(focal_length_x, height) + FovX = focal2fov(focal_length_x, width) + elif intr.model=="PINHOLE": + focal_length_x = intr.params[0] + focal_length_y = intr.params[1] + FovY = focal2fov(focal_length_y, height) + FovX = focal2fov(focal_length_x, width) + else: + assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"
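+ # Sanity check, assuming the usual focal2fov(focal, pixels) = 2*atan(pixels/(2*focal)) from utils.graphics_utils: a 500 px focal length at width 1000 px gives FovX = 2*atan(1.0), i.e. 90 degrees.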
+ + + image_path = os.path.join(images_folder, os.path.basename(extr.name)) + image_name = os.path.basename(image_path).split(".")[0] + image = Image.open(image_path) + + seq_name = image_path.split("/")[-3] + idx_str = image_path.split("/")[-1].split(".")[0].split("_")[-1] + seq_path = '/'.join(image_path.split("/")[:-2]) + intrinsics_path = os.path.join(seq_path, "pred_intrinsics.txt") + conf_path = os.path.join(seq_path, "confidence_maps", f"conf_{idx_str}.npy") + depth_path = os.path.join(seq_path, "depth_maps", f"frame_{idx_str}.npy") + dyna_avg_path = os.path.join(seq_path, "dyna_avg", f"dyna_avg_{idx_str}.npy") + dyna_max_path = os.path.join(seq_path, "dyna_max", f"dyna_max_{idx_str}.npy") + + dynamic_mask_path = os.path.join(seq_path, "dynamic_masks", f"dynamic_mask_{idx_str}.png") + enlarged_dynamic_mask_path = os.path.join(seq_path, "enlarged_dynamic_masks", f"enlarged_dynamic_mask_{idx_str}.png") + + if opt.dataset == 'sintel': + gt_dynamic_mask_path = os.path.join(opt.gt_dynamic_mask, seq_name, f"frame_{int(idx_str)+1:04d}.png") + elif opt.dataset == 'davis': + gt_dynamic_mask_path = os.path.join(opt.gt_dynamic_mask, seq_name, f"{int(idx_str):05d}.png") + + try: + conf_map = np.load(conf_path) + except: + conf_map = None + + try: + K_flattened = np.loadtxt(intrinsics_path, dtype=np.float32) + K = K_flattened.reshape(-1, 3, 3) + K = K[int(idx_str)] + except: + K = None + + try: + depth_map = np.load(depth_path) + except: + depth_map = None + + try: + dyna_avg_map = np.load(dyna_avg_path) + except: + dyna_avg_map = None + try: + dyna_max_map = np.load(dyna_max_path) + except: + dyna_max_map = None + try: + dynamic_mask = np.array(Image.open(dynamic_mask_path)) / 255.0 > 0.5 + except: + dynamic_mask = None + try: + enlarged_dynamic_mask = np.array(Image.open(enlarged_dynamic_mask_path)) / 255.0 > 0.5 + except: + enlarged_dynamic_mask = None + + try: + if opt.dataset == 'davis': + gt_dynamic_mask = np.array(Image.open(gt_dynamic_mask_path)) > 0.5 + else: + gt_dynamic_mask = np.array(Image.open(gt_dynamic_mask_path)) / 255.0 > 0.5 + except: + gt_dynamic_mask = None + + # original_pose = None + original_pose = original_poses[int(idx_str)] + + + + cam_info = CameraInfo(uid=uid, intr = intr, R=R, T=T, original_pose = original_pose, FovY=FovY, FovX=FovX, image=image, conf_map=conf_map, depth_map=depth_map, + image_path=image_path, image_name=image_name, width=width, height=height, dynamic_mask = dynamic_mask, enlarged_dynamic_mask = enlarged_dynamic_mask, dyna_avg_map=dyna_avg_map, dyna_max_map=dyna_max_map, gt_dynamic_mask=gt_dynamic_mask) + + cam_infos.append(cam_info) + sys.stdout.write('\n') + return cam_infos, poses + +# For the interpolated video; used only when rendering the interpolated trajectory +def readColmapCamerasInterp(cam_extrinsics, cam_intrinsics, images_folder, model_path): + + pose_interpolated_path = model_path + 'pose/pose_interpolated.npy' + pose_interpolated = np.load(pose_interpolated_path) + intr = cam_intrinsics[1] + + cam_infos = [] + poses = [] + for idx, pose_npy in enumerate(pose_interpolated): + sys.stdout.write('\r') + sys.stdout.write("Reading camera {}/{}".format(idx+1, pose_interpolated.shape[0])) + sys.stdout.flush() + + extr = pose_npy + # the single predicted intrinsics object is shared by all interpolated views + height = intr.height + width = intr.width + + uid = idx + R = extr[:3, :3].transpose() + T = extr[:3, 3] + pose = np.vstack((np.hstack((R, T.reshape(3,-1))),np.array([[0, 0, 0, 1]]))) + # print(uid) + # print(pose.shape) + # pose = np.linalg.inv(pose) + poses.append(pose) + if intr.model=="SIMPLE_PINHOLE": +
focal_length_x = intr.params[0] + FovY = focal2fov(focal_length_x, height) + FovX = focal2fov(focal_length_x, width) + elif intr.model=="PINHOLE": + focal_length_x = intr.params[0] + focal_length_y = intr.params[1] + FovY = focal2fov(focal_length_y, height) + FovX = focal2fov(focal_length_x, width) + else: + assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!" + + images_list = os.listdir(os.path.join(images_folder)) + image_name_0 = images_list[0] + image_name = str(idx).zfill(4) + image = Image.open(images_folder + '/' + image_name_0) + + cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image, + image_path=images_folder, image_name=image_name, width=width, height=height, + dynamic_mask = None, enlarged_dynamic_mask = None, + intr=None, conf_map=None, depth_map=None, dyna_avg_map=None, dyna_max_map=None, gt_dynamic_mask=None, original_pose=None) + cam_infos.append(cam_info) + + sys.stdout.write('\n') + return cam_infos, poses + + +def fetchPly(path): + plydata = PlyData.read(path) + vertices = plydata['vertex'] + positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T + colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0 + normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T + return BasicPointCloud(points=positions, colors=colors, normals=normals) + +def storePly(path, xyz, rgb): + # Define the dtype for the structured array + dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), + ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'), + ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')] + + normals = np.zeros_like(xyz) + + elements = np.empty(xyz.shape[0], dtype=dtype) + attributes = np.concatenate((xyz, normals, rgb), axis=1) + elements[:] = list(map(tuple, attributes)) + + # Create the PlyData object and write to file + vertex_element = PlyElement.describe(elements, 'vertex') + ply_data = PlyData([vertex_element]) + ply_data.write(path) + +def readColmapSceneInfo(path, images, eval, args, opt, llffhold=2): + # try: + # cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin") + # cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin") + # cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file) + # cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file) + # except: + + ##### For initializing test pose using PCD_Registration + + cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt") + + cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt") + cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file) + cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file) + + reading_dir = "images" if images is None else images + + if opt.get_video: + cam_infos_unsorted, poses = readColmapCamerasInterp(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir), model_path=args.model_path) + else: + cam_infos_unsorted, poses = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir), eval=eval, opt=opt) + sorting_indices = sorted(range(len(cam_infos_unsorted)), key=lambda x: cam_infos_unsorted[x].image_name) + cam_infos = [cam_infos_unsorted[i] for i in sorting_indices] + sorted_poses = [poses[i] for i in sorting_indices] + # cam_infos is already ordered by image_name via sorting_indices above + + + + if eval: + # train_cam_infos = [c for idx, c in 
enumerate(cam_infos) if (idx+1) % llffhold != 0] + # test_cam_infos = [c for idx, c in enumerate(cam_infos) if (idx+1) % llffhold == 0] + # train_poses = [c for idx, c in enumerate(sorted_poses) if (idx+1) % llffhold != 0] + # test_poses = [c for idx, c in enumerate(sorted_poses) if (idx+1) % llffhold == 0] + num_cams = len(cam_infos) + offset = 5 + test_cam_infos = [c for idx, c in enumerate(cam_infos) if (idx + offset) % 10 == 0] + train_cam_infos = [c for idx, c in enumerate(cam_infos) if (idx + offset) % 10 != 0] + train_poses = [c for idx, c in enumerate(sorted_poses) if (idx + offset) % 10 != 0] + test_poses = [c for idx, c in enumerate(sorted_poses) if (idx + offset) % 10 == 0] + + + else: + train_cam_infos = cam_infos + test_cam_infos = [] + train_poses = sorted_poses + test_poses = [] + + nerf_normalization = getNerfppNorm(train_cam_infos) + + try: + ply_path = os.path.join(path, "sparse/0/points3D.ply") + bin_path = os.path.join(path, "sparse/0/points3D.bin") + txt_path = os.path.join(path, "sparse/0/points3D.txt") + if not os.path.exists(ply_path): + print("Converting point3d.bin to .ply, will happen only the first time you open the scene.") + try: + xyz, rgb, _ = read_points3D_binary(bin_path) + except: + xyz, rgb, _ = read_points3D_text(txt_path) + storePly(ply_path, xyz, rgb) + try: + pcd = fetchPly(ply_path) + except: + pcd = None + except: + pcd = None + ply_path = None + + # Create an Open3D point cloud object + # o3d.visualization.draw_geometries([to_open3d_point_cloud(pcd)]) + # np.save("poses_family.npy", sorted_poses) + # breakpoint() + # np.save("3dpoints.npy", pcd.points) + # np.save("3dcolors.npy", pcd.colors) + + + scene_info = SceneInfo(point_cloud=pcd, + train_cameras=train_cam_infos, + test_cameras=test_cam_infos, + nerf_normalization=nerf_normalization, + ply_path=ply_path, + train_poses=train_poses, + test_poses=test_poses) + return scene_info + +def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"): + cam_infos = [] + + with open(os.path.join(path, transformsfile)) as json_file: + contents = json.load(json_file) + fovx = contents["camera_angle_x"] + + frames = contents["frames"] + for idx, frame in enumerate(frames): + cam_name = os.path.join(path, frame["file_path"] + extension) + + # NeRF 'transform_matrix' is a camera-to-world transform + c2w = np.array(frame["transform_matrix"]) + # change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward) + c2w[:3, 1:3] *= -1 + + # get the world-to-camera transform and set R, T + w2c = np.linalg.inv(c2w) + R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code + T = w2c[:3, 3] + + image_path = os.path.join(path, cam_name) + image_name = Path(cam_name).stem + image = Image.open(image_path) + + im_data = np.array(image.convert("RGBA")) + + bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0]) + + norm_data = im_data / 255.0 + arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4]) + image = Image.fromarray(np.array(arr*255.0, dtype=np.byte), "RGB") + + fovy = focal2fov(fov2focal(fovx, image.size[0]), image.size[1]) + FovY = fovy + FovX = fovx + + cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image, + image_path=image_path, image_name=image_name, width=image.size[0], height=image.size[1])) + + return cam_infos + +def readNerfSyntheticInfo(path, white_background, eval, extension=".png"): + print("Reading Training Transforms") + train_cam_infos = 
readCamerasFromTransforms(path, "transforms_train.json", white_background, extension) + print("Reading Test Transforms") + test_cam_infos = readCamerasFromTransforms(path, "transforms_test.json", white_background, extension) + + if not eval: + train_cam_infos.extend(test_cam_infos) + test_cam_infos = [] + + nerf_normalization = getNerfppNorm(train_cam_infos) + + ply_path = os.path.join(path, "points3d.ply") + if not os.path.exists(ply_path): + # Since this data set has no colmap data, we start with random points + num_pts = 100_000 + print(f"Generating random point cloud ({num_pts})...") + + # We create random points inside the bounds of the synthetic Blender scenes + xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3 + shs = np.random.random((num_pts, 3)) / 255.0 + pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3))) + + storePly(ply_path, xyz, SH2RGB(shs) * 255) + try: + pcd = fetchPly(ply_path) + except: + pcd = None + + scene_info = SceneInfo(point_cloud=pcd, + train_cameras=train_cam_infos, + test_cameras=test_cam_infos, + nerf_normalization=nerf_normalization, + ply_path=ply_path) + return scene_info + +sceneLoadTypeCallbacks = { + "Colmap": readColmapSceneInfo, + "Blender" : readNerfSyntheticInfo +} \ No newline at end of file diff --git a/scene/gaussian_model.py b/scene/gaussian_model.py new file mode 100755 index 0000000000000000000000000000000000000000..7326eaad432e9f7012779b913e25d565df466a8f --- /dev/null +++ b/scene/gaussian_model.py @@ -0,0 +1,659 @@ +# +# Copyright (C) 2023, Inria +# GRAPHDECO research group, https://team.inria.fr/graphdeco +# All rights reserved. +# +# This software is free for non-commercial, research and evaluation use +# under the terms of the LICENSE.md file. +# +# For inquiries contact george.drettakis@inria.fr +# + +import torch +# from lietorch import SO3, SE3, Sim3, LieGroupParameter +import numpy as np +from utils.general_utils import inverse_sigmoid, get_expon_lr_func, build_rotation +from torch import nn +import os +from utils.system_utils import mkdir_p +from plyfile import PlyData, PlyElement +from utils.sh_utils import RGB2SH +from simple_knn._C import distCUDA2 +from utils.graphics_utils import BasicPointCloud +from utils.general_utils import strip_symmetric, build_scaling_rotation +from scipy.spatial.transform import Rotation as R +from utils.pose_utils import rotation2quad, get_tensor_from_camera +from utils.graphics_utils import getWorld2View2 +from utils.pose_utils import rotation2quad, get_tensor_from_camera, depth_to_pts3d + +class GaussianModel: + + def setup_functions(self): + def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation): + L = build_scaling_rotation(scaling_modifier * scaling, rotation) + actual_covariance = L @ L.transpose(1, 2) + symm = strip_symmetric(actual_covariance) + return symm + + self.scaling_activation = torch.exp + self.scaling_inverse_activation = torch.log + + self.covariance_activation = build_covariance_from_scaling_rotation + + self.opacity_activation = torch.sigmoid + self.inverse_opacity_activation = inverse_sigmoid + + self.rotation_activation = torch.nn.functional.normalize + self.enable_test = True + + def __init__(self, sh_degree : int): + self.active_sh_degree = 0 + self.max_sh_degree = sh_degree + self._xyz = torch.empty(0) + self._features_dc = torch.empty(0) + self._features_rest = torch.empty(0) + self._scaling = torch.empty(0) + self._rotation = torch.empty(0) + self._opacity = torch.empty(0) + self.max_radii2D = torch.empty(0) + 
self.xyz_gradient_accum = torch.empty(0) + self.denom = torch.empty(0) + self.optimizer = None + self.percent_dense = 0 + self.spatial_lr_scale = 0 + self.setup_functions() + + def capture(self): + return ( + self.active_sh_degree, + self._xyz, + self._features_dc, + self._features_rest, + self._scaling, + self._rotation, + self._opacity, + self.max_radii2D, + self.xyz_gradient_accum, + self.denom, + self.optimizer.state_dict(), + self.spatial_lr_scale, + self.Q, + self.T, + ) + + def restore(self, model_args, training_args): + (self.active_sh_degree, + self._xyz, + self._features_dc, + self._features_rest, + self._scaling, + self._rotation, + self._opacity, + self.max_radii2D, + xyz_gradient_accum, + denom, + opt_dict, + self.spatial_lr_scale, + self.Q, self.T) = model_args + self.training_setup(training_args) + self.xyz_gradient_accum = xyz_gradient_accum + self.denom = denom + self.optimizer.load_state_dict(opt_dict) + + @property + def get_scaling(self): + return self.scaling_activation(self._scaling) + + @property + def get_rotation(self): + return self.rotation_activation(self._rotation) + + @property + def get_xyz(self): + return self._xyz + + def compute_relative_world_to_camera(self, R1, t1, R2, t2): + # Create a row of zeros with a one at the end, for homogeneous coordinates + zero_row = np.array([[0, 0, 0, 1]], dtype=np.float32) + + # Compute the inverse of the first extrinsic matrix + E1_inv = np.hstack([R1.T, -R1.T @ t1.reshape(-1, 1)]) # Transpose and reshape for correct dimensions + E1_inv = np.vstack([E1_inv, zero_row]) # Append the zero_row to make it a 4x4 matrix + + # Compute the second extrinsic matrix + E2 = np.hstack([R2, -R2 @ t2.reshape(-1, 1)]) # No need to transpose R2 + E2 = np.vstack([E2, zero_row]) # Append the zero_row to make it a 4x4 matrix + + # Compute the relative transformation + E_rel = E2 @ E1_inv + + return E_rel + + def init_test_RT_seq(self, cam_list): + if len(cam_list[1.0]) == 0: + self.enable_test = False + return + quats =[] + trans = [] + for cam in cam_list[1.0]: + pose = get_tensor_from_camera(cam.world_view_transform.transpose(0, 1)) # R T -> quat t + quat = pose[:4] + tran = pose[4:] + quats.append(quat) + trans.append(tran) + quats = torch.stack(quats) + trans = torch.stack(trans) + self.test_Q = quats.cuda().requires_grad_(True) + self.test_T = trans.cuda().requires_grad_(True) + + def init_RT_seq(self, cam_list): + quats =[] + trans = [] + for cam in cam_list[1.0]: + pose = get_tensor_from_camera(cam.world_view_transform.transpose(0, 1)) # R T -> quat t + quat = pose[:4] + tran = pose[4:] + quats.append(quat) + trans.append(tran) + quats = torch.stack(quats) + trans = torch.stack(trans) + self.Q = quats.cuda().requires_grad_(True) + self.T = trans.cuda().requires_grad_(True) + + def init_fov(self, cam_list): + cam = cam_list[1.0][0] + self.FoVx = torch.tensor(cam.FoVx).cuda().requires_grad_(True) + self.FoVy = torch.tensor(cam.FoVy).cuda().requires_grad_(True) + + + + def get_RT(self, idx): + quat = self.Q[idx] + tran = self.T[idx] + pose = torch.cat((quat, tran), dim=0) + return pose + + def get_P(self): + pose = torch.cat((self.Q, self.T), dim=1) + return pose + + def get_RT_test(self, idx): + quat = self.test_Q[idx] + tran = self.test_T[idx] + pose = torch.cat((quat, tran), dim=0) + return pose + + @property + def get_features(self): + features_dc = self._features_dc + features_rest = self._features_rest + return torch.cat((features_dc, features_rest), dim=1) + + @property + def get_opacity(self): + return 
self.opacity_activation(self._opacity) + + def get_covariance(self, scaling_modifier = 1): + return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation) + + def oneupSHdegree(self): + if self.active_sh_degree < self.max_sh_degree: + self.active_sh_degree += 1 + + def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float): + self.spatial_lr_scale = spatial_lr_scale + fused_point_cloud = torch.tensor(np.asarray(pcd.points)).float().cuda() + fused_color = RGB2SH(torch.tensor(np.asarray(pcd.colors)).float().cuda()) + features = torch.zeros((fused_color.shape[0], 3, (self.max_sh_degree + 1) ** 2)).float().cuda() + features[:, :3, 0 ] = fused_color + features[:, 3:, 1:] = 0.0 + + print("Number of points at initialisation : ", fused_point_cloud.shape[0]) + + dist2 = torch.clamp_min(distCUDA2(torch.from_numpy(np.asarray(pcd.points)).float().cuda()), 0.0000001) + scales = torch.log(torch.sqrt(dist2))[...,None].repeat(1, 3) + rots = torch.zeros((fused_point_cloud.shape[0], 4), device="cuda") + rots[:, 0] = 1 + + opacities = inverse_sigmoid(0.1 * torch.ones((fused_point_cloud.shape[0], 1), dtype=torch.float, device="cuda")) + + self._xyz = nn.Parameter(fused_point_cloud.requires_grad_(True)) + self._features_dc = nn.Parameter(features[:,:,0:1].transpose(1, 2).contiguous().requires_grad_(True)) + self._features_rest = nn.Parameter(features[:,:,1:].transpose(1, 2).contiguous().requires_grad_(True)) + self._scaling = nn.Parameter(scales.requires_grad_(True)) + self._rotation = nn.Parameter(rots.requires_grad_(True)) + self._opacity = nn.Parameter(opacities.requires_grad_(True)) + self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda") + + def training_setup(self, training_args): + self.percent_dense = training_args.percent_dense + self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda") + self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda") + + conf_lr_init = 3e-3 + conf_lr_final = 3e-4 + + l = [ + {'params': [self._xyz], 'lr': training_args.position_lr_init * self.spatial_lr_scale, "name": "xyz"}, + {'params': [self._features_dc], 'lr': training_args.feature_lr, "name": "f_dc"}, + {'params': [self._features_rest], 'lr': training_args.feature_lr / 20.0, "name": "f_rest"}, + {'params': [self._opacity], 'lr': training_args.opacity_lr, "name": "opacity"}, + {'params': [self._scaling], 'lr': training_args.scaling_lr, "name": "scaling"}, + {'params': [self._rotation], 'lr': training_args.rotation_lr, "name": "rotation"}, + {'params': [self._conf_static], 'lr': conf_lr_init, "name": "conf_static"}, + ] + + cam_lr_init_Q = 0.00003 + cam_lr_final_Q = 0.000003 + cam_lr_init_T = 0.00003 + cam_lr_final_T = 0.000003 + l_cam = [ + {'params': [self.Q],'lr': cam_lr_init_Q, "name": "pose_Q"}, + {'params': [self.T],'lr': cam_lr_init_T, "name": "pose_T"}, + {'params': [self.FoVx],'lr': 0.0001, "name": "fovX"}, + {'params': [self.FoVy],'lr': 0.0001, "name": "fovY"} + ] + # l_cam = [{'params': [self.P],'lr': training_args.rotation_lr, "name": "pose"},] + + + self.optimizer = torch.optim.Adam(l, lr=0.0, eps=1e-15) + + self.optimizer_cam = torch.optim.Adam(l_cam, lr=0.0, eps=1e-15) + + if self.enable_test: + l_cam_test = [ + {'params': [self.test_Q],'lr': cam_lr_init_Q, "name": "test_pose_Q"}, + {'params': [self.test_T],'lr': cam_lr_init_T, "name": "test_pose_T"}, + ] + self.optimizer_cam_test = torch.optim.Adam(l_cam_test, lr=0.0, eps=1e-15) + + + self.xyz_scheduler_args = 
get_expon_lr_func(lr_init=training_args.position_lr_init*self.spatial_lr_scale, + lr_final=training_args.position_lr_final*self.spatial_lr_scale, + lr_delay_mult=training_args.position_lr_delay_mult, + max_steps=training_args.position_lr_max_steps) + self.Q_scheduler_args = get_expon_lr_func( + # lr_init=0, + # lr_final=0, + lr_init=cam_lr_init_Q, + lr_final=cam_lr_final_Q, + # lr_init=training_args.position_lr_init*self.spatial_lr_scale*10, + # lr_final=training_args.position_lr_final*self.spatial_lr_scale*10, + lr_delay_mult=training_args.position_lr_delay_mult, + max_steps=1000) + + self.T_scheduler_args = get_expon_lr_func( + # lr_init=0, + # lr_final=0, + lr_init=cam_lr_init_T, + lr_final=cam_lr_final_T, + # lr_init=training_args.position_lr_init*self.spatial_lr_scale*10, + # lr_final=training_args.position_lr_final*self.spatial_lr_scale*10, + lr_delay_mult=training_args.position_lr_delay_mult, + max_steps=1000) + + self.conf_static_scheduler_args = get_expon_lr_func( + lr_init=conf_lr_init, + lr_final=conf_lr_final, + lr_delay_mult=training_args.position_lr_delay_mult, + max_steps=training_args.iterations) + + def update_learning_rate(self, iteration): + ''' Learning rate scheduling per step ''' + for param_group in self.optimizer_cam.param_groups: + if param_group["name"] == "pose_Q": + lr = self.Q_scheduler_args(iteration) + param_group['lr'] = lr + if param_group["name"] == "pose_T": + lr = self.T_scheduler_args(iteration) + param_group['lr'] = lr + if param_group["name"] == "test_pose_Q": + lr = self.Q_scheduler_args(iteration) + param_group['lr'] = lr + if param_group["name"] == "test_pose_T": + lr = self.T_scheduler_args(iteration) + param_group['lr'] = lr + + for param_group in self.optimizer.param_groups: + if param_group["name"] == "xyz": + lr = self.xyz_scheduler_args(iteration) + param_group['lr'] = lr + if param_group["name"] == "conf_static" or param_group["name"] == "conf": + lr = self.conf_static_scheduler_args(iteration) + param_group['lr'] = lr + # return lr + + def construct_list_of_attributes(self): + l = ['x', 'y', 'z', 'nx', 'ny', 'nz'] + # All channels except the 3 DC + for i in range(self._features_dc.shape[1]*self._features_dc.shape[2]): + l.append('f_dc_{}'.format(i)) + for i in range(self._features_rest.shape[1]*self._features_rest.shape[2]): + l.append('f_rest_{}'.format(i)) + l.append('opacity_ori') + l.append('opacity') + l.append('conf_static') + for i in range(self._scaling.shape[1]): + l.append('scale_{}'.format(i)) + for i in range(self._rotation.shape[1]): + l.append('rot_{}'.format(i)) + return l + + def save_ply(self, path): + mkdir_p(os.path.dirname(path)) + + xyz = self._xyz.detach().cpu().numpy() + normals = np.zeros_like(xyz) + f_dc = self._features_dc.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy() + f_rest = self._features_rest.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy() + + opacities = self.opacity_activation(self._opacity) * self._conf_static.reshape(-1, 1)[self.aggregated_mask] + opacities = self.inverse_opacity_activation(opacities).detach().cpu().numpy() + + opacities_ori = self._opacity.detach().cpu().numpy() + + scale = self._scaling.detach().cpu().numpy() + rotation = self._rotation.detach().cpu().numpy() + conf_static = self._conf_static.reshape(-1, 1)[self.aggregated_mask].detach().cpu().numpy() + + dtype_full = [(attribute, 'f4') for attribute in self.construct_list_of_attributes()] + elements = np.empty(xyz.shape[0], dtype=dtype_full) + attributes = np.concatenate((xyz, 
normals, f_dc, f_rest, opacities_ori, opacities, conf_static, scale, rotation), axis=1) + elements[:] = list(map(tuple, attributes)) + el = PlyElement.describe(elements, 'vertex') + PlyData([el]).write(path) + + def reset_opacity(self): + opacities_new = inverse_sigmoid(torch.min(self.get_opacity, torch.ones_like(self.get_opacity)*0.01)) + optimizable_tensors = self.replace_tensor_to_optimizer(opacities_new, "opacity") + self._opacity = optimizable_tensors["opacity"] + + def load_ply(self, path): + plydata = PlyData.read(path) + + xyz = np.stack((np.asarray(plydata.elements[0]["x"]), + np.asarray(plydata.elements[0]["y"]), + np.asarray(plydata.elements[0]["z"])), axis=1) + opacities_ori = np.asarray(plydata.elements[0]["opacity_ori"])[..., np.newaxis] + opacities = np.asarray(plydata.elements[0]["opacity"])[..., np.newaxis] + + opacities = opacities_ori # for dynamic-aware rendering + conf_static = np.asarray(plydata.elements[0]["conf_static"])[..., np.newaxis] + + + features_dc = np.zeros((xyz.shape[0], 3, 1)) + features_dc[:, 0, 0] = np.asarray(plydata.elements[0]["f_dc_0"]) + features_dc[:, 1, 0] = np.asarray(plydata.elements[0]["f_dc_1"]) + features_dc[:, 2, 0] = np.asarray(plydata.elements[0]["f_dc_2"]) + + extra_f_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("f_rest_")] + extra_f_names = sorted(extra_f_names, key = lambda x: int(x.split('_')[-1])) + assert len(extra_f_names)==3*(self.max_sh_degree + 1) ** 2 - 3 + features_extra = np.zeros((xyz.shape[0], len(extra_f_names))) + for idx, attr_name in enumerate(extra_f_names): + features_extra[:, idx] = np.asarray(plydata.elements[0][attr_name]) + # Reshape (P,F*SH_coeffs) to (P, F, SH_coeffs except DC) + features_extra = features_extra.reshape((features_extra.shape[0], 3, (self.max_sh_degree + 1) ** 2 - 1)) + + scale_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("scale_")] + scale_names = sorted(scale_names, key = lambda x: int(x.split('_')[-1])) + scales = np.zeros((xyz.shape[0], len(scale_names))) + for idx, attr_name in enumerate(scale_names): + scales[:, idx] = np.asarray(plydata.elements[0][attr_name]) + + rot_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("rot")] + rot_names = sorted(rot_names, key = lambda x: int(x.split('_')[-1])) + rots = np.zeros((xyz.shape[0], len(rot_names))) + for idx, attr_name in enumerate(rot_names): + rots[:, idx] = np.asarray(plydata.elements[0][attr_name]) + + self._xyz = nn.Parameter(torch.tensor(xyz, dtype=torch.float, device="cuda").requires_grad_(True)) + self._features_dc = nn.Parameter(torch.tensor(features_dc, dtype=torch.float, device="cuda").transpose(1, 2).contiguous().requires_grad_(True)) + self._features_rest = nn.Parameter(torch.tensor(features_extra, dtype=torch.float, device="cuda").transpose(1, 2).contiguous().requires_grad_(True)) + self._opacity = nn.Parameter(torch.tensor(opacities, dtype=torch.float, device="cuda").requires_grad_(True)) + self._conf_static = nn.Parameter(torch.tensor(conf_static, dtype=torch.float, device="cuda").requires_grad_(True)) + self._scaling = nn.Parameter(torch.tensor(scales, dtype=torch.float, device="cuda").requires_grad_(True)) + self._rotation = nn.Parameter(torch.tensor(rots, dtype=torch.float, device="cuda").requires_grad_(True)) + + self.active_sh_degree = self.max_sh_degree + + def replace_tensor_to_optimizer(self, tensor, name): + optimizable_tensors = {} + for group in self.optimizer.param_groups: + if group["name"] == name: + # breakpoint() + 
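+                # Adam stores per-parameter moments (exp_avg / exp_avg_sq). When a tensor is
+                # swapped in-place (e.g. by reset_opacity), the moments are zeroed and re-keyed
+                # to the new nn.Parameter so stale momentum is not applied to the new values.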
stored_state = self.optimizer.state.get(group['params'][0], None) + stored_state["exp_avg"] = torch.zeros_like(tensor) + stored_state["exp_avg_sq"] = torch.zeros_like(tensor) + + del self.optimizer.state[group['params'][0]] + group["params"][0] = nn.Parameter(tensor.requires_grad_(True)) + self.optimizer.state[group['params'][0]] = stored_state + + optimizable_tensors[group["name"]] = group["params"][0] + return optimizable_tensors + + def _prune_optimizer(self, mask): + optimizable_tensors = {} + for group in self.optimizer.param_groups: + stored_state = self.optimizer.state.get(group['params'][0], None) + if stored_state is not None: + stored_state["exp_avg"] = stored_state["exp_avg"][mask] + stored_state["exp_avg_sq"] = stored_state["exp_avg_sq"][mask] + + del self.optimizer.state[group['params'][0]] + group["params"][0] = nn.Parameter((group["params"][0][mask].requires_grad_(True))) + self.optimizer.state[group['params'][0]] = stored_state + + optimizable_tensors[group["name"]] = group["params"][0] + else: + group["params"][0] = nn.Parameter(group["params"][0][mask].requires_grad_(True)) + optimizable_tensors[group["name"]] = group["params"][0] + return optimizable_tensors + + def prune_points(self, mask): + valid_points_mask = ~mask + optimizable_tensors = self._prune_optimizer(valid_points_mask) + + self._xyz = optimizable_tensors["xyz"] + self._features_dc = optimizable_tensors["f_dc"] + self._features_rest = optimizable_tensors["f_rest"] + self._opacity = optimizable_tensors["opacity"] + self._scaling = optimizable_tensors["scaling"] + self._rotation = optimizable_tensors["rotation"] + + self.xyz_gradient_accum = self.xyz_gradient_accum[valid_points_mask] + + self.denom = self.denom[valid_points_mask] + self.max_radii2D = self.max_radii2D[valid_points_mask] + + def cat_tensors_to_optimizer(self, tensors_dict): + optimizable_tensors = {} + for group in self.optimizer.param_groups: + assert len(group["params"]) == 1 + extension_tensor = tensors_dict[group["name"]] + stored_state = self.optimizer.state.get(group['params'][0], None) + if stored_state is not None: + + stored_state["exp_avg"] = torch.cat((stored_state["exp_avg"], torch.zeros_like(extension_tensor)), dim=0) + stored_state["exp_avg_sq"] = torch.cat((stored_state["exp_avg_sq"], torch.zeros_like(extension_tensor)), dim=0) + + del self.optimizer.state[group['params'][0]] + group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True)) + self.optimizer.state[group['params'][0]] = stored_state + + optimizable_tensors[group["name"]] = group["params"][0] + else: + group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True)) + optimizable_tensors[group["name"]] = group["params"][0] + + return optimizable_tensors + + def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation): + d = {"xyz": new_xyz, + "f_dc": new_features_dc, + "f_rest": new_features_rest, + "opacity": new_opacities, + "scaling" : new_scaling, + "rotation" : new_rotation} + + optimizable_tensors = self.cat_tensors_to_optimizer(d) + self._xyz = optimizable_tensors["xyz"] + self._features_dc = optimizable_tensors["f_dc"] + self._features_rest = optimizable_tensors["f_rest"] + self._opacity = optimizable_tensors["opacity"] + self._scaling = optimizable_tensors["scaling"] + self._rotation = optimizable_tensors["rotation"] + + self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda") + 
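+        # The accumulator above and the buffers below are re-allocated at the new point
+        # count, since densification just changed the number of Gaussians; all statistics
+        # restart from zero.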
self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda") + self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda") + + def densify_and_split(self, grads, grad_threshold, scene_extent, N=2): + n_init_points = self.get_xyz.shape[0] + # Extract points that satisfy the gradient condition + padded_grad = torch.zeros((n_init_points), device="cuda") + padded_grad[:grads.shape[0]] = grads.squeeze() + selected_pts_mask = torch.where(padded_grad >= grad_threshold, True, False) + selected_pts_mask = torch.logical_and(selected_pts_mask, + torch.max(self.get_scaling, dim=1).values > self.percent_dense*scene_extent) + + stds = self.get_scaling[selected_pts_mask].repeat(N,1) + means =torch.zeros((stds.size(0), 3),device="cuda") + samples = torch.normal(mean=means, std=stds) + rots = build_rotation(self._rotation[selected_pts_mask]).repeat(N,1,1) + new_xyz = torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + self.get_xyz[selected_pts_mask].repeat(N, 1) + new_scaling = self.scaling_inverse_activation(self.get_scaling[selected_pts_mask].repeat(N,1) / (0.8*N)) + new_rotation = self._rotation[selected_pts_mask].repeat(N,1) + new_features_dc = self._features_dc[selected_pts_mask].repeat(N,1,1) + new_features_rest = self._features_rest[selected_pts_mask].repeat(N,1,1) + new_opacity = self._opacity[selected_pts_mask].repeat(N,1) + + self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_opacity, new_scaling, new_rotation) + + prune_filter = torch.cat((selected_pts_mask, torch.zeros(N * selected_pts_mask.sum(), device="cuda", dtype=bool))) + self.prune_points(prune_filter) + + def densify_and_clone(self, grads, grad_threshold, scene_extent): + # Extract points that satisfy the gradient condition + selected_pts_mask = torch.where(torch.norm(grads, dim=-1) >= grad_threshold, True, False) + selected_pts_mask = torch.logical_and(selected_pts_mask, + torch.max(self.get_scaling, dim=1).values <= self.percent_dense*scene_extent) + + new_xyz = self._xyz[selected_pts_mask] + new_features_dc = self._features_dc[selected_pts_mask] + new_features_rest = self._features_rest[selected_pts_mask] + new_opacities = self._opacity[selected_pts_mask] + new_scaling = self._scaling[selected_pts_mask] + new_rotation = self._rotation[selected_pts_mask] + + self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation) + + def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size): + grads = self.xyz_gradient_accum / self.denom + grads[grads.isnan()] = 0.0 + + # self.densify_and_clone(grads, max_grad, extent) + # self.densify_and_split(grads, max_grad, extent) + + prune_mask = (self.get_opacity < min_opacity).squeeze() + if max_screen_size: + big_points_vs = self.max_radii2D > max_screen_size + big_points_ws = self.get_scaling.max(dim=1).values > 0.1 * extent + prune_mask = torch.logical_or(torch.logical_or(prune_mask, big_points_vs), big_points_ws) + self.prune_points(prune_mask) + + torch.cuda.empty_cache() + + def add_densification_stats(self, viewspace_point_tensor, update_filter): + self.xyz_gradient_accum[update_filter] += torch.norm(viewspace_point_tensor.grad[update_filter,:2], dim=-1, keepdim=True) + self.denom[update_filter] += 1 + + + def create_from_cameras(self, train_cameras, spatial_lr_scale : float, conf_thre = 1.0): + self.spatial_lr_scale = spatial_lr_scale + poses = [] + confidences = [] + dynamic_masks = [] + dyna_avg = [] + rgbs = [] + depth_maps = [] + K = [] + camera0 = train_cameras[1.0][0] + W = 
camera0.image_width + H = camera0.image_height + + for camera in train_cameras[1.0]: + intr = camera.intr + focal_length_x = intr.params[0] + focal_length_y = intr.params[1] + height = intr.height + width = intr.width + intr = torch.tensor([[focal_length_x, 0, width / 2], + [0, focal_length_y, height / 2], + [0, 0, 1]], device="cuda") + K.append(intr) + poses.append(camera.original_pose) + depth_maps.append(camera.depth_map) + confidences.append(camera.conf_map) + dynamic_masks.append(camera.dynamic_mask) + dyna_avg.append(camera.dyna_avg_map) + rgbs.append(camera.original_image) + + K = torch.stack(K) + rgbs = torch.stack(rgbs) + depth_maps = torch.stack(depth_maps) + confidences = torch.stack(confidences) + dynamic_masks = torch.stack(dynamic_masks) + dyna_avg = torch.stack(dyna_avg) + poses = torch.stack(poses).cuda() + + # Unproject the per-view depth maps into a fused world-space point cloud. + p3d = depth_to_pts3d(K, poses, W, H, depth_maps).float() + p3d_color = rgbs.permute(0,2,3,1).reshape(-1, 3) + pts_4_3dgs = p3d.reshape(-1, 3) + + # conf_static: per-pixel probability of being static (1 minus the averaged dynamicness). + dyna = dyna_avg + conf_static = 1 - dyna # dyna_avg is already a tensor; no need to re-wrap with torch.tensor() + # confidences = conf_static * confidences + confidence = confidences.reshape(-1) + + # Keep only points whose confidence clears the (log-space) threshold. + confidence_masks = confidence > torch.tensor(conf_thre).log() + print(f"Ratio of confidence masks: {confidence_masks.float().mean().item():.4f}") + self.aggregated_mask = confidence_masks + print(f"Ratio of aggregated masks: {self.aggregated_mask.float().mean().item():.4f}") + print(f"Number of points before: {pts_4_3dgs.shape[0]}") + pts_4_3dgs = pts_4_3dgs[self.aggregated_mask] + color_4_3dgs = p3d_color.reshape(-1, 3)[self.aggregated_mask] + + fused_point_cloud = pts_4_3dgs + fused_color = RGB2SH(color_4_3dgs) + features = torch.zeros((fused_color.shape[0], 3, (self.max_sh_degree + 1) ** 2)).float().cuda() + features[:, :3, 0 ] = fused_color + features[:, 3:, 1:] = 0.0 + + print("Number of points at initialisation : ", fused_point_cloud.shape[0]) + + dist2 = torch.clamp_min(distCUDA2(pts_4_3dgs), 0.0000001) + scales = torch.log(torch.sqrt(dist2))[...,None].repeat(1, 3) + rots = torch.zeros((fused_point_cloud.shape[0], 4), device="cuda") + rots[:, 0] = 1 + + opa = 1/len(train_cameras[1.0]) + opacities = inverse_sigmoid(opa * torch.ones((fused_point_cloud.shape[0], 1), dtype=torch.float, device="cuda")) + # opacities = inverse_sigmoid(conf_static_4_3dgs.reshape(-1, 1)) + + self._xyz = nn.Parameter(fused_point_cloud.requires_grad_(True)) + self._features_dc = nn.Parameter(features[:,:,0:1].transpose(1, 2).contiguous().requires_grad_(False)) + self._features_rest = nn.Parameter(features[:,:,1:].transpose(1, 2).contiguous().requires_grad_(False)) + self._scaling = nn.Parameter(scales.requires_grad_(False)) + self._rotation = nn.Parameter(rots.requires_grad_(False)) + self._opacity = nn.Parameter(opacities.requires_grad_(False)) + self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda") + + self._conf_static = nn.Parameter(conf_static.requires_grad_(True)) diff --git a/scene/utils.py b/scene/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..aaa328524fcb5dc345868d46422ba321f2ad9439 --- /dev/null +++ b/scene/utils.py @@ -0,0 +1 @@ +import torch diff --git a/scene/vo_eval.py b/scene/vo_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..96ce0187c5ccd5223d3440539b8527a5745fd6e5 --- /dev/null +++ b/scene/vo_eval.py @@ -0,0 +1,335 @@ +import os +import re +from copy import deepcopy +from pathlib import Path + +import evo.main_ape as main_ape +import evo.main_rpe as main_rpe +import 
matplotlib.pyplot as plt +import numpy as np +from evo.core import sync +from evo.core.metrics import PoseRelation, Unit +from evo.core.trajectory import PosePath3D, PoseTrajectory3D +from evo.tools import file_interface, plot +from scipy.spatial.transform import Rotation + + +def sintel_cam_read(filename): + """Read camera data, return (M,N) tuple. + + M is the intrinsic matrix, N is the extrinsic matrix, so that + + x = M*N*X, + with x being a point in homogeneous image pixel coordinates, X being a + point in homogeneous world coordinates. + """ + TAG_FLOAT = 202021.25 + + f = open(filename, "rb") + check = np.fromfile(f, dtype=np.float32, count=1)[0] + assert ( + check == TAG_FLOAT + ), " cam_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? ".format( + TAG_FLOAT, check + ) + M = np.fromfile(f, dtype="float64", count=9).reshape((3, 3)) + N = np.fromfile(f, dtype="float64", count=12).reshape((3, 4)) + return M, N + + +def load_replica_traj(gt_file): + traj_w_c = np.loadtxt(gt_file) + assert traj_w_c.shape[1] == 12 or traj_w_c.shape[1] == 16 + poses = [ + np.array( + [ + [r[0], r[1], r[2], r[3]], + [r[4], r[5], r[6], r[7]], + [r[8], r[9], r[10], r[11]], + [0, 0, 0, 1], + ] + ) + for r in traj_w_c + ] + + pose_path = PosePath3D(poses_se3=poses) + timestamps_mat = np.arange(traj_w_c.shape[0]).astype(float) + + traj = PoseTrajectory3D(poses_se3=pose_path.poses_se3, timestamps=timestamps_mat) + xyz = traj.positions_xyz + # shift -1 column -> w in back column + # quat = np.roll(traj.orientations_quat_wxyz, -1, axis=1) + # uncomment this line if the quaternion is in scalar-first format + quat = traj.orientations_quat_wxyz + + traj_tum = np.column_stack((xyz, quat)) + return (traj_tum, timestamps_mat) + + +def load_sintel_traj(gt_file): # './data/sintel/training/camdata_left/alley_2' + # Refer to ParticleSfM + gt_pose_lists = sorted(os.listdir(gt_file)) + gt_pose_lists = [os.path.join(gt_file, x) for x in gt_pose_lists if x.endswith(".cam")] + tstamps = [float(x.split("/")[-1][:-4].split("_")[-1]) for x in gt_pose_lists] + gt_poses = [sintel_cam_read(f)[1] for f in gt_pose_lists] # [1] means get the extrinsic + xyzs, wxyzs = [], [] + tum_gt_poses = [] + for gt_pose in gt_poses: + gt_pose = np.concatenate([gt_pose, np.array([[0, 0, 0, 1]])], 0) + gt_pose_inv = np.linalg.inv(gt_pose) # world2cam -> cam2world + xyz = gt_pose_inv[:3, -1] + xyzs.append(xyz) + R = Rotation.from_matrix(gt_pose_inv[:3, :3]) + xyzw = R.as_quat() # scalar-last for scipy + wxyz = np.array([xyzw[-1], xyzw[0], xyzw[1], xyzw[2]]) + wxyzs.append(wxyz) + tum_gt_pose = np.concatenate([xyz, wxyz], 0) #TODO: check if this is correct + tum_gt_poses.append(tum_gt_pose) + + tum_gt_poses = np.stack(tum_gt_poses, 0) + tum_gt_poses[:, :3] = tum_gt_poses[:, :3] - np.mean( + tum_gt_poses[:, :3], 0, keepdims=True + ) + tt = np.expand_dims(np.stack(tstamps, 0), -1) + return tum_gt_poses, tt + + +def load_traj(gt_traj_file, traj_format="sintel", skip=0, stride=1, num_frames=None): + """Read trajectory format. Return in TUM-RGBD format. 
+ Returns: + traj_tum (N, 7): camera to world poses in (x,y,z,qx,qy,qz,qw) + timestamps_mat (N, 1): timestamps + """ + if traj_format == "replica": + traj_tum, timestamps_mat = load_replica_traj(gt_traj_file) + elif traj_format == "sintel": + traj_tum, timestamps_mat = load_sintel_traj(gt_traj_file) + elif traj_format in ["tum", "tartanair"]: + traj = file_interface.read_tum_trajectory_file(gt_traj_file) + xyz = traj.positions_xyz + quat = traj.orientations_quat_wxyz + timestamps_mat = traj.timestamps + traj_tum = np.column_stack((xyz, quat)) + else: + raise NotImplementedError + + traj_tum = traj_tum[skip::stride] + timestamps_mat = timestamps_mat[skip::stride] + if num_frames is not None: + traj_tum = traj_tum[:num_frames] + timestamps_mat = timestamps_mat[:num_frames] + return traj_tum, timestamps_mat + + +def update_timestamps(gt_file, traj_format, skip=0, stride=1): + """Update timestamps given a""" + if traj_format == "tum": + traj_t_map_file = gt_file.replace("groundtruth.txt", "rgb.txt") + timestamps = load_timestamps(traj_t_map_file, traj_format) + return timestamps[skip::stride] + elif traj_format == "tartanair": + traj_t_map_file = gt_file.replace("gt_pose.txt", "times.txt") + timestamps = load_timestamps(traj_t_map_file, traj_format) + return timestamps[skip::stride] + + +def load_timestamps(time_file, traj_format="replica"): + if traj_format in ["tum", "tartanair"]: + with open(time_file, "r+") as f: + lines = f.readlines() + timestamps_mat = [ + float(x.split(" ")[0]) for x in lines if not x.startswith("#") + ] + return timestamps_mat + + +def make_traj(args) -> PoseTrajectory3D: + if isinstance(args, tuple) or isinstance(args, list): + traj, tstamps = args + return PoseTrajectory3D( + positions_xyz=traj[:, :3], + orientations_quat_wxyz=traj[:, 3:], + timestamps=tstamps, + ) + assert isinstance(args, PoseTrajectory3D), type(args) + return deepcopy(args) + + +def eval_metrics(pred_traj, gt_traj=None, seq="", filename="", sample_stride=1): + + if sample_stride > 1: + pred_traj[0] = pred_traj[0][::sample_stride] + pred_traj[1] = pred_traj[1][::sample_stride] + if gt_traj is not None: + updated_gt_traj = [] + updated_gt_traj.append(gt_traj[0][::sample_stride]) + updated_gt_traj.append(gt_traj[1][::sample_stride]) + gt_traj = updated_gt_traj + + pred_traj = make_traj(pred_traj) + + if gt_traj is not None: + gt_traj = make_traj(gt_traj) + + if pred_traj.timestamps.shape[0] == gt_traj.timestamps.shape[0]: + pred_traj.timestamps = gt_traj.timestamps + else: + print(pred_traj.timestamps.shape[0], gt_traj.timestamps.shape[0]) + + gt_traj, pred_traj = sync.associate_trajectories(gt_traj, pred_traj) + + # ATE + traj_ref = gt_traj + traj_est = pred_traj + + ate_result = main_ape.ape( + traj_ref, + traj_est, + est_name="traj", + pose_relation=PoseRelation.translation_part, + align=True, + correct_scale=True, + ) + + ate = ate_result.stats["rmse"] + + # RPE rotation and translation + delta_list = [1] + rpe_rots, rpe_transs = [], [] + for delta in delta_list: + rpe_rots_result = main_rpe.rpe( + traj_ref, + traj_est, + est_name="traj", + pose_relation=PoseRelation.rotation_angle_deg, + align=True, + correct_scale=True, + delta=delta, + delta_unit=Unit.frames, + rel_delta_tol=0.01, + all_pairs=True, + ) + + rot = rpe_rots_result.stats["rmse"] + rpe_rots.append(rot) + + for delta in delta_list: + rpe_transs_result = main_rpe.rpe( + traj_ref, + traj_est, + est_name="traj", + pose_relation=PoseRelation.translation_part, + align=True, + correct_scale=True, + delta=delta, + delta_unit=Unit.frames, 
+ rel_delta_tol=0.01, + all_pairs=True, + ) + + trans = rpe_transs_result.stats["rmse"] + rpe_transs.append(trans) + + rpe_trans, rpe_rot = np.mean(rpe_transs), np.mean(rpe_rots) + # with open(filename, "w+") as f: + # f.write(f"Seq: {seq} \n\n") + # f.write(f"{ate_result}") + # f.write(f"{rpe_rots_result}") + # f.write(f"{rpe_transs_result}") + + # print(f"Save results to {filename}") + return ate, rpe_trans, rpe_rot + + +def best_plotmode(traj): + _, i1, i2 = np.argsort(np.var(traj.positions_xyz, axis=0)) + plot_axes = "xyz"[i2] + "xyz"[i1] + return getattr(plot.PlotMode, plot_axes) + + +def plot_trajectory( + pred_traj, gt_traj=None, title="", filename="", align=True, correct_scale=True +): + pred_traj = make_traj(pred_traj) + + if gt_traj is not None: + gt_traj = make_traj(gt_traj) + if pred_traj.timestamps.shape[0] == gt_traj.timestamps.shape[0]: + pred_traj.timestamps = gt_traj.timestamps + else: + print("WARNING", pred_traj.timestamps.shape[0], gt_traj.timestamps.shape[0]) + + gt_traj, pred_traj = sync.associate_trajectories(gt_traj, pred_traj) + + if align: + pred_traj.align(gt_traj, correct_scale=correct_scale) + + plot_collection = plot.PlotCollection("PlotCol") + fig = plt.figure(figsize=(8, 8)) + plot_mode = best_plotmode(gt_traj if (gt_traj is not None) else pred_traj) + ax = plot.prepare_axis(fig, plot_mode) + ax.set_title(title) + if gt_traj is not None: + plot.traj(ax, plot_mode, gt_traj, "--", "gray", "Ground Truth") + plot.traj(ax, plot_mode, pred_traj, "-", "blue", "Predicted") + plot_collection.add_figure("traj_error", fig) + plot_collection.export(filename, confirm_overwrite=False) + plt.close(fig=fig) + print(f"Saved trajectory to {filename.replace('.png','')}_traj_error.png") + + + +def save_trajectory_tum_format(traj, filename): + traj = make_traj(traj) + tostr = lambda a: " ".join(map(str, a)) + with Path(filename).open("w") as f: + for i in range(traj.num_poses): + f.write( + f"{traj.timestamps[i]} {tostr(traj.positions_xyz[i])} {tostr(traj.orientations_quat_wxyz[i][[0,1,2,3]])}\n" + ) + print(f"Saved trajectory to {filename}") + + +def extract_metrics(file_path): + with open(file_path, 'r') as file: + content = file.read() + + # Extract metrics using regex + ate_match = re.search(r'APE w.r.t. translation part \(m\).*?rmse\s+([0-9.]+)', content, re.DOTALL) + rpe_trans_match = re.search(r'RPE w.r.t. translation part \(m\).*?rmse\s+([0-9.]+)', content, re.DOTALL) + rpe_rot_match = re.search(r'RPE w.r.t. 
rotation angle in degrees \(deg\).*?rmse\s+([0-9.]+)', content, re.DOTALL) + + ate = float(ate_match.group(1)) if ate_match else 0.0 + rpe_trans = float(rpe_trans_match.group(1)) if rpe_trans_match else 0.0 + rpe_rot = float(rpe_rot_match.group(1)) if rpe_rot_match else 0.0 + + return ate, rpe_trans, rpe_rot + +def process_directory(directory): + results = [] + for root, _, files in os.walk(directory): + if files is not None: + files = sorted(files) + for file in files: + if file.endswith('_metric.txt'): + file_path = os.path.join(root, file) + seq_name = file.replace('_eval_metric.txt', '') + ate, rpe_trans, rpe_rot = extract_metrics(file_path) + results.append((seq_name, ate, rpe_trans, rpe_rot)) + + return results + +def calculate_averages(results): + total_ate = sum(r[1] for r in results) + total_rpe_trans = sum(r[2] for r in results) + total_rpe_rot = sum(r[3] for r in results) + count = len(results) + + if count == 0: + return 0.0, 0.0, 0.0 + + avg_ate = total_ate / count + avg_rpe_trans = total_rpe_trans / count + avg_rpe_rot = total_rpe_rot / count + + return avg_ate, avg_rpe_trans, avg_rpe_rot diff --git a/scripts/get_testing_psnr_davis.py b/scripts/get_testing_psnr_davis.py new file mode 100644 index 0000000000000000000000000000000000000000..452582c33173d57952bec06a0ae4524ad7220d48 --- /dev/null +++ b/scripts/get_testing_psnr_davis.py @@ -0,0 +1,23 @@ +import os + +root = 'results/davis_rearranged' +exps = ['testing_pnsr_4000'] +results = {} +for exp in exps: + results[exp] = {} +for scene in sorted(os.listdir(root)): + if os.path.isdir(os.path.join(root, scene)): + for exp in exps: + train_log = os.path.join(root, scene, exp, 'test_log.txt') + if os.path.exists(train_log): + with open(train_log, 'r') as file: + data = file.read() + last_line = data.strip().split('\n')[-1] + last_number = float(last_line.split()[-1]) + results[exp][scene] = last_number + +print("Scene & " + " & ".join(results[exps[0]].keys()).replace('_', '-') + "& average") +for exp in exps: + avg_psnr = sum(results[exp].values()) / len(results[exp].values()) if results[exp].values() else 0 + print(f"PSNR & " + " & ".join(f"{results[exp].get(scene, 'N/A'):.2f}" for scene in results[exps[0]].keys()) + f" & {avg_psnr:.2f} ") + diff --git a/scripts/get_testing_psnr_sintel.py b/scripts/get_testing_psnr_sintel.py new file mode 100644 index 0000000000000000000000000000000000000000..0d9834de2bad44cc6d7ec9546d8c4a0f42bdb1ec --- /dev/null +++ b/scripts/get_testing_psnr_sintel.py @@ -0,0 +1,24 @@ +import os + +root = 'results/sintel_rearranged' +exps = ['testing_pnsr_4000'] +results = {} +for exp in exps: + results[exp] = {} +for scene in sorted(os.listdir(root)): + if os.path.isdir(os.path.join(root, scene)): + for exp in exps: + train_log = os.path.join(root, scene, exp, 'test_log.txt') + if os.path.exists(train_log): + with open(train_log, 'r') as file: + data = file.read() + last_line = data.strip().split('\n')[-1] + last_number = float(last_line.split()[-1]) + results[exp][scene] = last_number + print(f'{scene},{exp[:10]}: {last_number}') + +print("Scene & " + " & ".join(results[exps[0]].keys()).replace('_', '-') + "& average") +for exp in exps: + avg_psnr = sum(results[exp].values()) / len(results[exp].values()) if results[exp].values() else 0 + print(f"PSNR & " + " & ".join(f"{results[exp].get(scene, 'N/A'):.2f}" for scene in results[exps[0]].keys()) + f" & {avg_psnr:.2f} ") + diff --git a/scripts/rendering_davis.sh b/scripts/rendering_davis.sh new file mode 100755 index 
0000000000000000000000000000000000000000..96655210d1317788b87ae898a29ebb4f9bbd7472 --- /dev/null +++ b/scripts/rendering_davis.sh @@ -0,0 +1,69 @@ +#! /bin/bash + +GPU_ID=0 +DATA_ROOT_DIR="results" +DATASETS=( + davis_rearranged + ) + +SCENES=( + blackswan + camel + car-shadow + dog + horsejump-high + motocross-jump + parkour + soapbox + ) + +N_VIEWS=( + 50 + 50 + 40 + 50 + 50 + 40 + 50 + 50 + ) + +# increase iteration to get better metrics (e.g. gs_train_iter=5000) +gs_train_iter=4000 +tag="rendering_all_frames" + +for i in "${!SCENES[@]}"; do + for DATASET in "${DATASETS[@]}"; do + SCENE=${SCENES[$i]} + N_VIEW=${N_VIEWS[$i]} + # SOURCE_PATH must be Absolute path + SOURCE_PATH=${DATA_ROOT_DIR}/${DATASET}/${SCENE}/ + MODEL_PATH=${DATA_ROOT_DIR}/${DATASET}/${SCENE}/${tag}_${gs_train_iter}/ + + + CMD_T="CUDA_VISIBLE_DEVICES=${GPU_ID} python -W ignore ./train_gui.py \ + -s ${SOURCE_PATH} \ + -m ${MODEL_PATH} \ + --iter ${gs_train_iter} \ + --dataset davis \ + --gt_dynamic_mask data/davis/DAVIS/Annotations/480p \ + " + + CMD_RI="CUDA_VISIBLE_DEVICES=${GPU_ID} python -W ignore ./render.py \ + -s ${SOURCE_PATH} \ + -m ${MODEL_PATH} \ + --n_views ${N_VIEW} \ + --scene ${SCENE} \ + --iter ${gs_train_iter} \ + --eval \ + --get_video \ + " + + echo "========= ${DATASET}/${SCENE}: Train: jointly optimize pose with dynamic masking =========" + echo $CMD_T + eval $CMD_T + echo "========= ${DATASET}/${SCENE}: Render:Rendering Static Scene Reconstruction =========" + echo $CMD_RI + eval $CMD_RI + done +done diff --git a/scripts/rendering_sintel.sh b/scripts/rendering_sintel.sh new file mode 100755 index 0000000000000000000000000000000000000000..50f287db9f7ed61eed67ef02e6e42848e9e5d30a --- /dev/null +++ b/scripts/rendering_sintel.sh @@ -0,0 +1,81 @@ +#! /bin/bash + +GPU_ID=0 +DATA_ROOT_DIR="results" +DATASETS=( + sintel_rearranged + ) + +SCENES=( + alley_2 + ambush_4 + ambush_5 + ambush_6 + cave_2 + cave_4 + market_2 + market_5 + market_6 + shaman_3 + sleeping_1 + sleeping_2 + temple_2 + temple_3 + ) + +N_VIEWS=( + 50 + 33 + 50 + 20 + 50 + 50 + 50 + 50 + 40 + 50 + 50 + 50 + 50 + 50 + ) + +# increase iteration to get better metrics (e.g. gs_train_iter=5000) +gs_train_iter=4000 +tag="rendering_all_frames" + +for i in "${!SCENES[@]}"; do + for DATASET in "${DATASETS[@]}"; do + SCENE=${SCENES[$i]} + N_VIEW=${N_VIEWS[$i]} + # SOURCE_PATH must be Absolute path + SOURCE_PATH=${DATA_ROOT_DIR}/${DATASET}/${SCENE}/ + MODEL_PATH=${DATA_ROOT_DIR}/${DATASET}/${SCENE}/${tag}_${gs_train_iter}/ + + # # ----- (1) Train: jointly optimize pose ----- + CMD_T="CUDA_VISIBLE_DEVICES=${GPU_ID} python -W ignore ./train_gui.py \ + -s ${SOURCE_PATH} \ + -m ${MODEL_PATH} \ + --iter ${gs_train_iter} \ + --dataset sintel \ + --gt_dynamic_mask data/sintel/training/dynamic_label_perfect \ + " + + CMD_RI="CUDA_VISIBLE_DEVICES=${GPU_ID} python -W ignore ./render.py \ + -s ${SOURCE_PATH} \ + -m ${MODEL_PATH} \ + --n_views ${N_VIEW} \ + --scene ${SCENE} \ + --iter ${gs_train_iter} \ + --eval \ + --get_video \ + " + + echo "========= ${DATASET}/${SCENE}: Train: jointly optimize pose with dynamic masking =========" + echo $CMD_T + eval $CMD_T + echo "========= ${DATASET}/${SCENE}: Render:Rendering Static Scene Reconstruction =========" + echo $CMD_RI + eval $CMD_RI + done +done diff --git a/scripts/testing_psnr_davis.sh b/scripts/testing_psnr_davis.sh new file mode 100755 index 0000000000000000000000000000000000000000..637a001c7dbbd416ba39d0ceab41dca91f45c104 --- /dev/null +++ b/scripts/testing_psnr_davis.sh @@ -0,0 +1,61 @@ +#! 
/bin/bash + +GPU_ID=0 +DATA_ROOT_DIR="results" +DATASETS=( + davis_rearranged + ) + +SCENES=( + blackswan + camel + car-shadow + dog + horsejump-high + motocross-jump + parkour + soapbox + ) + +N_VIEWS=( + 50 + 50 + 40 + 50 + 50 + 40 + 50 + 50 + ) + +# increase iteration to get better metrics (e.g. gs_train_iter=5000) +gs_train_iter=4000 +tag="testing_pnsr" + +for i in "${!SCENES[@]}"; do + for DATASET in "${DATASETS[@]}"; do + SCENE=${SCENES[$i]} + N_VIEW=${N_VIEWS[$i]} + # SOURCE_PATH must be Absolute path + SOURCE_PATH=${DATA_ROOT_DIR}/${DATASET}/${SCENE}/ + MODEL_PATH=${DATA_ROOT_DIR}/${DATASET}/${SCENE}/${tag}_${gs_train_iter}/ + + + CMD_T="CUDA_VISIBLE_DEVICES=${GPU_ID} python -W ignore ./train_test_psnr.py \ + -s ${SOURCE_PATH} \ + -m ${MODEL_PATH} \ + --n_views ${N_VIEW} \ + --scene ${SCENE} \ + --iter ${gs_train_iter} \ + --optim_pose \ + --dataset davis \ + --gt_dynamic_mask data/davis/DAVIS/Annotations/480p \ + " + + echo "========= ${DATASET}/${SCENE}: Train: jointly optimize pose with dynamic masking =========" + echo $CMD_T + eval $CMD_T + done +done + +python scripts/get_testing_psnr_davis.py \ No newline at end of file diff --git a/scripts/testing_psnr_sintel.sh b/scripts/testing_psnr_sintel.sh new file mode 100755 index 0000000000000000000000000000000000000000..b2eb230a420b770d46a322a17544e4d93a984a64 --- /dev/null +++ b/scripts/testing_psnr_sintel.sh @@ -0,0 +1,73 @@ +#! /bin/bash + +GPU_ID=0 +DATA_ROOT_DIR="results" +DATASETS=( + sintel_rearranged + ) + +SCENES=( + alley_2 + ambush_4 + ambush_5 + ambush_6 + cave_2 + cave_4 + market_2 + market_5 + market_6 + shaman_3 + sleeping_1 + sleeping_2 + temple_2 + temple_3 + ) + +N_VIEWS=( + 50 + 33 + 50 + 20 + 50 + 50 + 50 + 50 + 40 + 50 + 50 + 50 + 50 + 50 + ) + +# increase iteration to get better metrics (e.g. 
gs_train_iter=5000) +gs_train_iter=4000 +tag="testing_pnsr" + +for i in "${!SCENES[@]}"; do + for DATASET in "${DATASETS[@]}"; do + SCENE=${SCENES[$i]} + N_VIEW=${N_VIEWS[$i]} + # SOURCE_PATH must be Absolute path + SOURCE_PATH=${DATA_ROOT_DIR}/${DATASET}/${SCENE}/ + MODEL_PATH=${DATA_ROOT_DIR}/${DATASET}/${SCENE}/${tag}_${gs_train_iter}/ + + # # ----- (1) Train: jointly optimize pose ----- + CMD_T="CUDA_VISIBLE_DEVICES=${GPU_ID} python -W ignore ./train_test_psnr.py \ + -s ${SOURCE_PATH} \ + -m ${MODEL_PATH} \ + --n_views ${N_VIEW} \ + --scene ${SCENE} \ + --iter ${gs_train_iter} \ + --optim_pose \ + --dataset sintel \ + --gt_dynamic_mask data/sintel/training/dynamic_label_perfect \ + " + + echo "========= ${DATASET}/${SCENE}: Train: jointly optimize pose with dynamic masking =========" + echo $CMD_T + eval $CMD_T + done + done + +python scripts/get_testing_psnr_sintel.py \ No newline at end of file diff --git a/submodules/diff-gaussian-rasterization/.gitignore b/submodules/diff-gaussian-rasterization/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..1e1c4ca8a718b5cc403d16441996c98214bb30cb --- /dev/null +++ b/submodules/diff-gaussian-rasterization/.gitignore @@ -0,0 +1,3 @@ +build/ +diff_gaussian_rasterization.egg-info/ +dist/ diff --git a/submodules/diff-gaussian-rasterization/.gitmodules b/submodules/diff-gaussian-rasterization/.gitmodules new file mode 100644 index 0000000000000000000000000000000000000000..4553c29f4224a9a8723482bc9aca759a97693a64 --- /dev/null +++ b/submodules/diff-gaussian-rasterization/.gitmodules @@ -0,0 +1,3 @@ +[submodule "third_party/glm"] + path = third_party/glm + url = https://github.com/g-truc/glm.git diff --git a/submodules/diff-gaussian-rasterization/CMakeLists.txt b/submodules/diff-gaussian-rasterization/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..f8cf822e83c5f8d15288d497f5c3bb138cfd8c48 --- /dev/null +++ b/submodules/diff-gaussian-rasterization/CMakeLists.txt @@ -0,0 +1,36 @@ +# +# Copyright (C) 2023, Inria +# GRAPHDECO research group, https://team.inria.fr/graphdeco +# All rights reserved. +# +# This software is free for non-commercial, research and evaluation use +# under the terms of the LICENSE.md file. 
+# +# For inquiries contact george.drettakis@inria.fr +# + +cmake_minimum_required(VERSION 3.20) + +project(DiffRast LANGUAGES CUDA CXX) + +set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_EXTENSIONS OFF) +set(CMAKE_CUDA_STANDARD 17) + +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + +add_library(CudaRasterizer + cuda_rasterizer/backward.h + cuda_rasterizer/backward.cu + cuda_rasterizer/forward.h + cuda_rasterizer/forward.cu + cuda_rasterizer/auxiliary.h + cuda_rasterizer/rasterizer_impl.cu + cuda_rasterizer/rasterizer_impl.h + cuda_rasterizer/rasterizer.h +) + +set_target_properties(CudaRasterizer PROPERTIES CUDA_ARCHITECTURES "70;75;86") + +target_include_directories(CudaRasterizer PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/cuda_rasterizer) +target_include_directories(CudaRasterizer PRIVATE third_party/glm ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES}) diff --git a/submodules/diff-gaussian-rasterization/LICENSE.md b/submodules/diff-gaussian-rasterization/LICENSE.md new file mode 100644 index 0000000000000000000000000000000000000000..c869e695fa63bfde6f887d63a24a2a71f03480ac --- /dev/null +++ b/submodules/diff-gaussian-rasterization/LICENSE.md @@ -0,0 +1,83 @@ +Gaussian-Splatting License +=========================== + +**Inria** and **the Max Planck Institut for Informatik (MPII)** hold all the ownership rights on the *Software* named **gaussian-splatting**. +The *Software* is in the process of being registered with the Agence pour la Protection des +Programmes (APP). + +The *Software* is still being developed by the *Licensor*. + +*Licensor*'s goal is to allow the research community to use, test and evaluate +the *Software*. + +## 1. Definitions + +*Licensee* means any person or entity that uses the *Software* and distributes +its *Work*. + +*Licensor* means the owners of the *Software*, i.e Inria and MPII + +*Software* means the original work of authorship made available under this +License ie gaussian-splatting. + +*Work* means the *Software* and any additions to or derivative works of the +*Software* that are made available under this License. + + +## 2. Purpose +This license is intended to define the rights granted to the *Licensee* by +Licensors under the *Software*. + +## 3. Rights granted + +For the above reasons Licensors have decided to distribute the *Software*. +Licensors grant non-exclusive rights to use the *Software* for research purposes +to research users (both academic and industrial), free of charge, without right +to sublicense.. The *Software* may be used "non-commercially", i.e., for research +and/or evaluation purposes only. + +Subject to the terms and conditions of this License, you are granted a +non-exclusive, royalty-free, license to reproduce, prepare derivative works of, +publicly display, publicly perform and distribute its *Work* and any resulting +derivative works in any form. + +## 4. Limitations + +**4.1 Redistribution.** You may reproduce or distribute the *Work* only if (a) you do +so under this License, (b) you include a complete copy of this License with +your distribution, and (c) you retain without modification any copyright, +patent, trademark, or attribution notices that are present in the *Work*. + +**4.2 Derivative Works.** You may specify that additional or different terms apply +to the use, reproduction, and distribution of your derivative works of the *Work* +("Your Terms") only if (a) Your Terms provide that the use limitation in +Section 2 applies to your derivative works, and (b) you identify the specific +derivative works that are subject to Your Terms. 
Notwithstanding Your Terms, +this License (including the redistribution requirements in Section 3.1) will +continue to apply to the *Work* itself. + +**4.3** Any other use without of prior consent of Licensors is prohibited. Research +users explicitly acknowledge having received from Licensors all information +allowing to appreciate the adequacy between of the *Software* and their needs and +to undertake all necessary precautions for its execution and use. + +**4.4** The *Software* is provided both as a compiled library file and as source +code. In case of using the *Software* for a publication or other results obtained +through the use of the *Software*, users are strongly encouraged to cite the +corresponding publications as explained in the documentation of the *Software*. + +## 5. Disclaimer + +THE USER CANNOT USE, EXPLOIT OR DISTRIBUTE THE *SOFTWARE* FOR COMMERCIAL PURPOSES +WITHOUT PRIOR AND EXPLICIT CONSENT OF LICENSORS. YOU MUST CONTACT INRIA FOR ANY +UNAUTHORIZED USE: stip-sophia.transfert@inria.fr . ANY SUCH ACTION WILL +CONSTITUTE A FORGERY. THIS *SOFTWARE* IS PROVIDED "AS IS" WITHOUT ANY WARRANTIES +OF ANY NATURE AND ANY EXPRESS OR IMPLIED WARRANTIES, WITH REGARDS TO COMMERCIAL +USE, PROFESSIONNAL USE, LEGAL OR NOT, OR OTHER, OR COMMERCIALISATION OR +ADAPTATION. UNLESS EXPLICITLY PROVIDED BY LAW, IN NO EVENT, SHALL INRIA OR THE +AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES, LOSS OF USE, DATA, OR PROFITS OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING FROM, OUT OF OR +IN CONNECTION WITH THE *SOFTWARE* OR THE USE OR OTHER DEALINGS IN THE *SOFTWARE*. diff --git a/submodules/diff-gaussian-rasterization/README.md b/submodules/diff-gaussian-rasterization/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6e165b0bb683a987485e2730216d5fa2919288ef --- /dev/null +++ b/submodules/diff-gaussian-rasterization/README.md @@ -0,0 +1,19 @@ +# Differential Gaussian Rasterization + +Used as the rasterization engine for the paper "3D Gaussian Splatting for Real-Time Rendering of Radiance Fields". If you can make use of it in your own research, please be so kind to cite us. + +
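+A minimal usage sketch of the Python bindings, following the upstream 3D-GS interface (`GaussianRasterizationSettings` / `GaussianRasterizer`). The camera matrices below are identity placeholders rather than a real view/projection pair, and this fork may expose additional inputs or outputs:
+
+```python
+import math
+import torch
+from diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianRasterizer
+
+N, H, W = 10000, 480, 640
+tanfov = math.tan(0.5 * math.radians(60))
+
+settings = GaussianRasterizationSettings(
+    image_height=H, image_width=W,
+    tanfovx=tanfov, tanfovy=tanfov,
+    bg=torch.zeros(3, device="cuda"),
+    scale_modifier=1.0,
+    viewmatrix=torch.eye(4, device="cuda"),   # world-to-view (placeholder)
+    projmatrix=torch.eye(4, device="cuda"),   # full projection (placeholder)
+    sh_degree=0,
+    campos=torch.zeros(3, device="cuda"),
+    prefiltered=False,
+    debug=False,
+)
+rasterizer = GaussianRasterizer(raster_settings=settings)
+
+means3D = torch.randn(N, 3, device="cuda") + torch.tensor([0.0, 0.0, 5.0], device="cuda")
+means2D = torch.zeros(N, 3, device="cuda", requires_grad=True)  # receives screen-space grads
+image, radii = rasterizer(
+    means3D=means3D,
+    means2D=means2D,
+    shs=torch.rand(N, 1, 3, device="cuda"),               # degree-0 SH = DC color only
+    opacities=torch.full((N, 1), 0.5, device="cuda"),
+    scales=torch.full((N, 3), 0.01, device="cuda"),
+    rotations=torch.cat([torch.ones(N, 1), torch.zeros(N, 3)], dim=1).cuda(),
+)
+```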
+<section class="section" id="BibTeX">
+  <div class="container is-max-desktop content">
+    <h2 class="title">BibTeX</h2>
+    <pre><code>@Article{kerbl3Dgaussians,
+      author       = {Kerbl, Bernhard and Kopanas, Georgios and Leimk{\"u}hler, Thomas and Drettakis, George},
+      title        = {3D Gaussian Splatting for Real-Time Radiance Field Rendering},
+      journal      = {ACM Transactions on Graphics},
+      number       = {4},
+      volume       = {42},
+      month        = {July},
+      year         = {2023},
+      url          = {https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/}
+}</code></pre>
+  </div>
+</section>
\ No newline at end of file diff --git a/submodules/diff-gaussian-rasterization/cuda_rasterizer/auxiliary.h b/submodules/diff-gaussian-rasterization/cuda_rasterizer/auxiliary.h new file mode 100644 index 0000000000000000000000000000000000000000..40904cb644399b7dd62ab5da631b0605ce048265 --- /dev/null +++ b/submodules/diff-gaussian-rasterization/cuda_rasterizer/auxiliary.h @@ -0,0 +1,175 @@ +/* + * Copyright (C) 2023, Inria + * GRAPHDECO research group, https://team.inria.fr/graphdeco + * All rights reserved. + * + * This software is free for non-commercial, research and evaluation use + * under the terms of the LICENSE.md file. + * + * For inquiries contact george.drettakis@inria.fr + */ + +#ifndef CUDA_RASTERIZER_AUXILIARY_H_INCLUDED +#define CUDA_RASTERIZER_AUXILIARY_H_INCLUDED + +#include "config.h" +#include "stdio.h" + +#define BLOCK_SIZE (BLOCK_X * BLOCK_Y) +#define NUM_WARPS (BLOCK_SIZE/32) + +// Spherical harmonics coefficients +__device__ const float SH_C0 = 0.28209479177387814f; +__device__ const float SH_C1 = 0.4886025119029199f; +__device__ const float SH_C2[] = { + 1.0925484305920792f, + -1.0925484305920792f, + 0.31539156525252005f, + -1.0925484305920792f, + 0.5462742152960396f +}; +__device__ const float SH_C3[] = { + -0.5900435899266435f, + 2.890611442640554f, + -0.4570457994644658f, + 0.3731763325901154f, + -0.4570457994644658f, + 1.445305721320277f, + -0.5900435899266435f +}; + +__forceinline__ __device__ float ndc2Pix(float v, int S) +{ + return ((v + 1.0) * S - 1.0) * 0.5; +} + +__forceinline__ __device__ void getRect(const float2 p, int max_radius, uint2& rect_min, uint2& rect_max, dim3 grid) +{ + rect_min = { + min(grid.x, max((int)0, (int)((p.x - max_radius) / BLOCK_X))), + min(grid.y, max((int)0, (int)((p.y - max_radius) / BLOCK_Y))) + }; + rect_max = { + min(grid.x, max((int)0, (int)((p.x + max_radius + BLOCK_X - 1) / BLOCK_X))), + min(grid.y, max((int)0, (int)((p.y + max_radius + BLOCK_Y - 1) / BLOCK_Y))) + }; +} + +__forceinline__ __device__ float3 transformPoint4x3(const float3& p, const float* matrix) +{ + float3 transformed = { + matrix[0] * p.x + matrix[4] * p.y + matrix[8] * p.z + matrix[12], + matrix[1] * p.x + matrix[5] * p.y + matrix[9] * p.z + matrix[13], + matrix[2] * p.x + matrix[6] * p.y + matrix[10] * p.z + matrix[14], + }; + return transformed; +} + +__forceinline__ __device__ float4 transformPoint4x4(const float3& p, const float* matrix) +{ + float4 transformed = { + matrix[0] * p.x + matrix[4] * p.y + matrix[8] * p.z + matrix[12], + matrix[1] * p.x + matrix[5] * p.y + matrix[9] * p.z + matrix[13], + matrix[2] * p.x + matrix[6] * p.y + matrix[10] * p.z + matrix[14], + matrix[3] * p.x + matrix[7] * p.y + matrix[11] * p.z + matrix[15] + }; + return transformed; +} + +__forceinline__ __device__ float3 transformVec4x3(const float3& p, const float* matrix) +{ + float3 transformed = { + matrix[0] * p.x + matrix[4] * p.y + matrix[8] * p.z, + matrix[1] * p.x + matrix[5] * p.y + matrix[9] * p.z, + matrix[2] * p.x + matrix[6] * p.y + matrix[10] * p.z, + }; + return transformed; +} + +__forceinline__ __device__ float3 transformVec4x3Transpose(const float3& p, const float* matrix) +{ + float3 transformed = { + matrix[0] * p.x + matrix[1] * p.y + matrix[2] * p.z, + matrix[4] * p.x + matrix[5] * p.y + matrix[6] * p.z, + matrix[8] * p.x + matrix[9] * p.y + matrix[10] * p.z, + }; + return transformed; +} + +__forceinline__ __device__ float dnormvdz(float3 v, float3 dv) +{ + float sum2 = v.x * v.x + v.y * v.y + v.z * v.z; + float invsum32 = 1.0f / sqrt(sum2 * sum2 * 
sum2); + float dnormvdz = (-v.x * v.z * dv.x - v.y * v.z * dv.y + (sum2 - v.z * v.z) * dv.z) * invsum32; + return dnormvdz; +} + +__forceinline__ __device__ float3 dnormvdv(float3 v, float3 dv) +{ + float sum2 = v.x * v.x + v.y * v.y + v.z * v.z; + float invsum32 = 1.0f / sqrt(sum2 * sum2 * sum2); + + float3 dnormvdv; + dnormvdv.x = ((+sum2 - v.x * v.x) * dv.x - v.y * v.x * dv.y - v.z * v.x * dv.z) * invsum32; + dnormvdv.y = (-v.x * v.y * dv.x + (sum2 - v.y * v.y) * dv.y - v.z * v.y * dv.z) * invsum32; + dnormvdv.z = (-v.x * v.z * dv.x - v.y * v.z * dv.y + (sum2 - v.z * v.z) * dv.z) * invsum32; + return dnormvdv; +} + +__forceinline__ __device__ float4 dnormvdv(float4 v, float4 dv) +{ + float sum2 = v.x * v.x + v.y * v.y + v.z * v.z + v.w * v.w; + float invsum32 = 1.0f / sqrt(sum2 * sum2 * sum2); + + float4 vdv = { v.x * dv.x, v.y * dv.y, v.z * dv.z, v.w * dv.w }; + float vdv_sum = vdv.x + vdv.y + vdv.z + vdv.w; + float4 dnormvdv; + dnormvdv.x = ((sum2 - v.x * v.x) * dv.x - v.x * (vdv_sum - vdv.x)) * invsum32; + dnormvdv.y = ((sum2 - v.y * v.y) * dv.y - v.y * (vdv_sum - vdv.y)) * invsum32; + dnormvdv.z = ((sum2 - v.z * v.z) * dv.z - v.z * (vdv_sum - vdv.z)) * invsum32; + dnormvdv.w = ((sum2 - v.w * v.w) * dv.w - v.w * (vdv_sum - vdv.w)) * invsum32; + return dnormvdv; +} + +__forceinline__ __device__ float sigmoid(float x) +{ + return 1.0f / (1.0f + expf(-x)); +} + +__forceinline__ __device__ bool in_frustum(int idx, + const float* orig_points, + const float* viewmatrix, + const float* projmatrix, + bool prefiltered, + float3& p_view) +{ + float3 p_orig = { orig_points[3 * idx], orig_points[3 * idx + 1], orig_points[3 * idx + 2] }; + + // Bring points to screen space + float4 p_hom = transformPoint4x4(p_orig, projmatrix); + float p_w = 1.0f / (p_hom.w + 0.0000001f); + float3 p_proj = { p_hom.x * p_w, p_hom.y * p_w, p_hom.z * p_w }; + p_view = transformPoint4x3(p_orig, viewmatrix); + + if (p_view.z <= 0.01f)// || ((p_proj.x < -1.3 || p_proj.x > 1.3 || p_proj.y < -1.3 || p_proj.y > 1.3))) + { + if (prefiltered) + { + printf("Point is filtered although prefiltered is set. This shouldn't happen!"); + __trap(); + } + return false; + } + return true; +} + +#define CHECK_CUDA(A, debug) \ +A; if(debug) { \ +auto ret = cudaDeviceSynchronize(); \ +if (ret != cudaSuccess) { \ +std::cerr << "\n[CUDA ERROR] in " << __FILE__ << "\nLine " << __LINE__ << ": " << cudaGetErrorString(ret); \ +throw std::runtime_error(cudaGetErrorString(ret)); \ +} \ +} + +#endif \ No newline at end of file diff --git a/submodules/diff-gaussian-rasterization/cuda_rasterizer/backward.cu b/submodules/diff-gaussian-rasterization/cuda_rasterizer/backward.cu new file mode 100644 index 0000000000000000000000000000000000000000..4aa41e1cb856e429dc47ff4e4380ee76f96aaebe --- /dev/null +++ b/submodules/diff-gaussian-rasterization/cuda_rasterizer/backward.cu @@ -0,0 +1,657 @@ +/* + * Copyright (C) 2023, Inria + * GRAPHDECO research group, https://team.inria.fr/graphdeco + * All rights reserved. + * + * This software is free for non-commercial, research and evaluation use + * under the terms of the LICENSE.md file. + * + * For inquiries contact george.drettakis@inria.fr + */ + +#include "backward.h" +#include "auxiliary.h" +#include <cooperative_groups.h> +#include <cooperative_groups/reduce.h> +namespace cg = cooperative_groups; + +// Backward pass for conversion of spherical harmonics to RGB for +// each Gaussian.
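+// Chain rule sketch for what follows: the forward pass computes
+//   rgb = sum_k f_k(dir) * sh_k,   with dir = normalize(mean - campos),
+// so dL/dsh_k = f_k(dir) * dL/drgb, while the mean receives a gradient through
+// df/ddir composed with the Jacobian of the normalization (dnormvdv in auxiliary.h).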
+__device__ void computeColorFromSH(int idx, int deg, int max_coeffs, const glm::vec3* means, glm::vec3 campos, const float* shs, const bool* clamped, const glm::vec3* dL_dcolor, glm::vec3* dL_dmeans, glm::vec3* dL_dshs) +{ + // Compute intermediate values, as it is done during forward + glm::vec3 pos = means[idx]; + glm::vec3 dir_orig = pos - campos; + glm::vec3 dir = dir_orig / glm::length(dir_orig); + + glm::vec3* sh = ((glm::vec3*)shs) + idx * max_coeffs; + + // Use PyTorch rule for clamping: if clamping was applied, + // gradient becomes 0. + glm::vec3 dL_dRGB = dL_dcolor[idx]; + dL_dRGB.x *= clamped[3 * idx + 0] ? 0 : 1; + dL_dRGB.y *= clamped[3 * idx + 1] ? 0 : 1; + dL_dRGB.z *= clamped[3 * idx + 2] ? 0 : 1; + + glm::vec3 dRGBdx(0, 0, 0); + glm::vec3 dRGBdy(0, 0, 0); + glm::vec3 dRGBdz(0, 0, 0); + float x = dir.x; + float y = dir.y; + float z = dir.z; + + // Target location for this Gaussian to write SH gradients to + glm::vec3* dL_dsh = dL_dshs + idx * max_coeffs; + + // No tricks here, just high school-level calculus. + float dRGBdsh0 = SH_C0; + dL_dsh[0] = dRGBdsh0 * dL_dRGB; + if (deg > 0) + { + float dRGBdsh1 = -SH_C1 * y; + float dRGBdsh2 = SH_C1 * z; + float dRGBdsh3 = -SH_C1 * x; + dL_dsh[1] = dRGBdsh1 * dL_dRGB; + dL_dsh[2] = dRGBdsh2 * dL_dRGB; + dL_dsh[3] = dRGBdsh3 * dL_dRGB; + + dRGBdx = -SH_C1 * sh[3]; + dRGBdy = -SH_C1 * sh[1]; + dRGBdz = SH_C1 * sh[2]; + + if (deg > 1) + { + float xx = x * x, yy = y * y, zz = z * z; + float xy = x * y, yz = y * z, xz = x * z; + + float dRGBdsh4 = SH_C2[0] * xy; + float dRGBdsh5 = SH_C2[1] * yz; + float dRGBdsh6 = SH_C2[2] * (2.f * zz - xx - yy); + float dRGBdsh7 = SH_C2[3] * xz; + float dRGBdsh8 = SH_C2[4] * (xx - yy); + dL_dsh[4] = dRGBdsh4 * dL_dRGB; + dL_dsh[5] = dRGBdsh5 * dL_dRGB; + dL_dsh[6] = dRGBdsh6 * dL_dRGB; + dL_dsh[7] = dRGBdsh7 * dL_dRGB; + dL_dsh[8] = dRGBdsh8 * dL_dRGB; + + dRGBdx += SH_C2[0] * y * sh[4] + SH_C2[2] * 2.f * -x * sh[6] + SH_C2[3] * z * sh[7] + SH_C2[4] * 2.f * x * sh[8]; + dRGBdy += SH_C2[0] * x * sh[4] + SH_C2[1] * z * sh[5] + SH_C2[2] * 2.f * -y * sh[6] + SH_C2[4] * 2.f * -y * sh[8]; + dRGBdz += SH_C2[1] * y * sh[5] + SH_C2[2] * 2.f * 2.f * z * sh[6] + SH_C2[3] * x * sh[7]; + + if (deg > 2) + { + float dRGBdsh9 = SH_C3[0] * y * (3.f * xx - yy); + float dRGBdsh10 = SH_C3[1] * xy * z; + float dRGBdsh11 = SH_C3[2] * y * (4.f * zz - xx - yy); + float dRGBdsh12 = SH_C3[3] * z * (2.f * zz - 3.f * xx - 3.f * yy); + float dRGBdsh13 = SH_C3[4] * x * (4.f * zz - xx - yy); + float dRGBdsh14 = SH_C3[5] * z * (xx - yy); + float dRGBdsh15 = SH_C3[6] * x * (xx - 3.f * yy); + dL_dsh[9] = dRGBdsh9 * dL_dRGB; + dL_dsh[10] = dRGBdsh10 * dL_dRGB; + dL_dsh[11] = dRGBdsh11 * dL_dRGB; + dL_dsh[12] = dRGBdsh12 * dL_dRGB; + dL_dsh[13] = dRGBdsh13 * dL_dRGB; + dL_dsh[14] = dRGBdsh14 * dL_dRGB; + dL_dsh[15] = dRGBdsh15 * dL_dRGB; + + dRGBdx += ( + SH_C3[0] * sh[9] * 3.f * 2.f * xy + + SH_C3[1] * sh[10] * yz + + SH_C3[2] * sh[11] * -2.f * xy + + SH_C3[3] * sh[12] * -3.f * 2.f * xz + + SH_C3[4] * sh[13] * (-3.f * xx + 4.f * zz - yy) + + SH_C3[5] * sh[14] * 2.f * xz + + SH_C3[6] * sh[15] * 3.f * (xx - yy)); + + dRGBdy += ( + SH_C3[0] * sh[9] * 3.f * (xx - yy) + + SH_C3[1] * sh[10] * xz + + SH_C3[2] * sh[11] * (-3.f * yy + 4.f * zz - xx) + + SH_C3[3] * sh[12] * -3.f * 2.f * yz + + SH_C3[4] * sh[13] * -2.f * xy + + SH_C3[5] * sh[14] * -2.f * yz + + SH_C3[6] * sh[15] * -3.f * 2.f * xy); + + dRGBdz += ( + SH_C3[1] * sh[10] * xy + + SH_C3[2] * sh[11] * 4.f * 2.f * yz + + SH_C3[3] * sh[12] * 3.f * (2.f * zz - xx - yy) + + SH_C3[4] * 
sh[13] * 4.f * 2.f * xz + + SH_C3[5] * sh[14] * (xx - yy)); + } + } + } + + // The view direction is an input to the computation. View direction + // is influenced by the Gaussian's mean, so SHs gradients + // must propagate back into 3D position. + glm::vec3 dL_ddir(glm::dot(dRGBdx, dL_dRGB), glm::dot(dRGBdy, dL_dRGB), glm::dot(dRGBdz, dL_dRGB)); + + // Account for normalization of direction + float3 dL_dmean = dnormvdv(float3{ dir_orig.x, dir_orig.y, dir_orig.z }, float3{ dL_ddir.x, dL_ddir.y, dL_ddir.z }); + + // Gradients of loss w.r.t. Gaussian means, but only the portion + // that is caused because the mean affects the view-dependent color. + // Additional mean gradient is accumulated in below methods. + dL_dmeans[idx] += glm::vec3(dL_dmean.x, dL_dmean.y, dL_dmean.z); +} + +// Backward version of INVERSE 2D covariance matrix computation +// (due to length launched as separate kernel before other +// backward steps contained in preprocess) +__global__ void computeCov2DCUDA(int P, + const float3* means, + const int* radii, + const float* cov3Ds, + const float h_x, float h_y, + const float tan_fovx, float tan_fovy, + const float* view_matrix, + const float* dL_dconics, + float3* dL_dmeans, + float* dL_dcov) +{ + auto idx = cg::this_grid().thread_rank(); + if (idx >= P || !(radii[idx] > 0)) + return; + + // Reading location of 3D covariance for this Gaussian + const float* cov3D = cov3Ds + 6 * idx; + + // Fetch gradients, recompute 2D covariance and relevant + // intermediate forward results needed in the backward. + float3 mean = means[idx]; + float3 dL_dconic = { dL_dconics[4 * idx], dL_dconics[4 * idx + 1], dL_dconics[4 * idx + 3] }; + float3 t = transformPoint4x3(mean, view_matrix); + + const float limx = 1.3f * tan_fovx; + const float limy = 1.3f * tan_fovy; + const float txtz = t.x / t.z; + const float tytz = t.y / t.z; + t.x = min(limx, max(-limx, txtz)) * t.z; + t.y = min(limy, max(-limy, tytz)) * t.z; + + const float x_grad_mul = txtz < -limx || txtz > limx ? 0 : 1; + const float y_grad_mul = tytz < -limy || tytz > limy ? 0 : 1; + + glm::mat3 J = glm::mat3(h_x / t.z, 0.0f, -(h_x * t.x) / (t.z * t.z), + 0.0f, h_y / t.z, -(h_y * t.y) / (t.z * t.z), + 0, 0, 0); + + glm::mat3 W = glm::mat3( + view_matrix[0], view_matrix[4], view_matrix[8], + view_matrix[1], view_matrix[5], view_matrix[9], + view_matrix[2], view_matrix[6], view_matrix[10]); + + glm::mat3 Vrk = glm::mat3( + cov3D[0], cov3D[1], cov3D[2], + cov3D[1], cov3D[3], cov3D[4], + cov3D[2], cov3D[4], cov3D[5]); + + glm::mat3 T = W * J; + + glm::mat3 cov2D = glm::transpose(T) * glm::transpose(Vrk) * T; + + // Use helper variables for 2D covariance entries. More compact. + float a = cov2D[0][0] += 0.3f; + float b = cov2D[0][1]; + float c = cov2D[1][1] += 0.3f; + + float denom = a * c - b * b; + float dL_da = 0, dL_db = 0, dL_dc = 0; + float denom2inv = 1.0f / ((denom * denom) + 0.0000001f); + + if (denom2inv != 0) + { + // Gradients of loss w.r.t. entries of 2D covariance matrix, + // given gradients of loss w.r.t. conic matrix (inverse covariance matrix). + // e.g., dL / da = dL / d_conic_a * d_conic_a / d_a + dL_da = denom2inv * (-c * c * dL_dconic.x + 2 * b * c * dL_dconic.y + (denom - a * c) * dL_dconic.z); + dL_dc = denom2inv * (-a * a * dL_dconic.z + 2 * a * b * dL_dconic.y + (denom - a * c) * dL_dconic.x); + dL_db = denom2inv * 2 * (b * c * dL_dconic.x - (denom + 2 * b * b) * dL_dconic.y + a * b * dL_dconic.z); + + // Gradients of loss L w.r.t. each 3D covariance matrix (Vrk) entry, + // given gradients w.r.t. 
2D covariance matrix (diagonal). + // cov2D = transpose(T) * transpose(Vrk) * T; + dL_dcov[6 * idx + 0] = (T[0][0] * T[0][0] * dL_da + T[0][0] * T[1][0] * dL_db + T[1][0] * T[1][0] * dL_dc); + dL_dcov[6 * idx + 3] = (T[0][1] * T[0][1] * dL_da + T[0][1] * T[1][1] * dL_db + T[1][1] * T[1][1] * dL_dc); + dL_dcov[6 * idx + 5] = (T[0][2] * T[0][2] * dL_da + T[0][2] * T[1][2] * dL_db + T[1][2] * T[1][2] * dL_dc); + + // Gradients of loss L w.r.t. each 3D covariance matrix (Vrk) entry, + // given gradients w.r.t. 2D covariance matrix (off-diagonal). + // Off-diagonal elements appear twice --> double the gradient. + // cov2D = transpose(T) * transpose(Vrk) * T; + dL_dcov[6 * idx + 1] = 2 * T[0][0] * T[0][1] * dL_da + (T[0][0] * T[1][1] + T[0][1] * T[1][0]) * dL_db + 2 * T[1][0] * T[1][1] * dL_dc; + dL_dcov[6 * idx + 2] = 2 * T[0][0] * T[0][2] * dL_da + (T[0][0] * T[1][2] + T[0][2] * T[1][0]) * dL_db + 2 * T[1][0] * T[1][2] * dL_dc; + dL_dcov[6 * idx + 4] = 2 * T[0][2] * T[0][1] * dL_da + (T[0][1] * T[1][2] + T[0][2] * T[1][1]) * dL_db + 2 * T[1][1] * T[1][2] * dL_dc; + } + else + { + for (int i = 0; i < 6; i++) + dL_dcov[6 * idx + i] = 0; + } + + // Gradients of loss w.r.t. upper 2x3 portion of intermediate matrix T + // cov2D = transpose(T) * transpose(Vrk) * T; + float dL_dT00 = 2 * (T[0][0] * Vrk[0][0] + T[0][1] * Vrk[0][1] + T[0][2] * Vrk[0][2]) * dL_da + + (T[1][0] * Vrk[0][0] + T[1][1] * Vrk[0][1] + T[1][2] * Vrk[0][2]) * dL_db; + float dL_dT01 = 2 * (T[0][0] * Vrk[1][0] + T[0][1] * Vrk[1][1] + T[0][2] * Vrk[1][2]) * dL_da + + (T[1][0] * Vrk[1][0] + T[1][1] * Vrk[1][1] + T[1][2] * Vrk[1][2]) * dL_db; + float dL_dT02 = 2 * (T[0][0] * Vrk[2][0] + T[0][1] * Vrk[2][1] + T[0][2] * Vrk[2][2]) * dL_da + + (T[1][0] * Vrk[2][0] + T[1][1] * Vrk[2][1] + T[1][2] * Vrk[2][2]) * dL_db; + float dL_dT10 = 2 * (T[1][0] * Vrk[0][0] + T[1][1] * Vrk[0][1] + T[1][2] * Vrk[0][2]) * dL_dc + + (T[0][0] * Vrk[0][0] + T[0][1] * Vrk[0][1] + T[0][2] * Vrk[0][2]) * dL_db; + float dL_dT11 = 2 * (T[1][0] * Vrk[1][0] + T[1][1] * Vrk[1][1] + T[1][2] * Vrk[1][2]) * dL_dc + + (T[0][0] * Vrk[1][0] + T[0][1] * Vrk[1][1] + T[0][2] * Vrk[1][2]) * dL_db; + float dL_dT12 = 2 * (T[1][0] * Vrk[2][0] + T[1][1] * Vrk[2][1] + T[1][2] * Vrk[2][2]) * dL_dc + + (T[0][0] * Vrk[2][0] + T[0][1] * Vrk[2][1] + T[0][2] * Vrk[2][2]) * dL_db; + + // Gradients of loss w.r.t. upper 3x2 non-zero entries of Jacobian matrix + // T = W * J + float dL_dJ00 = W[0][0] * dL_dT00 + W[0][1] * dL_dT01 + W[0][2] * dL_dT02; + float dL_dJ02 = W[2][0] * dL_dT00 + W[2][1] * dL_dT01 + W[2][2] * dL_dT02; + float dL_dJ11 = W[1][0] * dL_dT10 + W[1][1] * dL_dT11 + W[1][2] * dL_dT12; + float dL_dJ12 = W[2][0] * dL_dT10 + W[2][1] * dL_dT11 + W[2][2] * dL_dT12; + + float tz = 1.f / t.z; + float tz2 = tz * tz; + float tz3 = tz2 * tz; + + // Gradients of loss w.r.t. transformed Gaussian mean t + float dL_dtx = x_grad_mul * -h_x * tz2 * dL_dJ02; + float dL_dty = y_grad_mul * -h_y * tz2 * dL_dJ12; + float dL_dtz = -h_x * tz2 * dL_dJ00 - h_y * tz2 * dL_dJ11 + (2 * h_x * t.x) * tz3 * dL_dJ02 + (2 * h_y * t.y) * tz3 * dL_dJ12; + + // Account for transformation of mean to t + // t = transformPoint4x3(mean, view_matrix); + float3 dL_dmean = transformVec4x3Transpose({ dL_dtx, dL_dty, dL_dtz }, view_matrix); + + // Gradients of loss w.r.t. Gaussian means, but only the portion + // that is caused because the mean affects the covariance matrix. + // Additional mean gradient is accumulated in BACKWARD::preprocess. 
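+	// (Plain assignment here: this kernel runs first and initializes the
+	// mean gradient; preprocessCUDA later accumulates into it with +=.)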
+ dL_dmeans[idx] = dL_dmean; +} + +// Backward pass for the conversion of scale and rotation to a +// 3D covariance matrix for each Gaussian. +__device__ void computeCov3D(int idx, const glm::vec3 scale, float mod, const glm::vec4 rot, const float* dL_dcov3Ds, glm::vec3* dL_dscales, glm::vec4* dL_drots) +{ + // Recompute (intermediate) results for the 3D covariance computation. + glm::vec4 q = rot;// / glm::length(rot); + float r = q.x; + float x = q.y; + float y = q.z; + float z = q.w; + + glm::mat3 R = glm::mat3( + 1.f - 2.f * (y * y + z * z), 2.f * (x * y - r * z), 2.f * (x * z + r * y), + 2.f * (x * y + r * z), 1.f - 2.f * (x * x + z * z), 2.f * (y * z - r * x), + 2.f * (x * z - r * y), 2.f * (y * z + r * x), 1.f - 2.f * (x * x + y * y) + ); + + glm::mat3 S = glm::mat3(1.0f); + + glm::vec3 s = mod * scale; + S[0][0] = s.x; + S[1][1] = s.y; + S[2][2] = s.z; + + glm::mat3 M = S * R; + + const float* dL_dcov3D = dL_dcov3Ds + 6 * idx; + + glm::vec3 dunc(dL_dcov3D[0], dL_dcov3D[3], dL_dcov3D[5]); + glm::vec3 ounc = 0.5f * glm::vec3(dL_dcov3D[1], dL_dcov3D[2], dL_dcov3D[4]); + + // Convert per-element covariance loss gradients to matrix form + glm::mat3 dL_dSigma = glm::mat3( + dL_dcov3D[0], 0.5f * dL_dcov3D[1], 0.5f * dL_dcov3D[2], + 0.5f * dL_dcov3D[1], dL_dcov3D[3], 0.5f * dL_dcov3D[4], + 0.5f * dL_dcov3D[2], 0.5f * dL_dcov3D[4], dL_dcov3D[5] + ); + + // Compute loss gradient w.r.t. matrix M + // dSigma_dM = 2 * M + glm::mat3 dL_dM = 2.0f * M * dL_dSigma; + + glm::mat3 Rt = glm::transpose(R); + glm::mat3 dL_dMt = glm::transpose(dL_dM); + + // Gradients of loss w.r.t. scale + glm::vec3* dL_dscale = dL_dscales + idx; + dL_dscale->x = glm::dot(Rt[0], dL_dMt[0]); + dL_dscale->y = glm::dot(Rt[1], dL_dMt[1]); + dL_dscale->z = glm::dot(Rt[2], dL_dMt[2]); + + dL_dMt[0] *= s.x; + dL_dMt[1] *= s.y; + dL_dMt[2] *= s.z; + + // Gradients of loss w.r.t. normalized quaternion + glm::vec4 dL_dq; + dL_dq.x = 2 * z * (dL_dMt[0][1] - dL_dMt[1][0]) + 2 * y * (dL_dMt[2][0] - dL_dMt[0][2]) + 2 * x * (dL_dMt[1][2] - dL_dMt[2][1]); + dL_dq.y = 2 * y * (dL_dMt[1][0] + dL_dMt[0][1]) + 2 * z * (dL_dMt[2][0] + dL_dMt[0][2]) + 2 * r * (dL_dMt[1][2] - dL_dMt[2][1]) - 4 * x * (dL_dMt[2][2] + dL_dMt[1][1]); + dL_dq.z = 2 * x * (dL_dMt[1][0] + dL_dMt[0][1]) + 2 * r * (dL_dMt[2][0] - dL_dMt[0][2]) + 2 * z * (dL_dMt[1][2] + dL_dMt[2][1]) - 4 * y * (dL_dMt[2][2] + dL_dMt[0][0]); + dL_dq.w = 2 * r * (dL_dMt[0][1] - dL_dMt[1][0]) + 2 * x * (dL_dMt[2][0] + dL_dMt[0][2]) + 2 * y * (dL_dMt[1][2] + dL_dMt[2][1]) - 4 * z * (dL_dMt[1][1] + dL_dMt[0][0]); + + // Gradients of loss w.r.t. 
unnormalized quaternion
+	float4* dL_drot = (float4*)(dL_drots + idx);
+	*dL_drot = float4{ dL_dq.x, dL_dq.y, dL_dq.z, dL_dq.w };//dnormvdv(float4{ rot.x, rot.y, rot.z, rot.w }, float4{ dL_dq.x, dL_dq.y, dL_dq.z, dL_dq.w });
+}
+
+// Backward pass of the preprocessing steps, except
+// for the covariance computation and inversion
+// (those are handled by a previous kernel call)
+template<int C>
+__global__ void preprocessCUDA(
+	int P, int D, int M,
+	const float3* means,
+	const int* radii,
+	const float* shs,
+	const bool* clamped,
+	const glm::vec3* scales,
+	const glm::vec4* rotations,
+	const float scale_modifier,
+	const float* proj,
+	const glm::vec3* campos,
+	const float3* dL_dmean2D,
+	glm::vec3* dL_dmeans,
+	float* dL_dcolor,
+	float* dL_dcov3D,
+	float* dL_dsh,
+	glm::vec3* dL_dscale,
+	glm::vec4* dL_drot)
+{
+	auto idx = cg::this_grid().thread_rank();
+	if (idx >= P || !(radii[idx] > 0))
+		return;
+
+	float3 m = means[idx];
+
+	// Taking care of gradients from the screenspace points
+	float4 m_hom = transformPoint4x4(m, proj);
+	float m_w = 1.0f / (m_hom.w + 0.0000001f);
+
+	// Compute loss gradient w.r.t. 3D means due to gradients of 2D means
+	// from rendering procedure
+	glm::vec3 dL_dmean;
+	float mul1 = (proj[0] * m.x + proj[4] * m.y + proj[8] * m.z + proj[12]) * m_w * m_w;
+	float mul2 = (proj[1] * m.x + proj[5] * m.y + proj[9] * m.z + proj[13]) * m_w * m_w;
+	dL_dmean.x = (proj[0] * m_w - proj[3] * mul1) * dL_dmean2D[idx].x + (proj[1] * m_w - proj[3] * mul2) * dL_dmean2D[idx].y;
+	dL_dmean.y = (proj[4] * m_w - proj[7] * mul1) * dL_dmean2D[idx].x + (proj[5] * m_w - proj[7] * mul2) * dL_dmean2D[idx].y;
+	dL_dmean.z = (proj[8] * m_w - proj[11] * mul1) * dL_dmean2D[idx].x + (proj[9] * m_w - proj[11] * mul2) * dL_dmean2D[idx].y;
+
+	// That's the second part of the mean gradient. The previous computation
+	// of cov2D and the following SH conversion also affect it.
+	dL_dmeans[idx] += dL_dmean;
+
+	// Compute gradient updates due to computing colors from SHs
+	if (shs)
+		computeColorFromSH(idx, D, M, (glm::vec3*)means, *campos, shs, clamped, (glm::vec3*)dL_dcolor, (glm::vec3*)dL_dmeans, (glm::vec3*)dL_dsh);
+
+	// Compute gradient updates due to computing covariance from scale/rotation
+	if (scales)
+		computeCov3D(idx, scales[idx], scale_modifier, rotations[idx], dL_dcov3D, dL_dscale, dL_drot);
+}
+
+// Backward version of the rendering procedure.
+template <uint32_t C>
+__global__ void __launch_bounds__(BLOCK_X * BLOCK_Y)
+renderCUDA(
+	const uint2* __restrict__ ranges,
+	const uint32_t* __restrict__ point_list,
+	int W, int H,
+	const float* __restrict__ bg_color,
+	const float2* __restrict__ points_xy_image,
+	const float4* __restrict__ conic_opacity,
+	const float* __restrict__ colors,
+	const float* __restrict__ final_Ts,
+	const uint32_t* __restrict__ n_contrib,
+	const float* __restrict__ dL_dpixels,
+	float3* __restrict__ dL_dmean2D,
+	float4* __restrict__ dL_dconic2D,
+	float* __restrict__ dL_dopacity,
+	float* __restrict__ dL_dcolors)
+{
+	// We rasterize again. Compute necessary block info.
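+	// Blending recurrence being differentiated:
+	//   C = sum_i c_i * alpha_i * T_i,  with  T_i = prod_{j<i} (1 - alpha_j).
+	// Gaussians are traversed back-to-front so each T_i can be recovered
+	// from the stored final transmittance by dividing out (1 - alpha).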
+	auto block = cg::this_thread_block();
+	const uint32_t horizontal_blocks = (W + BLOCK_X - 1) / BLOCK_X;
+	const uint2 pix_min = { block.group_index().x * BLOCK_X, block.group_index().y * BLOCK_Y };
+	const uint2 pix_max = { min(pix_min.x + BLOCK_X, W), min(pix_min.y + BLOCK_Y , H) };
+	const uint2 pix = { pix_min.x + block.thread_index().x, pix_min.y + block.thread_index().y };
+	const uint32_t pix_id = W * pix.y + pix.x;
+	const float2 pixf = { (float)pix.x, (float)pix.y };
+
+	const bool inside = pix.x < W && pix.y < H;
+	const uint2 range = ranges[block.group_index().y * horizontal_blocks + block.group_index().x];
+
+	const int rounds = ((range.y - range.x + BLOCK_SIZE - 1) / BLOCK_SIZE);
+
+	bool done = !inside;
+	int toDo = range.y - range.x;
+
+	__shared__ int collected_id[BLOCK_SIZE];
+	__shared__ float2 collected_xy[BLOCK_SIZE];
+	__shared__ float4 collected_conic_opacity[BLOCK_SIZE];
+	__shared__ float collected_colors[C * BLOCK_SIZE];
+
+	// In the forward, we stored the final value for T, the
+	// product of all (1 - alpha) factors.
+	const float T_final = inside ? final_Ts[pix_id] : 0;
+	float T = T_final;
+
+	// We start from the back. The ID of the last contributing
+	// Gaussian is known from each pixel from the forward.
+	uint32_t contributor = toDo;
+	const int last_contributor = inside ? n_contrib[pix_id] : 0;
+
+	float accum_rec[C] = { 0 };
+	float dL_dpixel[C];
+	if (inside)
+		for (int i = 0; i < C; i++)
+			dL_dpixel[i] = dL_dpixels[i * H * W + pix_id];
+
+	float last_alpha = 0;
+	float last_color[C] = { 0 };
+
+	// Gradient of pixel coordinate w.r.t. normalized
+	// screen-space viewport coordinates (-1 to 1)
+	const float ddelx_dx = 0.5 * W;
+	const float ddely_dy = 0.5 * H;
+
+	// Traverse all Gaussians
+	for (int i = 0; i < rounds; i++, toDo -= BLOCK_SIZE)
+	{
+		// Load auxiliary data into shared memory, start in the BACK
+		// and load them in reverse order.
+		block.sync();
+		const int progress = i * BLOCK_SIZE + block.thread_rank();
+		if (range.x + progress < range.y)
+		{
+			const int coll_id = point_list[range.y - progress - 1];
+			collected_id[block.thread_rank()] = coll_id;
+			collected_xy[block.thread_rank()] = points_xy_image[coll_id];
+			collected_conic_opacity[block.thread_rank()] = conic_opacity[coll_id];
+			for (int i = 0; i < C; i++)
+				collected_colors[i * BLOCK_SIZE + block.thread_rank()] = colors[coll_id * C + i];
+		}
+		block.sync();
+
+		// Iterate over Gaussians
+		for (int j = 0; !done && j < min(BLOCK_SIZE, toDo); j++)
+		{
+			// Keep track of current Gaussian ID. Skip, if this one
+			// is behind the last contributor for this pixel.
+			contributor--;
+			if (contributor >= last_contributor)
+				continue;
+
+			// Compute blending values, as before.
+			const float2 xy = collected_xy[j];
+			const float2 d = { xy.x - pixf.x, xy.y - pixf.y };
+			const float4 con_o = collected_conic_opacity[j];
+			const float power = -0.5f * (con_o.x * d.x * d.x + con_o.z * d.y * d.y) - con_o.y * d.x * d.y;
+			if (power > 0.0f)
+				continue;
+
+			const float G = exp(power);
+			const float alpha = min(0.99f, con_o.w * G);
+			if (alpha < 1.0f / 255.0f)
+				continue;
+
+			T = T / (1.f - alpha);
+			const float dchannel_dcolor = alpha * T;
+
+			// Propagate gradients to per-Gaussian colors and keep
+			// gradients w.r.t. alpha (blending factor for a Gaussian/pixel
+			// pair).
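+			// Per channel: d(pixel)/d(alpha_i) = T_i * (c_i - S_i), where S_i is
+			// the color accumulated behind Gaussian i (tracked in accum_rec).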
+			float dL_dalpha = 0.0f;
+			const int global_id = collected_id[j];
+			for (int ch = 0; ch < C; ch++)
+			{
+				const float c = collected_colors[ch * BLOCK_SIZE + j];
+				// Update last color (to be used in the next iteration)
+				accum_rec[ch] = last_alpha * last_color[ch] + (1.f - last_alpha) * accum_rec[ch];
+				last_color[ch] = c;
+
+				const float dL_dchannel = dL_dpixel[ch];
+				dL_dalpha += (c - accum_rec[ch]) * dL_dchannel;
+				// Update the gradients w.r.t. color of the Gaussian.
+				// Atomic, since this pixel is just one of potentially
+				// many that were affected by this Gaussian.
+				atomicAdd(&(dL_dcolors[global_id * C + ch]), dchannel_dcolor * dL_dchannel);
+			}
+			dL_dalpha *= T;
+			// Update last alpha (to be used in the next iteration)
+			last_alpha = alpha;
+
+			// Account for fact that alpha also influences how much of
+			// the background color is added if nothing left to blend
+			float bg_dot_dpixel = 0;
+			for (int i = 0; i < C; i++)
+				bg_dot_dpixel += bg_color[i] * dL_dpixel[i];
+			dL_dalpha += (-T_final / (1.f - alpha)) * bg_dot_dpixel;
+
+
+			// Helpful reusable temporary variables
+			const float dL_dG = con_o.w * dL_dalpha;
+			const float gdx = G * d.x;
+			const float gdy = G * d.y;
+			const float dG_ddelx = -gdx * con_o.x - gdy * con_o.y;
+			const float dG_ddely = -gdy * con_o.z - gdx * con_o.y;
+
+			// Update gradients w.r.t. 2D mean position of the Gaussian
+			atomicAdd(&dL_dmean2D[global_id].x, dL_dG * dG_ddelx * ddelx_dx);
+			atomicAdd(&dL_dmean2D[global_id].y, dL_dG * dG_ddely * ddely_dy);
+
+			// Update gradients w.r.t. 2D covariance (2x2 matrix, symmetric)
+			atomicAdd(&dL_dconic2D[global_id].x, -0.5f * gdx * d.x * dL_dG);
+			atomicAdd(&dL_dconic2D[global_id].y, -0.5f * gdx * d.y * dL_dG);
+			atomicAdd(&dL_dconic2D[global_id].w, -0.5f * gdy * d.y * dL_dG);
+
+			// Update gradients w.r.t. opacity of the Gaussian
+			atomicAdd(&(dL_dopacity[global_id]), G * dL_dalpha);
+		}
+	}
+}
+
+void BACKWARD::preprocess(
+	int P, int D, int M,
+	const float3* means3D,
+	const int* radii,
+	const float* shs,
+	const bool* clamped,
+	const glm::vec3* scales,
+	const glm::vec4* rotations,
+	const float scale_modifier,
+	const float* cov3Ds,
+	const float* viewmatrix,
+	const float* projmatrix,
+	const float focal_x, float focal_y,
+	const float tan_fovx, float tan_fovy,
+	const glm::vec3* campos,
+	const float3* dL_dmean2D,
+	const float* dL_dconic,
+	glm::vec3* dL_dmean3D,
+	float* dL_dcolor,
+	float* dL_dcov3D,
+	float* dL_dsh,
+	glm::vec3* dL_dscale,
+	glm::vec4* dL_drot)
+{
+	// Propagate gradients for the path of 2D conic matrix computation.
+	// Somewhat long, thus it is its own kernel rather than being part of
+	// "preprocess". When done, loss gradient w.r.t. 3D means has been
+	// modified and gradient w.r.t. 3D covariance matrix has been computed.
+	computeCov2DCUDA << <(P + 255) / 256, 256 >> > (
+		P,
+		means3D,
+		radii,
+		cov3Ds,
+		focal_x,
+		focal_y,
+		tan_fovx,
+		tan_fovy,
+		viewmatrix,
+		dL_dconic,
+		(float3*)dL_dmean3D,
+		dL_dcov3D);
+
+	// Propagate gradients for remaining steps: finish 3D mean gradients,
+	// propagate color gradients to SH (if desired), propagate 3D covariance
+	// matrix gradients to scale and rotation.
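+	// Note: computeCov2DCUDA above initializes dL_dmean3D with "=", so it
+	// must run before preprocessCUDA, which accumulates into it with "+=".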
+	preprocessCUDA<NUM_CHANNELS> << < (P + 255) / 256, 256 >> > (
+		P, D, M,
+		(float3*)means3D,
+		radii,
+		shs,
+		clamped,
+		(glm::vec3*)scales,
+		(glm::vec4*)rotations,
+		scale_modifier,
+		projmatrix,
+		campos,
+		(float3*)dL_dmean2D,
+		(glm::vec3*)dL_dmean3D,
+		dL_dcolor,
+		dL_dcov3D,
+		dL_dsh,
+		dL_dscale,
+		dL_drot);
+}
+
+void BACKWARD::render(
+	const dim3 grid, const dim3 block,
+	const uint2* ranges,
+	const uint32_t* point_list,
+	int W, int H,
+	const float* bg_color,
+	const float2* means2D,
+	const float4* conic_opacity,
+	const float* colors,
+	const float* final_Ts,
+	const uint32_t* n_contrib,
+	const float* dL_dpixels,
+	float3* dL_dmean2D,
+	float4* dL_dconic2D,
+	float* dL_dopacity,
+	float* dL_dcolors)
+{
+	renderCUDA<NUM_CHANNELS> << <grid, block >> >(
+		ranges,
+		point_list,
+		W, H,
+		bg_color,
+		means2D,
+		conic_opacity,
+		colors,
+		final_Ts,
+		n_contrib,
+		dL_dpixels,
+		dL_dmean2D,
+		dL_dconic2D,
+		dL_dopacity,
+		dL_dcolors
+		);
+}
\ No newline at end of file
diff --git a/submodules/diff-gaussian-rasterization/cuda_rasterizer/backward.h b/submodules/diff-gaussian-rasterization/cuda_rasterizer/backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..93dd2e4be371d36385b102b13f59f37c1d019223
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/cuda_rasterizer/backward.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2023, Inria
+ * GRAPHDECO research group, https://team.inria.fr/graphdeco
+ * All rights reserved.
+ *
+ * This software is free for non-commercial, research and evaluation use
+ * under the terms of the LICENSE.md file.
+ *
+ * For inquiries contact george.drettakis@inria.fr
+ */
+
+#ifndef CUDA_RASTERIZER_BACKWARD_H_INCLUDED
+#define CUDA_RASTERIZER_BACKWARD_H_INCLUDED
+
+#include <cuda.h>
+#include "cuda_runtime.h"
+#include "device_launch_parameters.h"
+#define GLM_FORCE_CUDA
+#include <glm/glm.hpp>
+
+namespace BACKWARD
+{
+	void render(
+		const dim3 grid, dim3 block,
+		const uint2* ranges,
+		const uint32_t* point_list,
+		int W, int H,
+		const float* bg_color,
+		const float2* means2D,
+		const float4* conic_opacity,
+		const float* colors,
+		const float* final_Ts,
+		const uint32_t* n_contrib,
+		const float* dL_dpixels,
+		float3* dL_dmean2D,
+		float4* dL_dconic2D,
+		float* dL_dopacity,
+		float* dL_dcolors);
+
+	void preprocess(
+		int P, int D, int M,
+		const float3* means,
+		const int* radii,
+		const float* shs,
+		const bool* clamped,
+		const glm::vec3* scales,
+		const glm::vec4* rotations,
+		const float scale_modifier,
+		const float* cov3Ds,
+		const float* view,
+		const float* proj,
+		const float focal_x, float focal_y,
+		const float tan_fovx, float tan_fovy,
+		const glm::vec3* campos,
+		const float3* dL_dmean2D,
+		const float* dL_dconics,
+		glm::vec3* dL_dmeans,
+		float* dL_dcolor,
+		float* dL_dcov3D,
+		float* dL_dsh,
+		glm::vec3* dL_dscale,
+		glm::vec4* dL_drot);
+}
+
+#endif
\ No newline at end of file
diff --git a/submodules/diff-gaussian-rasterization/cuda_rasterizer/config.h b/submodules/diff-gaussian-rasterization/cuda_rasterizer/config.h
new file mode 100644
index 0000000000000000000000000000000000000000..2a912fb34824349caadffe435fc1ab4b31e5aa4f
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/cuda_rasterizer/config.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2023, Inria
+ * GRAPHDECO research group, https://team.inria.fr/graphdeco
+ * All rights reserved.
+ *
+ * This software is free for non-commercial, research and evaluation use
+ * under the terms of the LICENSE.md file.
+ *
+ * For inquiries contact george.drettakis@inria.fr
+ */
+
+#ifndef CUDA_RASTERIZER_CONFIG_H_INCLUDED
+#define CUDA_RASTERIZER_CONFIG_H_INCLUDED
+
+#define NUM_CHANNELS 3 // Default 3, RGB
+#define BLOCK_X 16
+#define BLOCK_Y 16
+
+#endif
\ No newline at end of file
diff --git a/submodules/diff-gaussian-rasterization/cuda_rasterizer/forward.cu b/submodules/diff-gaussian-rasterization/cuda_rasterizer/forward.cu
new file mode 100644
index 0000000000000000000000000000000000000000..c419a328de7106ef713c2e77146dbf96e4cbeea3
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/cuda_rasterizer/forward.cu
@@ -0,0 +1,455 @@
+/*
+ * Copyright (C) 2023, Inria
+ * GRAPHDECO research group, https://team.inria.fr/graphdeco
+ * All rights reserved.
+ *
+ * This software is free for non-commercial, research and evaluation use
+ * under the terms of the LICENSE.md file.
+ *
+ * For inquiries contact george.drettakis@inria.fr
+ */
+
+#include "forward.h"
+#include "auxiliary.h"
+#include <cooperative_groups.h>
+#include <cooperative_groups/reduce.h>
+namespace cg = cooperative_groups;
+
+// Forward method for converting the input spherical harmonics
+// coefficients of each Gaussian to a simple RGB color.
+__device__ glm::vec3 computeColorFromSH(int idx, int deg, int max_coeffs, const glm::vec3* means, glm::vec3 campos, const float* shs, bool* clamped)
+{
+	// The implementation is loosely based on code for
+	// "Differentiable Point-Based Radiance Fields for
+	// Efficient View Synthesis" by Zhang et al. (2022)
+	glm::vec3 pos = means[idx];
+	glm::vec3 dir = pos - campos;
+	dir = dir / glm::length(dir);
+
+	glm::vec3* sh = ((glm::vec3*)shs) + idx * max_coeffs;
+	glm::vec3 result = SH_C0 * sh[0];
+
+	if (deg > 0)
+	{
+		float x = dir.x;
+		float y = dir.y;
+		float z = dir.z;
+		result = result - SH_C1 * y * sh[1] + SH_C1 * z * sh[2] - SH_C1 * x * sh[3];
+
+		if (deg > 1)
+		{
+			float xx = x * x, yy = y * y, zz = z * z;
+			float xy = x * y, yz = y * z, xz = x * z;
+			result = result +
+				SH_C2[0] * xy * sh[4] +
+				SH_C2[1] * yz * sh[5] +
+				SH_C2[2] * (2.0f * zz - xx - yy) * sh[6] +
+				SH_C2[3] * xz * sh[7] +
+				SH_C2[4] * (xx - yy) * sh[8];
+
+			if (deg > 2)
+			{
+				result = result +
+					SH_C3[0] * y * (3.0f * xx - yy) * sh[9] +
+					SH_C3[1] * xy * z * sh[10] +
+					SH_C3[2] * y * (4.0f * zz - xx - yy) * sh[11] +
+					SH_C3[3] * z * (2.0f * zz - 3.0f * xx - 3.0f * yy) * sh[12] +
+					SH_C3[4] * x * (4.0f * zz - xx - yy) * sh[13] +
+					SH_C3[5] * z * (xx - yy) * sh[14] +
+					SH_C3[6] * x * (xx - 3.0f * yy) * sh[15];
+			}
+		}
+	}
+	result += 0.5f;
+
+	// RGB colors are clamped to positive values. If values are
+	// clamped, we need to keep track of this for the backward pass.
+	clamped[3 * idx + 0] = (result.x < 0);
+	clamped[3 * idx + 1] = (result.y < 0);
+	clamped[3 * idx + 2] = (result.z < 0);
+	return glm::max(result, 0.0f);
+}
+
+// Forward version of 2D covariance matrix computation
+__device__ float3 computeCov2D(const float3& mean, float focal_x, float focal_y, float tan_fovx, float tan_fovy, const float* cov3D, const float* viewmatrix)
+{
+	// The following models the steps outlined by equations 29
+	// and 31 in "EWA Splatting" (Zwicker et al., 2002).
+	// Additionally considers aspect / scaling of viewport.
+	// Transposes used to account for row-/column-major conventions.
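+	// In matrix form: cov2D = J * W * Vrk * W^T * J^T, with J the local
+	// affine approximation of the projection and W the view rotation; with
+	// glm's column-major layout this is written below as
+	// transpose(T) * transpose(Vrk) * T, where T = W * J.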
+	float3 t = transformPoint4x3(mean, viewmatrix);
+
+	const float limx = 1.3f * tan_fovx;
+	const float limy = 1.3f * tan_fovy;
+	const float txtz = t.x / t.z;
+	const float tytz = t.y / t.z;
+	t.x = min(limx, max(-limx, txtz)) * t.z;
+	t.y = min(limy, max(-limy, tytz)) * t.z;
+
+	glm::mat3 J = glm::mat3(
+		focal_x / t.z, 0.0f, -(focal_x * t.x) / (t.z * t.z),
+		0.0f, focal_y / t.z, -(focal_y * t.y) / (t.z * t.z),
+		0, 0, 0);
+
+	glm::mat3 W = glm::mat3(
+		viewmatrix[0], viewmatrix[4], viewmatrix[8],
+		viewmatrix[1], viewmatrix[5], viewmatrix[9],
+		viewmatrix[2], viewmatrix[6], viewmatrix[10]);
+
+	glm::mat3 T = W * J;
+
+	glm::mat3 Vrk = glm::mat3(
+		cov3D[0], cov3D[1], cov3D[2],
+		cov3D[1], cov3D[3], cov3D[4],
+		cov3D[2], cov3D[4], cov3D[5]);
+
+	glm::mat3 cov = glm::transpose(T) * glm::transpose(Vrk) * T;
+
+	// Apply low-pass filter: every Gaussian should be at least
+	// one pixel wide/high. Discard 3rd row and column.
+	cov[0][0] += 0.3f;
+	cov[1][1] += 0.3f;
+	return { float(cov[0][0]), float(cov[0][1]), float(cov[1][1]) };
+}
+
+// Forward method for converting scale and rotation properties of each
+// Gaussian to a 3D covariance matrix in world space. Also takes care
+// of quaternion normalization.
+__device__ void computeCov3D(const glm::vec3 scale, float mod, const glm::vec4 rot, float* cov3D)
+{
+	// Create scaling matrix
+	glm::mat3 S = glm::mat3(1.0f);
+	S[0][0] = mod * scale.x;
+	S[1][1] = mod * scale.y;
+	S[2][2] = mod * scale.z;
+
+	// Normalize quaternion to get valid rotation
+	glm::vec4 q = rot;// / glm::length(rot);
+	float r = q.x;
+	float x = q.y;
+	float y = q.z;
+	float z = q.w;
+
+	// Compute rotation matrix from quaternion
+	glm::mat3 R = glm::mat3(
+		1.f - 2.f * (y * y + z * z), 2.f * (x * y - r * z), 2.f * (x * z + r * y),
+		2.f * (x * y + r * z), 1.f - 2.f * (x * x + z * z), 2.f * (y * z - r * x),
+		2.f * (x * z - r * y), 2.f * (y * z + r * x), 1.f - 2.f * (x * x + y * y)
+	);
+
+	glm::mat3 M = S * R;
+
+	// Compute 3D world covariance matrix Sigma
+	glm::mat3 Sigma = glm::transpose(M) * M;
+
+	// Covariance is symmetric, only store upper right
+	cov3D[0] = Sigma[0][0];
+	cov3D[1] = Sigma[0][1];
+	cov3D[2] = Sigma[0][2];
+	cov3D[3] = Sigma[1][1];
+	cov3D[4] = Sigma[1][2];
+	cov3D[5] = Sigma[2][2];
+}
+
+// Perform initial steps for each Gaussian prior to rasterization.
+template<int C>
+__global__ void preprocessCUDA(int P, int D, int M,
+	const float* orig_points,
+	const glm::vec3* scales,
+	const float scale_modifier,
+	const glm::vec4* rotations,
+	const float* opacities,
+	const float* shs,
+	bool* clamped,
+	const float* cov3D_precomp,
+	const float* colors_precomp,
+	const float* viewmatrix,
+	const float* projmatrix,
+	const glm::vec3* cam_pos,
+	const int W, int H,
+	const float tan_fovx, float tan_fovy,
+	const float focal_x, float focal_y,
+	int* radii,
+	float2* points_xy_image,
+	float* depths,
+	float* cov3Ds,
+	float* rgb,
+	float4* conic_opacity,
+	const dim3 grid,
+	uint32_t* tiles_touched,
+	bool prefiltered)
+{
+	auto idx = cg::this_grid().thread_rank();
+	if (idx >= P)
+		return;
+
+	// Initialize radius and touched tiles to 0. If this isn't changed,
+	// this Gaussian will not be processed further.
+	radii[idx] = 0;
+	tiles_touched[idx] = 0;
+
+	// Perform near culling, quit if outside.
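+	// (in_frustum in auxiliary.h keeps only points with view-space
+	// depth z > 0.01; see its definition earlier in this diff.)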
+	float3 p_view;
+	if (!in_frustum(idx, orig_points, viewmatrix, projmatrix, prefiltered, p_view))
+		return;
+
+	// Transform point by projecting
+	float3 p_orig = { orig_points[3 * idx], orig_points[3 * idx + 1], orig_points[3 * idx + 2] };
+	float4 p_hom = transformPoint4x4(p_orig, projmatrix);
+	float p_w = 1.0f / (p_hom.w + 0.0000001f);
+	float3 p_proj = { p_hom.x * p_w, p_hom.y * p_w, p_hom.z * p_w };
+
+	// If 3D covariance matrix is precomputed, use it, otherwise compute
+	// from scaling and rotation parameters.
+	const float* cov3D;
+	if (cov3D_precomp != nullptr)
+	{
+		cov3D = cov3D_precomp + idx * 6;
+	}
+	else
+	{
+		computeCov3D(scales[idx], scale_modifier, rotations[idx], cov3Ds + idx * 6);
+		cov3D = cov3Ds + idx * 6;
+	}
+
+	// Compute 2D screen-space covariance matrix
+	float3 cov = computeCov2D(p_orig, focal_x, focal_y, tan_fovx, tan_fovy, cov3D, viewmatrix);
+
+	// Invert covariance (EWA algorithm)
+	float det = (cov.x * cov.z - cov.y * cov.y);
+	if (det == 0.0f)
+		return;
+	float det_inv = 1.f / det;
+	float3 conic = { cov.z * det_inv, -cov.y * det_inv, cov.x * det_inv };
+
+	// Compute extent in screen space (by finding eigenvalues of
+	// 2D covariance matrix). Use extent to compute a bounding rectangle
+	// of screen-space tiles that this Gaussian overlaps with. Quit if
+	// rectangle covers 0 tiles.
+	float mid = 0.5f * (cov.x + cov.z);
+	float lambda1 = mid + sqrt(max(0.1f, mid * mid - det));
+	float lambda2 = mid - sqrt(max(0.1f, mid * mid - det));
+	float my_radius = ceil(3.f * sqrt(max(lambda1, lambda2)));
+	float2 point_image = { ndc2Pix(p_proj.x, W), ndc2Pix(p_proj.y, H) };
+	uint2 rect_min, rect_max;
+	getRect(point_image, my_radius, rect_min, rect_max, grid);
+	if ((rect_max.x - rect_min.x) * (rect_max.y - rect_min.y) == 0)
+		return;
+
+	// If colors have been precomputed, use them, otherwise convert
+	// spherical harmonics coefficients to RGB color.
+	if (colors_precomp == nullptr)
+	{
+		glm::vec3 result = computeColorFromSH(idx, D, M, (glm::vec3*)orig_points, *cam_pos, shs, clamped);
+		rgb[idx * C + 0] = result.x;
+		rgb[idx * C + 1] = result.y;
+		rgb[idx * C + 2] = result.z;
+	}
+
+	// Store some useful helper data for the next steps.
+	depths[idx] = p_view.z;
+	radii[idx] = my_radius;
+	points_xy_image[idx] = point_image;
+	// Inverse 2D covariance and opacity neatly pack into one float4
+	conic_opacity[idx] = { conic.x, conic.y, conic.z, opacities[idx] };
+	tiles_touched[idx] = (rect_max.y - rect_min.y) * (rect_max.x - rect_min.x);
+}
+
+// Main rasterization method. Collaboratively works on one tile per
+// block, each thread treats one pixel. Alternates between fetching
+// and rasterizing data.
+template <uint32_t CHANNELS>
+__global__ void __launch_bounds__(BLOCK_X * BLOCK_Y)
+renderCUDA(
+	const uint2* __restrict__ ranges,
+	const uint32_t* __restrict__ point_list,
+	int W, int H,
+	const float2* __restrict__ points_xy_image,
+	const float* __restrict__ features,
+	const float4* __restrict__ conic_opacity,
+	float* __restrict__ final_T,
+	uint32_t* __restrict__ n_contrib,
+	const float* __restrict__ bg_color,
+	float* __restrict__ out_color)
+{
+	// Identify current tile and associated min/max pixel range.
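+	// One thread block per BLOCK_X x BLOCK_Y pixel tile (16x16, see
+	// config.h); each thread handles exactly one pixel of that tile.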
+ auto block = cg::this_thread_block(); + uint32_t horizontal_blocks = (W + BLOCK_X - 1) / BLOCK_X; + uint2 pix_min = { block.group_index().x * BLOCK_X, block.group_index().y * BLOCK_Y }; + uint2 pix_max = { min(pix_min.x + BLOCK_X, W), min(pix_min.y + BLOCK_Y , H) }; + uint2 pix = { pix_min.x + block.thread_index().x, pix_min.y + block.thread_index().y }; + uint32_t pix_id = W * pix.y + pix.x; + float2 pixf = { (float)pix.x, (float)pix.y }; + + // Check if this thread is associated with a valid pixel or outside. + bool inside = pix.x < W&& pix.y < H; + // Done threads can help with fetching, but don't rasterize + bool done = !inside; + + // Load start/end range of IDs to process in bit sorted list. + uint2 range = ranges[block.group_index().y * horizontal_blocks + block.group_index().x]; + const int rounds = ((range.y - range.x + BLOCK_SIZE - 1) / BLOCK_SIZE); + int toDo = range.y - range.x; + + // Allocate storage for batches of collectively fetched data. + __shared__ int collected_id[BLOCK_SIZE]; + __shared__ float2 collected_xy[BLOCK_SIZE]; + __shared__ float4 collected_conic_opacity[BLOCK_SIZE]; + + // Initialize helper variables + float T = 1.0f; + uint32_t contributor = 0; + uint32_t last_contributor = 0; + float C[CHANNELS] = { 0 }; + + // Iterate over batches until all done or range is complete + for (int i = 0; i < rounds; i++, toDo -= BLOCK_SIZE) + { + // End if entire block votes that it is done rasterizing + int num_done = __syncthreads_count(done); + if (num_done == BLOCK_SIZE) + break; + + // Collectively fetch per-Gaussian data from global to shared + int progress = i * BLOCK_SIZE + block.thread_rank(); + if (range.x + progress < range.y) + { + int coll_id = point_list[range.x + progress]; + collected_id[block.thread_rank()] = coll_id; + collected_xy[block.thread_rank()] = points_xy_image[coll_id]; + collected_conic_opacity[block.thread_rank()] = conic_opacity[coll_id]; + } + block.sync(); + + // Iterate over current batch + for (int j = 0; !done && j < min(BLOCK_SIZE, toDo); j++) + { + // Keep track of current position in range + contributor++; + + // Resample using conic matrix (cf. "Surface + // Splatting" by Zwicker et al., 2001) + float2 xy = collected_xy[j]; + float2 d = { xy.x - pixf.x, xy.y - pixf.y }; + float4 con_o = collected_conic_opacity[j]; + float power = -0.5f * (con_o.x * d.x * d.x + con_o.z * d.y * d.y) - con_o.y * d.x * d.y; + if (power > 0.0f) + continue; + + // Eq. (2) from 3D Gaussian splatting paper. + // Obtain alpha by multiplying with Gaussian opacity + // and its exponential falloff from mean. + // Avoid numerical instabilities (see paper appendix). + float alpha = min(0.99f, con_o.w * exp(power)); + if (alpha < 1.0f / 255.0f) + continue; + float test_T = T * (1 - alpha); + if (test_T < 0.0001f) + { + done = true; + continue; + } + + // Eq. (3) from 3D Gaussian splatting paper. + for (int ch = 0; ch < CHANNELS; ch++) + C[ch] += features[collected_id[j] * CHANNELS + ch] * alpha * T; + + T = test_T; + + // Keep track of last range entry to update this + // pixel. + last_contributor = contributor; + } + } + + // All threads that treat valid pixel write out their final + // rendering data to the frame and auxiliary buffers. 
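+	// out_color is laid out planar (CHANNELS x H x W); the remaining
+	// transmittance T blends the background color into the pixel.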
+	if (inside)
+	{
+		final_T[pix_id] = T;
+		n_contrib[pix_id] = last_contributor;
+		for (int ch = 0; ch < CHANNELS; ch++)
+			out_color[ch * H * W + pix_id] = C[ch] + T * bg_color[ch];
+	}
+}
+
+void FORWARD::render(
+	const dim3 grid, dim3 block,
+	const uint2* ranges,
+	const uint32_t* point_list,
+	int W, int H,
+	const float2* means2D,
+	const float* colors,
+	const float4* conic_opacity,
+	float* final_T,
+	uint32_t* n_contrib,
+	const float* bg_color,
+	float* out_color)
+{
+	renderCUDA<NUM_CHANNELS> << <grid, block >> > (
+		ranges,
+		point_list,
+		W, H,
+		means2D,
+		colors,
+		conic_opacity,
+		final_T,
+		n_contrib,
+		bg_color,
+		out_color);
+}
+
+void FORWARD::preprocess(int P, int D, int M,
+	const float* means3D,
+	const glm::vec3* scales,
+	const float scale_modifier,
+	const glm::vec4* rotations,
+	const float* opacities,
+	const float* shs,
+	bool* clamped,
+	const float* cov3D_precomp,
+	const float* colors_precomp,
+	const float* viewmatrix,
+	const float* projmatrix,
+	const glm::vec3* cam_pos,
+	const int W, int H,
+	const float focal_x, float focal_y,
+	const float tan_fovx, float tan_fovy,
+	int* radii,
+	float2* means2D,
+	float* depths,
+	float* cov3Ds,
+	float* rgb,
+	float4* conic_opacity,
+	const dim3 grid,
+	uint32_t* tiles_touched,
+	bool prefiltered)
+{
+	preprocessCUDA<NUM_CHANNELS> << <(P + 255) / 256, 256 >> > (
+		P, D, M,
+		means3D,
+		scales,
+		scale_modifier,
+		rotations,
+		opacities,
+		shs,
+		clamped,
+		cov3D_precomp,
+		colors_precomp,
+		viewmatrix,
+		projmatrix,
+		cam_pos,
+		W, H,
+		tan_fovx, tan_fovy,
+		focal_x, focal_y,
+		radii,
+		means2D,
+		depths,
+		cov3Ds,
+		rgb,
+		conic_opacity,
+		grid,
+		tiles_touched,
+		prefiltered
+		);
+}
\ No newline at end of file
diff --git a/submodules/diff-gaussian-rasterization/cuda_rasterizer/forward.h b/submodules/diff-gaussian-rasterization/cuda_rasterizer/forward.h
new file mode 100644
index 0000000000000000000000000000000000000000..3c11cb917ac7e9e26f88613bdd079f42fd23ec8a
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/cuda_rasterizer/forward.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2023, Inria
+ * GRAPHDECO research group, https://team.inria.fr/graphdeco
+ * All rights reserved.
+ *
+ * This software is free for non-commercial, research and evaluation use
+ * under the terms of the LICENSE.md file.
+ *
+ * For inquiries contact george.drettakis@inria.fr
+ */
+
+#ifndef CUDA_RASTERIZER_FORWARD_H_INCLUDED
+#define CUDA_RASTERIZER_FORWARD_H_INCLUDED
+
+#include <cuda.h>
+#include "cuda_runtime.h"
+#include "device_launch_parameters.h"
+#define GLM_FORCE_CUDA
+#include <glm/glm.hpp>
+
+namespace FORWARD
+{
+	// Perform initial steps for each Gaussian prior to rasterization.
+	void preprocess(int P, int D, int M,
+		const float* orig_points,
+		const glm::vec3* scales,
+		const float scale_modifier,
+		const glm::vec4* rotations,
+		const float* opacities,
+		const float* shs,
+		bool* clamped,
+		const float* cov3D_precomp,
+		const float* colors_precomp,
+		const float* viewmatrix,
+		const float* projmatrix,
+		const glm::vec3* cam_pos,
+		const int W, int H,
+		const float focal_x, float focal_y,
+		const float tan_fovx, float tan_fovy,
+		int* radii,
+		float2* points_xy_image,
+		float* depths,
+		float* cov3Ds,
+		float* colors,
+		float4* conic_opacity,
+		const dim3 grid,
+		uint32_t* tiles_touched,
+		bool prefiltered);
+
+	// Main rasterization method.
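+	// Launches one thread block per image tile and alpha-blends the
+	// depth-sorted Gaussian ranges (see renderCUDA in forward.cu).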
+	void render(
+		const dim3 grid, dim3 block,
+		const uint2* ranges,
+		const uint32_t* point_list,
+		int W, int H,
+		const float2* points_xy_image,
+		const float* features,
+		const float4* conic_opacity,
+		float* final_T,
+		uint32_t* n_contrib,
+		const float* bg_color,
+		float* out_color);
+}
+
+
+#endif
\ No newline at end of file
diff --git a/submodules/diff-gaussian-rasterization/cuda_rasterizer/rasterizer.h b/submodules/diff-gaussian-rasterization/cuda_rasterizer/rasterizer.h
new file mode 100644
index 0000000000000000000000000000000000000000..81544ef61626669831bd6e85a6b5a1126a611db2
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/cuda_rasterizer/rasterizer.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2023, Inria
+ * GRAPHDECO research group, https://team.inria.fr/graphdeco
+ * All rights reserved.
+ *
+ * This software is free for non-commercial, research and evaluation use
+ * under the terms of the LICENSE.md file.
+ *
+ * For inquiries contact george.drettakis@inria.fr
+ */
+
+#ifndef CUDA_RASTERIZER_H_INCLUDED
+#define CUDA_RASTERIZER_H_INCLUDED
+
+#include <vector>
+#include <functional>
+
+namespace CudaRasterizer
+{
+	class Rasterizer
+	{
+	public:
+
+		static void markVisible(
+			int P,
+			float* means3D,
+			float* viewmatrix,
+			float* projmatrix,
+			bool* present);
+
+		static int forward(
+			std::function<char* (size_t)> geometryBuffer,
+			std::function<char* (size_t)> binningBuffer,
+			std::function<char* (size_t)> imageBuffer,
+			const int P, int D, int M,
+			const float* background,
+			const int width, int height,
+			const float* means3D,
+			const float* shs,
+			const float* colors_precomp,
+			const float* opacities,
+			const float* scales,
+			const float scale_modifier,
+			const float* rotations,
+			const float* cov3D_precomp,
+			const float* viewmatrix,
+			const float* projmatrix,
+			const float* cam_pos,
+			const float tan_fovx, float tan_fovy,
+			const bool prefiltered,
+			float* out_color,
+			int* radii = nullptr,
+			bool debug = false);
+
+		static void backward(
+			const int P, int D, int M, int R,
+			const float* background,
+			const int width, int height,
+			const float* means3D,
+			const float* shs,
+			const float* colors_precomp,
+			const float* scales,
+			const float scale_modifier,
+			const float* rotations,
+			const float* cov3D_precomp,
+			const float* viewmatrix,
+			const float* projmatrix,
+			const float* campos,
+			const float tan_fovx, float tan_fovy,
+			const int* radii,
+			char* geom_buffer,
+			char* binning_buffer,
+			char* image_buffer,
+			const float* dL_dpix,
+			float* dL_dmean2D,
+			float* dL_dconic,
+			float* dL_dopacity,
+			float* dL_dcolor,
+			float* dL_dmean3D,
+			float* dL_dcov3D,
+			float* dL_dsh,
+			float* dL_dscale,
+			float* dL_drot,
+			bool debug);
+	};
+};
+
+#endif
\ No newline at end of file
diff --git a/submodules/diff-gaussian-rasterization/cuda_rasterizer/rasterizer_impl.cu b/submodules/diff-gaussian-rasterization/cuda_rasterizer/rasterizer_impl.cu
new file mode 100644
index 0000000000000000000000000000000000000000..f8782ac43945a854937284c0fd5946e1fafb3052
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/cuda_rasterizer/rasterizer_impl.cu
@@ -0,0 +1,434 @@
+/*
+ * Copyright (C) 2023, Inria
+ * GRAPHDECO research group, https://team.inria.fr/graphdeco
+ * All rights reserved.
+ *
+ * This software is free for non-commercial, research and evaluation use
+ * under the terms of the LICENSE.md file.
+ *
+ * For inquiries contact george.drettakis@inria.fr
+ */
+
+#include "rasterizer_impl.h"
+#include <iostream>
+#include <fstream>
+#include <algorithm>
+#include <numeric>
+#include <cuda.h>
+#include "cuda_runtime.h"
+#include "device_launch_parameters.h"
+#include <cub/cub.cuh>
+#include <cub/device/device_radix_sort.cuh>
+#define GLM_FORCE_CUDA
+#include <glm/glm.hpp>
+
+#include <cooperative_groups.h>
+#include <cooperative_groups/reduce.h>
+namespace cg = cooperative_groups;
+
+#include "auxiliary.h"
+#include "forward.h"
+#include "backward.h"
+
+// Helper function to find the next-highest bit of the MSB
+// on the CPU.
+uint32_t getHigherMsb(uint32_t n)
+{
+	uint32_t msb = sizeof(n) * 4;
+	uint32_t step = msb;
+	while (step > 1)
+	{
+		step /= 2;
+		if (n >> msb)
+			msb += step;
+		else
+			msb -= step;
+	}
+	if (n >> msb)
+		msb++;
+	return msb;
+}
+
+// Wrapper method to call auxiliary coarse frustum containment test.
+// Mark all Gaussians that pass it.
+__global__ void checkFrustum(int P,
+	const float* orig_points,
+	const float* viewmatrix,
+	const float* projmatrix,
+	bool* present)
+{
+	auto idx = cg::this_grid().thread_rank();
+	if (idx >= P)
+		return;
+
+	float3 p_view;
+	present[idx] = in_frustum(idx, orig_points, viewmatrix, projmatrix, false, p_view);
+}
+
+// Generates one key/value pair for all Gaussian / tile overlaps.
+// Run once per Gaussian (1:N mapping).
+__global__ void duplicateWithKeys(
+	int P,
+	const float2* points_xy,
+	const float* depths,
+	const uint32_t* offsets,
+	uint64_t* gaussian_keys_unsorted,
+	uint32_t* gaussian_values_unsorted,
+	int* radii,
+	dim3 grid)
+{
+	auto idx = cg::this_grid().thread_rank();
+	if (idx >= P)
+		return;
+
+	// Generate no key/value pair for invisible Gaussians
+	if (radii[idx] > 0)
+	{
+		// Find this Gaussian's offset in buffer for writing keys/values.
+		uint32_t off = (idx == 0) ? 0 : offsets[idx - 1];
+		uint2 rect_min, rect_max;
+
+		getRect(points_xy[idx], radii[idx], rect_min, rect_max, grid);
+
+		// For each tile that the bounding rect overlaps, emit a
+		// key/value pair. The key is | tile ID | depth |,
+		// and the value is the ID of the Gaussian. Sorting the values
+		// with this key yields Gaussian IDs in a list, such that they
+		// are first sorted by tile and then by depth.
+		for (int y = rect_min.y; y < rect_max.y; y++)
+		{
+			for (int x = rect_min.x; x < rect_max.x; x++)
+			{
+				uint64_t key = y * grid.x + x;
+				key <<= 32;
+				key |= *((uint32_t*)&depths[idx]);
+				gaussian_keys_unsorted[off] = key;
+				gaussian_values_unsorted[off] = idx;
+				off++;
+			}
+		}
+	}
+}
+
+// Check each key to see if it is at the start/end of one tile's range in
+// the full sorted list. If yes, write start/end of this tile.
+// Run once per instanced (duplicated) Gaussian ID.
+__global__ void identifyTileRanges(int L, uint64_t* point_list_keys, uint2* ranges)
+{
+	auto idx = cg::this_grid().thread_rank();
+	if (idx >= L)
+		return;
+
+	// Read tile ID from key. Update start/end of tile range if at limit.
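+	// Example: sorted tile IDs [0, 0, 1, 1, 1, 2] (L = 6) produce
+	// ranges[0] = (0, 2), ranges[1] = (2, 5), ranges[2] = (5, 6).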
+ uint64_t key = point_list_keys[idx]; + uint32_t currtile = key >> 32; + if (idx == 0) + ranges[currtile].x = 0; + else + { + uint32_t prevtile = point_list_keys[idx - 1] >> 32; + if (currtile != prevtile) + { + ranges[prevtile].y = idx; + ranges[currtile].x = idx; + } + } + if (idx == L - 1) + ranges[currtile].y = L; +} + +// Mark Gaussians as visible/invisible, based on view frustum testing +void CudaRasterizer::Rasterizer::markVisible( + int P, + float* means3D, + float* viewmatrix, + float* projmatrix, + bool* present) +{ + checkFrustum << <(P + 255) / 256, 256 >> > ( + P, + means3D, + viewmatrix, projmatrix, + present); +} + +CudaRasterizer::GeometryState CudaRasterizer::GeometryState::fromChunk(char*& chunk, size_t P) +{ + GeometryState geom; + obtain(chunk, geom.depths, P, 128); + obtain(chunk, geom.clamped, P * 3, 128); + obtain(chunk, geom.internal_radii, P, 128); + obtain(chunk, geom.means2D, P, 128); + obtain(chunk, geom.cov3D, P * 6, 128); + obtain(chunk, geom.conic_opacity, P, 128); + obtain(chunk, geom.rgb, P * 3, 128); + obtain(chunk, geom.tiles_touched, P, 128); + cub::DeviceScan::InclusiveSum(nullptr, geom.scan_size, geom.tiles_touched, geom.tiles_touched, P); + obtain(chunk, geom.scanning_space, geom.scan_size, 128); + obtain(chunk, geom.point_offsets, P, 128); + return geom; +} + +CudaRasterizer::ImageState CudaRasterizer::ImageState::fromChunk(char*& chunk, size_t N) +{ + ImageState img; + obtain(chunk, img.accum_alpha, N, 128); + obtain(chunk, img.n_contrib, N, 128); + obtain(chunk, img.ranges, N, 128); + return img; +} + +CudaRasterizer::BinningState CudaRasterizer::BinningState::fromChunk(char*& chunk, size_t P) +{ + BinningState binning; + obtain(chunk, binning.point_list, P, 128); + obtain(chunk, binning.point_list_unsorted, P, 128); + obtain(chunk, binning.point_list_keys, P, 128); + obtain(chunk, binning.point_list_keys_unsorted, P, 128); + cub::DeviceRadixSort::SortPairs( + nullptr, binning.sorting_size, + binning.point_list_keys_unsorted, binning.point_list_keys, + binning.point_list_unsorted, binning.point_list, P); + obtain(chunk, binning.list_sorting_space, binning.sorting_size, 128); + return binning; +} + +// Forward rendering procedure for differentiable rasterization +// of Gaussians. 
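+// Pipeline: per-Gaussian preprocessing, inclusive prefix sum over touched
+// tile counts, key duplication per overlapped tile, global radix sort by
+// (tile, depth), tile range identification, then per-tile alpha blending.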
+int CudaRasterizer::Rasterizer::forward(
+	std::function<char* (size_t)> geometryBuffer,
+	std::function<char* (size_t)> binningBuffer,
+	std::function<char* (size_t)> imageBuffer,
+	const int P, int D, int M,
+	const float* background,
+	const int width, int height,
+	const float* means3D,
+	const float* shs,
+	const float* colors_precomp,
+	const float* opacities,
+	const float* scales,
+	const float scale_modifier,
+	const float* rotations,
+	const float* cov3D_precomp,
+	const float* viewmatrix,
+	const float* projmatrix,
+	const float* cam_pos,
+	const float tan_fovx, float tan_fovy,
+	const bool prefiltered,
+	float* out_color,
+	int* radii,
+	bool debug)
+{
+	const float focal_y = height / (2.0f * tan_fovy);
+	const float focal_x = width / (2.0f * tan_fovx);
+
+	size_t chunk_size = required<GeometryState>(P);
+	char* chunkptr = geometryBuffer(chunk_size);
+	GeometryState geomState = GeometryState::fromChunk(chunkptr, P);
+
+	if (radii == nullptr)
+	{
+		radii = geomState.internal_radii;
+	}
+
+	dim3 tile_grid((width + BLOCK_X - 1) / BLOCK_X, (height + BLOCK_Y - 1) / BLOCK_Y, 1);
+	dim3 block(BLOCK_X, BLOCK_Y, 1);
+
+	// Dynamically resize image-based auxiliary buffers during training
+	size_t img_chunk_size = required<ImageState>(width * height);
+	char* img_chunkptr = imageBuffer(img_chunk_size);
+	ImageState imgState = ImageState::fromChunk(img_chunkptr, width * height);
+
+	if (NUM_CHANNELS != 3 && colors_precomp == nullptr)
+	{
+		throw std::runtime_error("For non-RGB, provide precomputed Gaussian colors!");
+	}
+
+	// Run preprocessing per-Gaussian (transformation, bounding, conversion of SHs to RGB)
+	CHECK_CUDA(FORWARD::preprocess(
+		P, D, M,
+		means3D,
+		(glm::vec3*)scales,
+		scale_modifier,
+		(glm::vec4*)rotations,
+		opacities,
+		shs,
+		geomState.clamped,
+		cov3D_precomp,
+		colors_precomp,
+		viewmatrix, projmatrix,
+		(glm::vec3*)cam_pos,
+		width, height,
+		focal_x, focal_y,
+		tan_fovx, tan_fovy,
+		radii,
+		geomState.means2D,
+		geomState.depths,
+		geomState.cov3D,
+		geomState.rgb,
+		geomState.conic_opacity,
+		tile_grid,
+		geomState.tiles_touched,
+		prefiltered
+	), debug)
+
+	// Compute prefix sum over full list of touched tile counts by Gaussians
+	// E.g., [2, 3, 0, 2, 1] -> [2, 5, 5, 7, 8]
+	CHECK_CUDA(cub::DeviceScan::InclusiveSum(geomState.scanning_space, geomState.scan_size, geomState.tiles_touched, geomState.point_offsets, P), debug)
+
+	// Retrieve total number of Gaussian instances to launch and resize aux buffers
+	int num_rendered;
+	CHECK_CUDA(cudaMemcpy(&num_rendered, geomState.point_offsets + P - 1, sizeof(int), cudaMemcpyDeviceToHost), debug);
+
+	size_t binning_chunk_size = required<BinningState>(num_rendered);
+	char* binning_chunkptr = binningBuffer(binning_chunk_size);
+	BinningState binningState = BinningState::fromChunk(binning_chunkptr, num_rendered);
+
+	// For each instance to be rendered, produce adequate [ tile | depth ] key
+	// and corresponding duplicated Gaussian indices to be sorted
+	duplicateWithKeys << <(P + 255) / 256, 256 >> > (
+		P,
+		geomState.means2D,
+		geomState.depths,
+		geomState.point_offsets,
+		binningState.point_list_keys_unsorted,
+		binningState.point_list_unsorted,
+		radii,
+		tile_grid)
+	CHECK_CUDA(, debug)
+
+	int bit = getHigherMsb(tile_grid.x * tile_grid.y);
+
+	// Sort complete list of (duplicated) Gaussian indices by keys
+	CHECK_CUDA(cub::DeviceRadixSort::SortPairs(
+		binningState.list_sorting_space,
+		binningState.sorting_size,
+		binningState.point_list_keys_unsorted, binningState.point_list_keys,
+		binningState.point_list_unsorted, binningState.point_list,
+		num_rendered, 0, 32 + bit), debug)
+
CHECK_CUDA(cudaMemset(imgState.ranges, 0, tile_grid.x * tile_grid.y * sizeof(uint2)), debug); + + // Identify start and end of per-tile workloads in sorted list + if (num_rendered > 0) + identifyTileRanges << <(num_rendered + 255) / 256, 256 >> > ( + num_rendered, + binningState.point_list_keys, + imgState.ranges); + CHECK_CUDA(, debug) + + // Let each tile blend its range of Gaussians independently in parallel + const float* feature_ptr = colors_precomp != nullptr ? colors_precomp : geomState.rgb; + CHECK_CUDA(FORWARD::render( + tile_grid, block, + imgState.ranges, + binningState.point_list, + width, height, + geomState.means2D, + feature_ptr, + geomState.conic_opacity, + imgState.accum_alpha, + imgState.n_contrib, + background, + out_color), debug) + + return num_rendered; +} + +// Produce necessary gradients for optimization, corresponding +// to forward render pass +void CudaRasterizer::Rasterizer::backward( + const int P, int D, int M, int R, + const float* background, + const int width, int height, + const float* means3D, + const float* shs, + const float* colors_precomp, + const float* scales, + const float scale_modifier, + const float* rotations, + const float* cov3D_precomp, + const float* viewmatrix, + const float* projmatrix, + const float* campos, + const float tan_fovx, float tan_fovy, + const int* radii, + char* geom_buffer, + char* binning_buffer, + char* img_buffer, + const float* dL_dpix, + float* dL_dmean2D, + float* dL_dconic, + float* dL_dopacity, + float* dL_dcolor, + float* dL_dmean3D, + float* dL_dcov3D, + float* dL_dsh, + float* dL_dscale, + float* dL_drot, + bool debug) +{ + GeometryState geomState = GeometryState::fromChunk(geom_buffer, P); + BinningState binningState = BinningState::fromChunk(binning_buffer, R); + ImageState imgState = ImageState::fromChunk(img_buffer, width * height); + + if (radii == nullptr) + { + radii = geomState.internal_radii; + } + + const float focal_y = height / (2.0f * tan_fovy); + const float focal_x = width / (2.0f * tan_fovx); + + const dim3 tile_grid((width + BLOCK_X - 1) / BLOCK_X, (height + BLOCK_Y - 1) / BLOCK_Y, 1); + const dim3 block(BLOCK_X, BLOCK_Y, 1); + + // Compute loss gradients w.r.t. 2D mean position, conic matrix, + // opacity and RGB of Gaussians from per-pixel loss gradients. + // If we were given precomputed colors and not SHs, use them. + const float* color_ptr = (colors_precomp != nullptr) ? colors_precomp : geomState.rgb; + CHECK_CUDA(BACKWARD::render( + tile_grid, + block, + imgState.ranges, + binningState.point_list, + width, height, + background, + geomState.means2D, + geomState.conic_opacity, + color_ptr, + imgState.accum_alpha, + imgState.n_contrib, + dL_dpix, + (float3*)dL_dmean2D, + (float4*)dL_dconic, + dL_dopacity, + dL_dcolor), debug) + + // Take care of the rest of preprocessing. Was the precomputed covariance + // given to us or a scales/rot pair? If precomputed, pass that. If not, + // use the one we computed ourselves. + const float* cov3D_ptr = (cov3D_precomp != nullptr) ? 
cov3D_precomp : geomState.cov3D;
+	CHECK_CUDA(BACKWARD::preprocess(P, D, M,
+		(float3*)means3D,
+		radii,
+		shs,
+		geomState.clamped,
+		(glm::vec3*)scales,
+		(glm::vec4*)rotations,
+		scale_modifier,
+		cov3D_ptr,
+		viewmatrix,
+		projmatrix,
+		focal_x, focal_y,
+		tan_fovx, tan_fovy,
+		(glm::vec3*)campos,
+		(float3*)dL_dmean2D,
+		dL_dconic,
+		(glm::vec3*)dL_dmean3D,
+		dL_dcolor,
+		dL_dcov3D,
+		dL_dsh,
+		(glm::vec3*)dL_dscale,
+		(glm::vec4*)dL_drot), debug)
+}
\ No newline at end of file
diff --git a/submodules/diff-gaussian-rasterization/cuda_rasterizer/rasterizer_impl.h b/submodules/diff-gaussian-rasterization/cuda_rasterizer/rasterizer_impl.h
new file mode 100644
index 0000000000000000000000000000000000000000..bc3f0ece7f3eed613be1f95c212b07ac1220b58c
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/cuda_rasterizer/rasterizer_impl.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2023, Inria
+ * GRAPHDECO research group, https://team.inria.fr/graphdeco
+ * All rights reserved.
+ *
+ * This software is free for non-commercial, research and evaluation use
+ * under the terms of the LICENSE.md file.
+ *
+ * For inquiries contact george.drettakis@inria.fr
+ */
+
+#pragma once
+
+#include <iostream>
+#include <vector>
+#include "rasterizer.h"
+#include <cuda_runtime_api.h>
+
+namespace CudaRasterizer
+{
+	template <typename T>
+	static void obtain(char*& chunk, T*& ptr, std::size_t count, std::size_t alignment)
+	{
+		std::size_t offset = (reinterpret_cast<std::uintptr_t>(chunk) + alignment - 1) & ~(alignment - 1);
+		ptr = reinterpret_cast<T*>(offset);
+		chunk = reinterpret_cast<char*>(ptr + count);
+	}
+
+	struct GeometryState
+	{
+		size_t scan_size;
+		float* depths;
+		char* scanning_space;
+		bool* clamped;
+		int* internal_radii;
+		float2* means2D;
+		float* cov3D;
+		float4* conic_opacity;
+		float* rgb;
+		uint32_t* point_offsets;
+		uint32_t* tiles_touched;
+
+		static GeometryState fromChunk(char*& chunk, size_t P);
+	};
+
+	struct ImageState
+	{
+		uint2* ranges;
+		uint32_t* n_contrib;
+		float* accum_alpha;
+
+		static ImageState fromChunk(char*& chunk, size_t N);
+	};
+
+	struct BinningState
+	{
+		size_t sorting_size;
+		uint64_t* point_list_keys_unsorted;
+		uint64_t* point_list_keys;
+		uint32_t* point_list_unsorted;
+		uint32_t* point_list;
+		char* list_sorting_space;
+
+		static BinningState fromChunk(char*& chunk, size_t P);
+	};
+
+	template<typename T>
+	size_t required(size_t P)
+	{
+		char* size = nullptr;
+		T::fromChunk(size, P);
+		return ((size_t)size) + 128;
+	}
+};
\ No newline at end of file
diff --git a/submodules/diff-gaussian-rasterization/diff_gaussian_rasterization/__init__.py b/submodules/diff-gaussian-rasterization/diff_gaussian_rasterization/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbef37d1f3732c57f229dfa1f2fd542b7be40412
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/diff_gaussian_rasterization/__init__.py
@@ -0,0 +1,221 @@
+#
+# Copyright (C) 2023, Inria
+# GRAPHDECO research group, https://team.inria.fr/graphdeco
+# All rights reserved.
+#
+# This software is free for non-commercial, research and evaluation use
+# under the terms of the LICENSE.md file.
+#
+# For inquiries contact george.drettakis@inria.fr
+#
+
+from typing import NamedTuple
+import torch.nn as nn
+import torch
+from . import _C
diff --git a/submodules/diff-gaussian-rasterization/diff_gaussian_rasterization/__init__.py b/submodules/diff-gaussian-rasterization/diff_gaussian_rasterization/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbef37d1f3732c57f229dfa1f2fd542b7be40412
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/diff_gaussian_rasterization/__init__.py
@@ -0,0 +1,221 @@
+#
+# Copyright (C) 2023, Inria
+# GRAPHDECO research group, https://team.inria.fr/graphdeco
+# All rights reserved.
+#
+# This software is free for non-commercial, research and evaluation use
+# under the terms of the LICENSE.md file.
+#
+# For inquiries contact george.drettakis@inria.fr
+#
+
+from typing import NamedTuple
+import torch.nn as nn
+import torch
+from . import _C
+
+def cpu_deep_copy_tuple(input_tuple):
+    copied_tensors = [item.cpu().clone() if isinstance(item, torch.Tensor) else item for item in input_tuple]
+    return tuple(copied_tensors)
+
+def rasterize_gaussians(
+    means3D,
+    means2D,
+    sh,
+    colors_precomp,
+    opacities,
+    scales,
+    rotations,
+    cov3Ds_precomp,
+    raster_settings,
+):
+    return _RasterizeGaussians.apply(
+        means3D,
+        means2D,
+        sh,
+        colors_precomp,
+        opacities,
+        scales,
+        rotations,
+        cov3Ds_precomp,
+        raster_settings,
+    )
+
+class _RasterizeGaussians(torch.autograd.Function):
+    @staticmethod
+    def forward(
+        ctx,
+        means3D,
+        means2D,
+        sh,
+        colors_precomp,
+        opacities,
+        scales,
+        rotations,
+        cov3Ds_precomp,
+        raster_settings,
+    ):
+
+        # Restructure arguments the way that the C++ lib expects them
+        args = (
+            raster_settings.bg,
+            means3D,
+            colors_precomp,
+            opacities,
+            scales,
+            rotations,
+            raster_settings.scale_modifier,
+            cov3Ds_precomp,
+            raster_settings.viewmatrix,
+            raster_settings.projmatrix,
+            raster_settings.tanfovx,
+            raster_settings.tanfovy,
+            raster_settings.image_height,
+            raster_settings.image_width,
+            sh,
+            raster_settings.sh_degree,
+            raster_settings.campos,
+            raster_settings.prefiltered,
+            raster_settings.debug
+        )
+
+        # Invoke C++/CUDA rasterizer
+        if raster_settings.debug:
+            cpu_args = cpu_deep_copy_tuple(args) # Copy them before they can be corrupted
+            try:
+                num_rendered, color, radii, geomBuffer, binningBuffer, imgBuffer = _C.rasterize_gaussians(*args)
+            except Exception as ex:
+                torch.save(cpu_args, "snapshot_fw.dump")
+                print("\nAn error occurred in forward. Please forward snapshot_fw.dump for debugging.")
+                raise ex
+        else:
+            num_rendered, color, radii, geomBuffer, binningBuffer, imgBuffer = _C.rasterize_gaussians(*args)
+
+        # Keep relevant tensors for backward
+        ctx.raster_settings = raster_settings
+        ctx.num_rendered = num_rendered
+        ctx.save_for_backward(colors_precomp, means3D, scales, rotations, cov3Ds_precomp, radii, sh, geomBuffer, binningBuffer, imgBuffer)
+        return color, radii
+
+    @staticmethod
+    def backward(ctx, grad_out_color, _):
+
+        # Restore necessary values from context
+        num_rendered = ctx.num_rendered
+        raster_settings = ctx.raster_settings
+        colors_precomp, means3D, scales, rotations, cov3Ds_precomp, radii, sh, geomBuffer, binningBuffer, imgBuffer = ctx.saved_tensors
+
+        # Restructure args as C++ method expects them
+        args = (raster_settings.bg,
+                means3D,
+                radii,
+                colors_precomp,
+                scales,
+                rotations,
+                raster_settings.scale_modifier,
+                cov3Ds_precomp,
+                raster_settings.viewmatrix,
+                raster_settings.projmatrix,
+                raster_settings.tanfovx,
+                raster_settings.tanfovy,
+                grad_out_color,
+                sh,
+                raster_settings.sh_degree,
+                raster_settings.campos,
+                geomBuffer,
+                num_rendered,
+                binningBuffer,
+                imgBuffer,
+                raster_settings.debug)
+
+        # Compute gradients for relevant tensors by invoking backward method
+        if raster_settings.debug:
+            cpu_args = cpu_deep_copy_tuple(args) # Copy them before they can be corrupted
+            try:
+                grad_means2D, grad_colors_precomp, grad_opacities, grad_means3D, grad_cov3Ds_precomp, grad_sh, grad_scales, grad_rotations = _C.rasterize_gaussians_backward(*args)
+            except Exception as ex:
+                torch.save(cpu_args, "snapshot_bw.dump")
+                print("\nAn error occurred in backward. Writing snapshot_bw.dump for debugging.\n")
+                raise ex
+        else:
+            grad_means2D, grad_colors_precomp, grad_opacities, grad_means3D, grad_cov3Ds_precomp, grad_sh, grad_scales, grad_rotations = _C.rasterize_gaussians_backward(*args)
+
+        grads = (
+            grad_means3D,
+            grad_means2D,
+            grad_sh,
+            grad_colors_precomp,
+            grad_opacities,
+            grad_scales,
+            grad_rotations,
+            grad_cov3Ds_precomp,
+            None,
+        )
+
+        return grads
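Editor's note: when `raster_settings.debug` is set, both wrappers above stash a CPU deep copy of the exact positional argument tuple before calling into CUDA and dump it with `torch.save` on failure, which makes crashes replayable offline. A sketch of reloading a forward snapshot (assumes a `snapshot_fw.dump` produced by the code above and a CUDA build of the extension):

```python
# Sketch: replay a saved forward failure against the compiled kernels.
import torch
from diff_gaussian_rasterization import _C

args = torch.load("snapshot_fw.dump")  # CPU deep copy of the arg tuple
# Tensors were moved to CPU by cpu_deep_copy_tuple(); send them back.
args = tuple(a.cuda() if isinstance(a, torch.Tensor) else a for a in args)
num_rendered, color, radii, geomBuffer, binningBuffer, imgBuffer = \
    _C.rasterize_gaussians(*args)
```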
+
+class GaussianRasterizationSettings(NamedTuple):
+    image_height: int
+    image_width: int
+    tanfovx : float
+    tanfovy : float
+    bg : torch.Tensor
+    scale_modifier : float
+    viewmatrix : torch.Tensor
+    projmatrix : torch.Tensor
+    sh_degree : int
+    campos : torch.Tensor
+    prefiltered : bool
+    debug : bool
+
+class GaussianRasterizer(nn.Module):
+    def __init__(self, raster_settings):
+        super().__init__()
+        self.raster_settings = raster_settings
+
+    def markVisible(self, positions):
+        # Mark visible points (based on frustum culling for camera) with a boolean
+        with torch.no_grad():
+            raster_settings = self.raster_settings
+            visible = _C.mark_visible(
+                positions,
+                raster_settings.viewmatrix,
+                raster_settings.projmatrix)
+
+        return visible
+
+    def forward(self, means3D, means2D, opacities, shs = None, colors_precomp = None, scales = None, rotations = None, cov3D_precomp = None):
+
+        raster_settings = self.raster_settings
+
+        if (shs is None and colors_precomp is None) or (shs is not None and colors_precomp is not None):
+            raise Exception('Please provide exactly one of either SHs or precomputed colors!')
+
+        if ((scales is None or rotations is None) and cov3D_precomp is None) or ((scales is not None or rotations is not None) and cov3D_precomp is not None):
+            raise Exception('Please provide exactly one of either scale/rotation pair or precomputed 3D covariance!')
+
+        if shs is None:
+            shs = torch.Tensor([])
+        if colors_precomp is None:
+            colors_precomp = torch.Tensor([])
+
+        if scales is None:
+            scales = torch.Tensor([])
+        if rotations is None:
+            rotations = torch.Tensor([])
+        if cov3D_precomp is None:
+            cov3D_precomp = torch.Tensor([])
+
+        # Invoke C++/CUDA rasterization routine
+        return rasterize_gaussians(
+            means3D,
+            means2D,
+            shs,
+            colors_precomp,
+            opacities,
+            scales,
+            rotations,
+            cov3D_precomp,
+            raster_settings,
+        )
+
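Editor's note: for reference, a minimal toy call into the module above. The matrices and FoV tangents are placeholders, so this exercises the argument checks rather than producing a meaningful render; note the two exclusive-or constraints (exactly one of `shs`/`colors_precomp`, and exactly one of `scales`+`rotations`/`cov3D_precomp`).

```python
# Sketch: minimal GaussianRasterizer invocation with toy inputs.
import math
import torch
from diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianRasterizer

P = 100
settings = GaussianRasterizationSettings(
    image_height=128, image_width=128,
    tanfovx=math.tan(0.4), tanfovy=math.tan(0.4),
    bg=torch.zeros(3, device="cuda"),
    scale_modifier=1.0,
    viewmatrix=torch.eye(4, device="cuda"),   # placeholder camera
    projmatrix=torch.eye(4, device="cuda"),   # placeholder projection
    sh_degree=0,
    campos=torch.zeros(3, device="cuda"),
    prefiltered=False,
    debug=False,
)
rasterizer = GaussianRasterizer(settings)
color, radii = rasterizer(
    means3D=torch.randn(P, 3, device="cuda", requires_grad=True),
    means2D=torch.zeros(P, 3, device="cuda", requires_grad=True),
    opacities=torch.sigmoid(torch.randn(P, 1, device="cuda")),
    colors_precomp=torch.rand(P, 3, device="cuda"),  # so shs stays None
    scales=0.01 * torch.rand(P, 3, device="cuda"),   # so cov3D_precomp stays None
    rotations=torch.nn.functional.normalize(torch.randn(P, 4, device="cuda")),
)
```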
diff --git a/submodules/diff-gaussian-rasterization/ext.cpp b/submodules/diff-gaussian-rasterization/ext.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d768779579761238347972a973fbd1603d44235e
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/ext.cpp
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2023, Inria
+ * GRAPHDECO research group, https://team.inria.fr/graphdeco
+ * All rights reserved.
+ *
+ * This software is free for non-commercial, research and evaluation use
+ * under the terms of the LICENSE.md file.
+ *
+ * For inquiries contact george.drettakis@inria.fr
+ */
+
+#include <torch/extension.h>
+#include "rasterize_points.h"
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def("rasterize_gaussians", &RasterizeGaussiansCUDA);
+  m.def("rasterize_gaussians_backward", &RasterizeGaussiansBackwardCUDA);
+  m.def("mark_visible", &markVisible);
+}
\ No newline at end of file
diff --git a/submodules/diff-gaussian-rasterization/rasterize_points.cu b/submodules/diff-gaussian-rasterization/rasterize_points.cu
new file mode 100644
index 0000000000000000000000000000000000000000..ddc5cf8b01b478302b00cc6b3dcf68f4891a2b86
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/rasterize_points.cu
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2023, Inria
+ * GRAPHDECO research group, https://team.inria.fr/graphdeco
+ * All rights reserved.
+ *
+ * This software is free for non-commercial, research and evaluation use
+ * under the terms of the LICENSE.md file.
+ *
+ * For inquiries contact george.drettakis@inria.fr
+ */
+
+#include <math.h>
+#include <torch/extension.h>
+#include <cstdio>
+#include <sstream>
+#include <iostream>
+#include <tuple>
+#include <stdio.h>
+#include <cuda_runtime_api.h>
+#include <memory>
+#include "cuda_rasterizer/config.h"
+#include "cuda_rasterizer/rasterizer.h"
+#include <fstream>
+#include <string>
+#include <functional>
+
+std::function<char*(size_t)> resizeFunctional(torch::Tensor& t) {
+    auto lambda = [&t](size_t N) {
+        t.resize_({(long long)N});
+		return reinterpret_cast<char*>(t.contiguous().data_ptr());
+    };
+    return lambda;
+}
+
+std::tuple<int, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
+RasterizeGaussiansCUDA(
+	const torch::Tensor& background,
+	const torch::Tensor& means3D,
+    const torch::Tensor& colors,
+    const torch::Tensor& opacity,
+	const torch::Tensor& scales,
+	const torch::Tensor& rotations,
+	const float scale_modifier,
+	const torch::Tensor& cov3D_precomp,
+	const torch::Tensor& viewmatrix,
+	const torch::Tensor& projmatrix,
+	const float tan_fovx,
+	const float tan_fovy,
+    const int image_height,
+    const int image_width,
+	const torch::Tensor& sh,
+	const int degree,
+	const torch::Tensor& campos,
+	const bool prefiltered,
+	const bool debug)
+{
+  if (means3D.ndimension() != 2 || means3D.size(1) != 3) {
+    AT_ERROR("means3D must have dimensions (num_points, 3)");
+  }
+
+  const int P = means3D.size(0);
+  const int H = image_height;
+  const int W = image_width;
+
+  auto int_opts = means3D.options().dtype(torch::kInt32);
+  auto float_opts = means3D.options().dtype(torch::kFloat32);
+
+  torch::Tensor out_color = torch::full({NUM_CHANNELS, H, W}, 0.0, float_opts);
+  torch::Tensor radii = torch::full({P}, 0, means3D.options().dtype(torch::kInt32));
+
+  torch::Device device(torch::kCUDA);
+  torch::TensorOptions options(torch::kByte);
+  torch::Tensor geomBuffer = torch::empty({0}, options.device(device));
+  torch::Tensor binningBuffer = torch::empty({0}, options.device(device));
+  torch::Tensor imgBuffer = torch::empty({0}, options.device(device));
+  std::function<char*(size_t)> geomFunc = resizeFunctional(geomBuffer);
+  std::function<char*(size_t)> binningFunc = resizeFunctional(binningBuffer);
+  std::function<char*(size_t)> imgFunc = resizeFunctional(imgBuffer);
+
+  int rendered = 0;
+  if(P != 0)
+  {
+	  int M = 0;
+	  if(sh.size(0) != 0)
+	  {
+		M = sh.size(1);
+      }
+
+	  rendered = CudaRasterizer::Rasterizer::forward(
+	    geomFunc,
+		binningFunc,
+		imgFunc,
+	    P, degree, M,
+		background.contiguous().data<float>(),
+		W, H,
+		means3D.contiguous().data<float>(),
+		sh.contiguous().data_ptr<float>(),
+		colors.contiguous().data<float>(),
+		opacity.contiguous().data<float>(),
+		scales.contiguous().data_ptr<float>(),
+		scale_modifier,
+		rotations.contiguous().data_ptr<float>(),
+		cov3D_precomp.contiguous().data<float>(),
+		viewmatrix.contiguous().data<float>(),
+		projmatrix.contiguous().data<float>(),
+		campos.contiguous().data<float>(),
+		tan_fovx,
+		tan_fovy,
+		prefiltered,
+		out_color.contiguous().data<float>(),
+		radii.contiguous().data<int>(),
+		debug);
+  }
+  return std::make_tuple(rendered, out_color, radii, geomBuffer, binningBuffer, imgBuffer);
+}
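Editor's note: `resizeFunctional` is how the binding avoids guessing scratch-buffer sizes up front; it hands the rasterizer a callback that grows a byte tensor on demand and returns its raw storage pointer, so PyTorch's allocator owns all temporary memory. The same closure pattern mirrored in Python (a CPU tensor here so the sketch runs anywhere; the real buffers are CUDA byte tensors):

```python
# Sketch of the resizeFunctional() callback pattern in Python.
import torch

def resize_functional(t: torch.Tensor):
    def resize(n: int) -> int:
        t.resize_((n,))      # grow (or shrink) the backing byte tensor
        return t.data_ptr()  # raw address the kernels would write into
    return resize

buf = torch.empty(0, dtype=torch.uint8)  # starts empty, like geomBuffer
grow = resize_functional(buf)
ptr = grow(1024)  # the rasterizer requesting 1 KiB of scratch space
print(buf.numel(), hex(ptr))
```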
+
+std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
+ RasterizeGaussiansBackwardCUDA(
+ 	const torch::Tensor& background,
+	const torch::Tensor& means3D,
+	const torch::Tensor& radii,
+    const torch::Tensor& colors,
+	const torch::Tensor& scales,
+	const torch::Tensor& rotations,
+	const float scale_modifier,
+	const torch::Tensor& cov3D_precomp,
+	const torch::Tensor& viewmatrix,
+    const torch::Tensor& projmatrix,
+	const float tan_fovx,
+	const float tan_fovy,
+    const torch::Tensor& dL_dout_color,
+	const torch::Tensor& sh,
+	const int degree,
+	const torch::Tensor& campos,
+	const torch::Tensor& geomBuffer,
+	const int R,
+	const torch::Tensor& binningBuffer,
+	const torch::Tensor& imageBuffer,
+	const bool debug)
+{
+  const int P = means3D.size(0);
+  const int H = dL_dout_color.size(1);
+  const int W = dL_dout_color.size(2);
+
+  int M = 0;
+  if(sh.size(0) != 0)
+  {
+	M = sh.size(1);
+  }
+
+  torch::Tensor dL_dmeans3D = torch::zeros({P, 3}, means3D.options());
+  torch::Tensor dL_dmeans2D = torch::zeros({P, 3}, means3D.options());
+  torch::Tensor dL_dcolors = torch::zeros({P, NUM_CHANNELS}, means3D.options());
+  torch::Tensor dL_dconic = torch::zeros({P, 2, 2}, means3D.options());
+  torch::Tensor dL_dopacity = torch::zeros({P, 1}, means3D.options());
+  torch::Tensor dL_dcov3D = torch::zeros({P, 6}, means3D.options());
+  torch::Tensor dL_dsh = torch::zeros({P, M, 3}, means3D.options());
+  torch::Tensor dL_dscales = torch::zeros({P, 3}, means3D.options());
+  torch::Tensor dL_drotations = torch::zeros({P, 4}, means3D.options());
+
+  if(P != 0)
+  {
+	  CudaRasterizer::Rasterizer::backward(P, degree, M, R,
+	  background.contiguous().data<float>(),
+	  W, H,
+	  means3D.contiguous().data<float>(),
+	  sh.contiguous().data<float>(),
+	  colors.contiguous().data<float>(),
+	  scales.data_ptr<float>(),
+	  scale_modifier,
+	  rotations.data_ptr<float>(),
+	  cov3D_precomp.contiguous().data<float>(),
+	  viewmatrix.contiguous().data<float>(),
+	  projmatrix.contiguous().data<float>(),
+	  campos.contiguous().data<float>(),
+	  tan_fovx,
+	  tan_fovy,
+	  radii.contiguous().data<int>(),
+	  reinterpret_cast<char*>(geomBuffer.contiguous().data_ptr()),
+	  reinterpret_cast<char*>(binningBuffer.contiguous().data_ptr()),
+	  reinterpret_cast<char*>(imageBuffer.contiguous().data_ptr()),
+	  dL_dout_color.contiguous().data<float>(),
+	  dL_dmeans2D.contiguous().data<float>(),
+	  dL_dconic.contiguous().data<float>(),
+	  dL_dopacity.contiguous().data<float>(),
+	  dL_dcolors.contiguous().data<float>(),
+	  dL_dmeans3D.contiguous().data<float>(),
+	  dL_dcov3D.contiguous().data<float>(),
+	  dL_dsh.contiguous().data<float>(),
+	  dL_dscales.contiguous().data<float>(),
+	  dL_drotations.contiguous().data<float>(),
+	  debug);
+  }
+
+  return std::make_tuple(dL_dmeans2D, dL_dcolors, dL_dopacity, dL_dmeans3D, dL_dcov3D, dL_dsh, dL_dscales, dL_drotations);
+}
+
+torch::Tensor markVisible(
+		torch::Tensor& means3D,
+		torch::Tensor& viewmatrix,
+		torch::Tensor& projmatrix)
+{
+  const int P = means3D.size(0);
+
+  torch::Tensor present = torch::full({P}, false, means3D.options().dtype(at::kBool));
+
+  if(P != 0)
+  {
+	CudaRasterizer::Rasterizer::markVisible(P,
+		means3D.contiguous().data<float>(),
+		viewmatrix.contiguous().data<float>(),
+		projmatrix.contiguous().data<float>(),
+		present.contiguous().data<bool>());
+  }
+
+  return present;
+}
\ No newline at end of file
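Editor's note: `markVisible` is the only binding that does no rendering at all; it frustum-culls positions against the view/projection matrices and returns a boolean mask, which `GaussianRasterizer.markVisible` wraps on the Python side. A short usage sketch against the raw binding (identity matrices are placeholders; real callers pass the camera's actual matrices):

```python
# Sketch: cheap frustum culling before a full render.
import torch
from diff_gaussian_rasterization import _C

means3D = torch.randn(100, 3, device="cuda")
eye = torch.eye(4, device="cuda")
visible = _C.mark_visible(means3D, eye, eye)  # bool mask, shape (P,)
print(int(visible.sum()), "of", means3D.shape[0], "inside the frustum")
```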
diff --git a/submodules/diff-gaussian-rasterization/rasterize_points.h b/submodules/diff-gaussian-rasterization/rasterize_points.h
new file mode 100644
index 0000000000000000000000000000000000000000..9023d994bfcf4cc2c78dbd80ea3d9424268274b2
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/rasterize_points.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2023, Inria
+ * GRAPHDECO research group, https://team.inria.fr/graphdeco
+ * All rights reserved.
+ *
+ * This software is free for non-commercial, research and evaluation use
+ * under the terms of the LICENSE.md file.
+ *
+ * For inquiries contact george.drettakis@inria.fr
+ */
+
+#pragma once
+#include <torch/extension.h>
+#include <cstdio>
+#include <tuple>
+#include <string>
+
+std::tuple<int, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
+RasterizeGaussiansCUDA(
+	const torch::Tensor& background,
+	const torch::Tensor& means3D,
+    const torch::Tensor& colors,
+    const torch::Tensor& opacity,
+	const torch::Tensor& scales,
+	const torch::Tensor& rotations,
+	const float scale_modifier,
+	const torch::Tensor& cov3D_precomp,
+	const torch::Tensor& viewmatrix,
+	const torch::Tensor& projmatrix,
+	const float tan_fovx,
+	const float tan_fovy,
+    const int image_height,
+    const int image_width,
+	const torch::Tensor& sh,
+	const int degree,
+	const torch::Tensor& campos,
+	const bool prefiltered,
+	const bool debug);
+
+std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
+ RasterizeGaussiansBackwardCUDA(
+ 	const torch::Tensor& background,
+	const torch::Tensor& means3D,
+	const torch::Tensor& radii,
+    const torch::Tensor& colors,
+	const torch::Tensor& scales,
+	const torch::Tensor& rotations,
+	const float scale_modifier,
+	const torch::Tensor& cov3D_precomp,
+	const torch::Tensor& viewmatrix,
+    const torch::Tensor& projmatrix,
+	const float tan_fovx,
+	const float tan_fovy,
+    const torch::Tensor& dL_dout_color,
+	const torch::Tensor& sh,
+	const int degree,
+	const torch::Tensor& campos,
+	const torch::Tensor& geomBuffer,
+	const int R,
+	const torch::Tensor& binningBuffer,
+	const torch::Tensor& imageBuffer,
+	const bool debug);
+
+torch::Tensor markVisible(
+		torch::Tensor& means3D,
+		torch::Tensor& viewmatrix,
+		torch::Tensor& projmatrix);
\ No newline at end of file
diff --git a/submodules/diff-gaussian-rasterization/setup.py b/submodules/diff-gaussian-rasterization/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb7220d2934d006ea756e35ecb0f391403b43d64
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/setup.py
@@ -0,0 +1,34 @@
+#
+# Copyright (C) 2023, Inria
+# GRAPHDECO research group, https://team.inria.fr/graphdeco
+# All rights reserved.
+#
+# This software is free for non-commercial, research and evaluation use
+# under the terms of the LICENSE.md file.
+# +# For inquiries contact george.drettakis@inria.fr +# + +from setuptools import setup +from torch.utils.cpp_extension import CUDAExtension, BuildExtension +import os +os.path.dirname(os.path.abspath(__file__)) + +setup( + name="diff_gaussian_rasterization", + packages=['diff_gaussian_rasterization'], + ext_modules=[ + CUDAExtension( + name="diff_gaussian_rasterization._C", + sources=[ + "cuda_rasterizer/rasterizer_impl.cu", + "cuda_rasterizer/forward.cu", + "cuda_rasterizer/backward.cu", + "rasterize_points.cu", + "ext.cpp"], + extra_compile_args={"nvcc": ["-I" + os.path.join(os.path.dirname(os.path.abspath(__file__)), "third_party/glm/")]}) + ], + cmdclass={ + 'build_ext': BuildExtension + } +) diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/.appveyor.yml b/submodules/diff-gaussian-rasterization/third_party/glm/.appveyor.yml new file mode 100644 index 0000000000000000000000000000000000000000..5ce6028184b763af011bd0e325abf45a3370e182 --- /dev/null +++ b/submodules/diff-gaussian-rasterization/third_party/glm/.appveyor.yml @@ -0,0 +1,92 @@ +shallow_clone: true + +platform: + - x86 + - x64 + +configuration: + - Debug + - Release + +image: + - Visual Studio 2013 + - Visual Studio 2015 + - Visual Studio 2017 + - Visual Studio 2019 + +environment: + matrix: + - GLM_ARGUMENTS: -DGLM_TEST_FORCE_PURE=ON + - GLM_ARGUMENTS: -DGLM_TEST_ENABLE_SIMD_SSE2=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON + - GLM_ARGUMENTS: -DGLM_TEST_ENABLE_SIMD_AVX=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON + - GLM_ARGUMENTS: -DGLM_TEST_ENABLE_SIMD_AVX=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_CXX_14=ON + - GLM_ARGUMENTS: -DGLM_TEST_ENABLE_SIMD_AVX=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_CXX_17=ON + +matrix: + exclude: + - image: Visual Studio 2013 + GLM_ARGUMENTS: -DGLM_TEST_ENABLE_SIMD_AVX=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON + - image: Visual Studio 2013 + GLM_ARGUMENTS: -DGLM_TEST_ENABLE_SIMD_AVX=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_CXX_14=ON + - image: Visual Studio 2013 + GLM_ARGUMENTS: -DGLM_TEST_ENABLE_SIMD_AVX=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_CXX_17=ON + - image: Visual Studio 2013 + configuration: Debug + - image: Visual Studio 2015 + GLM_ARGUMENTS: -DGLM_TEST_ENABLE_SIMD_SSE2=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON + - image: Visual Studio 2015 + GLM_ARGUMENTS: -DGLM_TEST_ENABLE_SIMD_AVX=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_CXX_14=ON + - image: Visual Studio 2015 + GLM_ARGUMENTS: -DGLM_TEST_ENABLE_SIMD_AVX=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_CXX_17=ON + - image: Visual Studio 2015 + platform: x86 + - image: Visual Studio 2015 + configuration: Debug + - image: Visual Studio 2017 + platform: x86 + - image: Visual Studio 2017 + configuration: Debug + - image: Visual Studio 2019 + platform: x64 + +branches: + only: + - master + +before_build: + - ps: | + mkdir build + cd build + + if ("$env:APPVEYOR_JOB_NAME" -match "Image: Visual Studio 2013") { + $env:generator="Visual Studio 12 2013" + } + if ("$env:APPVEYOR_JOB_NAME" -match "Image: Visual Studio 2015") { + $env:generator="Visual Studio 14 2015" + } + if ("$env:APPVEYOR_JOB_NAME" -match "Image: Visual Studio 2017") { + $env:generator="Visual Studio 15 2017" + } + if ("$env:APPVEYOR_JOB_NAME" -match "Image: Visual Studio 2019") { + $env:generator="Visual Studio 16 2019" + } + if ($env:PLATFORM -eq "x64") { + $env:generator="$env:generator Win64" + } + echo generator="$env:generator" + cmake .. 
-G "$env:generator" -DCMAKE_INSTALL_PREFIX="$env:APPVEYOR_BUILD_FOLDER/install" -DGLM_QUIET=ON -DGLM_TEST_ENABLE=ON "$env:GLM_ARGUMENTS" + +build_script: + - cmake --build . --parallel --config %CONFIGURATION% -- /m /v:minimal + - cmake --build . --target install --parallel --config %CONFIGURATION% -- /m /v:minimal + +test_script: + - ctest --parallel 4 --verbose -C %CONFIGURATION% + - cd .. + - ps: | + mkdir build_test_cmake + cd build_test_cmake + cmake ..\test\cmake\ -G "$env:generator" -DCMAKE_PREFIX_PATH="$env:APPVEYOR_BUILD_FOLDER/install" + - cmake --build . --parallel --config %CONFIGURATION% -- /m /v:minimal + +deploy: off diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/.gitignore b/submodules/diff-gaussian-rasterization/third_party/glm/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..9dbd6d8c0b89bdc20124cfec9766821a252fbd67 --- /dev/null +++ b/submodules/diff-gaussian-rasterization/third_party/glm/.gitignore @@ -0,0 +1,61 @@ +# Compiled Object files +*.slo +*.lo +*.o +*.obj + +# Precompiled Headers +*.gch +*.pch + +# Compiled Dynamic libraries +*.so +*.dylib +*.dll + +# Fortran module files +*.mod + +# Compiled Static libraries +*.lai +*.la +*.a +*.lib + +# Executables +*.exe +*.out +*.app + +# CMake +CMakeCache.txt +CMakeFiles +cmake_install.cmake +install_manifest.txt +*.cmake +!glmConfig.cmake +!glmConfig-version.cmake +# ^ May need to add future .cmake files as exceptions + +# Test logs +Testing/* + +# Test input +test/gtc/*.dds + +# Project Files +Makefile +*.cbp +*.user + +# Misc. +*.log + +# local build(s) +build* + +/.vs +/.vscode +/CMakeSettings.json +.DS_Store +*.swp diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/.travis.yml b/submodules/diff-gaussian-rasterization/third_party/glm/.travis.yml new file mode 100644 index 0000000000000000000000000000000000000000..1660ec0c59d213b94ff7925503b2caff73567aeb --- /dev/null +++ b/submodules/diff-gaussian-rasterization/third_party/glm/.travis.yml @@ -0,0 +1,388 @@ +language: cpp + +branches: + only: + - master + - stable + +jobs: + include: + - name: "Xcode 7.3 C++98 pure release" + os: osx + osx_image: xcode7.3 + env: + - MATRIX_EVAL="" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_98=ON -DGLM_TEST_FORCE_PURE=ON" + + - name: "Xcode 7.3 C++98 sse2 release" + os: osx + osx_image: xcode7.3 + env: + - MATRIX_EVAL="" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_98=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_SSE2=ON" + + - name: "Xcode 7.3 C++98 ms release" + os: osx + osx_image: xcode7.3 + env: + - MATRIX_EVAL="" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_98=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON" + + - name: "XCode 7.3 C++11 pure release" + os: osx + osx_image: xcode7.3 + env: + - MATRIX_EVAL="" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_11=ON -DGLM_TEST_FORCE_PURE=ON" + + - name: "XCode 7.3 C++11 sse2 release" + os: osx + osx_image: xcode7.3 + env: + - MATRIX_EVAL="" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_11=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_SSE3=ON" + + - name: "XCode 10.3 C++11 sse2 release" + os: osx + osx_image: xcode10.3 + env: + - MATRIX_EVAL="" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_11=ON 
-DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_SSE3=ON" + + - name: "XCode 12.2 C++11 sse2 release" + os: osx + osx_image: xcode12.2 + env: + - MATRIX_EVAL="" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_11=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_SSE3=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "XCode 12.2 C++11 sse2 debug" + os: osx + osx_image: xcode12.2 + env: + - MATRIX_EVAL="" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_11=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_SSE3=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "XCode 12.2 C++11 avx debug" + os: osx + osx_image: xcode12.2 + env: + - MATRIX_EVAL="" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_11=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_AVX=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "XCode 12.2 C++14 avx debug" + os: osx + osx_image: xcode12.2 + env: + - MATRIX_EVAL="" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_14=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_AVX=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "XCode 12.2 C++14 pure debug" + os: osx + osx_image: xcode12.2 + env: + - MATRIX_EVAL="" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_14=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_FORCE_PURE=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "XCode 12.2 C++17 pure debug" + os: osx + osx_image: xcode12.2 + env: + - MATRIX_EVAL="" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_FORCE_PURE=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "XCode 12.2 C++17 sse2 debug" + os: osx + osx_image: xcode12.2 + env: + - MATRIX_EVAL="" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_SSE2=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "XCode 12.2 C++17 sse2 release" + os: osx + osx_image: xcode12.2 + env: + - MATRIX_EVAL="" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_SSE2=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "XCode 12.2 C++17 avx release" + os: osx + osx_image: xcode12.2 + env: + - MATRIX_EVAL="" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_AVX=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "GCC 4.9 C++98 pure release" + os: linux + dist: Xenial + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - g++-4.9 + env: + - MATRIX_EVAL="CC=gcc-4.9 && CXX=g++-4.9" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_98=ON -DGLM_TEST_FORCE_PURE=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "GCC 4.9 
C++98 pure debug" + os: linux + dist: Xenial + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - g++-4.9 + env: + - MATRIX_EVAL="CC=gcc-4.9 && CXX=g++-4.9" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_98=ON -DGLM_TEST_FORCE_PURE=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "GCC 4.9 C++98 ms debug" + os: linux + dist: Xenial + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - g++-4.9 + env: + - MATRIX_EVAL="CC=gcc-4.9 && CXX=g++-4.9" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_98=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "GCC 4.9 C++11 ms debug" + os: linux + dist: Xenial + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - g++-4.9 + env: + - MATRIX_EVAL="CC=gcc-4.9 && CXX=g++-4.9" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_11=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "GCC 4.9 C++11 pure debug" + os: linux + dist: Xenial + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - g++-4.9 + env: + - MATRIX_EVAL="CC=gcc-4.9 && CXX=g++-4.9" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_11=ON -DGLM_TEST_FORCE_PURE=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "GCC 6 C++14 pure debug" + os: linux + dist: bionic + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - g++-6 + env: + - MATRIX_EVAL="CC=gcc-6 && CXX=g++-6" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_14=ON -DGLM_TEST_FORCE_PURE=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "GCC 6 C++14 ms debug" + os: linux + dist: bionic + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - g++-6 + env: + - MATRIX_EVAL="CC=gcc-6 && CXX=g++-6" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_14=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "GCC 7 C++17 ms debug" + os: linux + dist: bionic + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - g++-7 + env: + - MATRIX_EVAL="CC=gcc-7 && CXX=g++-7" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "GCC 7 C++17 pure debug" + os: linux + dist: bionic + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - g++-7 + env: + - MATRIX_EVAL="CC=gcc-7 && CXX=g++-7" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_FORCE_PURE=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "GCC 10 C++17 pure debug" + os: linux + dist: bionic + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - g++-10 + env: + - MATRIX_EVAL="CC=gcc-10 && CXX=g++-10" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_FORCE_PURE=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "GCC 10 C++17 
pure release" + os: linux + dist: bionic + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - g++-10 + env: + - MATRIX_EVAL="CC=gcc-10 && CXX=g++-10" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_FORCE_PURE=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "Clang C++14 pure release" + os: linux + dist: Xenial + env: + - MATRIX_EVAL="CC=clang && CXX=clang++" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_14=ON -DGLM_TEST_FORCE_PURE=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "Clang C++14 pure debug" + os: linux + dist: Xenial + env: + - MATRIX_EVAL="CC=clang && CXX=clang++" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_14=ON -DGLM_TEST_FORCE_PURE=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "Clang C++14 sse2 debug" + os: linux + dist: Xenial + env: + - MATRIX_EVAL="CC=clang && CXX=clang++" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_14=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_SSE2=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "Clang C++14 sse2 debug" + os: linux + dist: focal + env: + - MATRIX_EVAL="CC=clang && CXX=clang++" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_14=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_SSE2=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "Clang C++17 sse2 debug" + os: linux + dist: focal + env: + - MATRIX_EVAL="CC=clang && CXX=clang++" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_SSE2=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "Clang C++17 avx2 debug" + os: linux + dist: focal + env: + - MATRIX_EVAL="CC=clang && CXX=clang++" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_ENABLE_LANG_EXTENSIONS=ON -DGLM_TEST_ENABLE_SIMD_AVX2=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "Clang C++17 pure debug" + os: linux + dist: focal + env: + - MATRIX_EVAL="CC=clang && CXX=clang++" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Debug -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_FORCE_PURE=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + + - name: "Clang C++17 pure release" + os: linux + dist: focal + env: + - MATRIX_EVAL="CC=clang && CXX=clang++" + - CMAKE_BUILD_ENV="-DCMAKE_BUILD_TYPE=Release -DGLM_TEST_ENABLE=ON -DGLM_TEST_ENABLE_CXX_17=ON -DGLM_TEST_FORCE_PURE=ON" + - CTEST_ENV="--parallel 4 --output-on-failure" + - CMAKE_ENV="--parallel" + +before_script: + - cmake --version + - eval "${MATRIX_EVAL}" + +script: + - ${CC} --version + - mkdir ./build + - cd ./build + - cmake -DCMAKE_INSTALL_PREFIX=$TRAVIS_BUILD_DIR/install -DCMAKE_CXX_COMPILER=$COMPILER ${CMAKE_BUILD_ENV} .. + - cmake --build . ${CMAKE_ENV} + - ctest ${CTEST_ENV} + - cmake --build . 
--target install ${CMAKE_ENV} + - cd $TRAVIS_BUILD_DIR + - mkdir ./build_test_cmake + - cd ./build_test_cmake + - cmake -DCMAKE_CXX_COMPILER=$COMPILER $TRAVIS_BUILD_DIR/test/cmake/ -DCMAKE_PREFIX_PATH=$TRAVIS_BUILD_DIR/install + - cmake --build . + + diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/CMakeLists.txt b/submodules/diff-gaussian-rasterization/third_party/glm/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..b7641a28699119f1a87e563c8dc1b449dd6e6b8f --- /dev/null +++ b/submodules/diff-gaussian-rasterization/third_party/glm/CMakeLists.txt @@ -0,0 +1,45 @@ +cmake_minimum_required(VERSION 3.2 FATAL_ERROR) +cmake_policy(VERSION 3.2) + + +file(READ "glm/detail/setup.hpp" GLM_SETUP_FILE) +string(REGEX MATCH "#define[ ]+GLM_VERSION_MAJOR[ ]+([0-9]+)" _ ${GLM_SETUP_FILE}) +set(GLM_VERSION_MAJOR "${CMAKE_MATCH_1}") +string(REGEX MATCH "#define[ ]+GLM_VERSION_MINOR[ ]+([0-9]+)" _ ${GLM_SETUP_FILE}) +set(GLM_VERSION_MINOR "${CMAKE_MATCH_1}") +string(REGEX MATCH "#define[ ]+GLM_VERSION_PATCH[ ]+([0-9]+)" _ ${GLM_SETUP_FILE}) +set(GLM_VERSION_PATCH "${CMAKE_MATCH_1}") +string(REGEX MATCH "#define[ ]+GLM_VERSION_REVISION[ ]+([0-9]+)" _ ${GLM_SETUP_FILE}) +set(GLM_VERSION_REVISION "${CMAKE_MATCH_1}") + +set(GLM_VERSION ${GLM_VERSION_MAJOR}.${GLM_VERSION_MINOR}.${GLM_VERSION_PATCH}.${GLM_VERSION_REVISION}) +project(glm VERSION ${GLM_VERSION} LANGUAGES CXX) +message(STATUS "GLM: Version " ${GLM_VERSION}) + +add_subdirectory(glm) +add_library(glm::glm ALIAS glm) + +if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) + + include(CPack) + install(DIRECTORY glm DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} PATTERN "CMakeLists.txt" EXCLUDE) + install(EXPORT glm FILE glmConfig.cmake DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/glm NAMESPACE glm::) + include(CMakePackageConfigHelpers) + write_basic_package_version_file("glmConfigVersion.cmake" COMPATIBILITY AnyNewerVersion) + install(FILES ${CMAKE_CURRENT_BINARY_DIR}/glmConfigVersion.cmake DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/glm) + + include(CTest) + if(BUILD_TESTING) + add_subdirectory(test) + endif() + +endif(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) + +if (NOT TARGET uninstall) +configure_file(cmake/cmake_uninstall.cmake.in + cmake_uninstall.cmake IMMEDIATE @ONLY) + +add_custom_target(uninstall + "${CMAKE_COMMAND}" -P + "${CMAKE_BINARY_DIR}/cmake_uninstall.cmake") +endif() diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/cmake/cmake_uninstall.cmake.in b/submodules/diff-gaussian-rasterization/third_party/glm/cmake/cmake_uninstall.cmake.in new file mode 100644 index 0000000000000000000000000000000000000000..c2d34d4796d9e2abd8aa0e7aca99a558a1e0366b --- /dev/null +++ b/submodules/diff-gaussian-rasterization/third_party/glm/cmake/cmake_uninstall.cmake.in @@ -0,0 +1,21 @@ +if(NOT EXISTS "@CMAKE_BINARY_DIR@/install_manifest.txt") + message(FATAL_ERROR "Cannot find install manifest: @CMAKE_BINARY_DIR@/install_manifest.txt") +endif() + +file(READ "@CMAKE_BINARY_DIR@/install_manifest.txt" files) +string(REGEX REPLACE "\n" ";" files "${files}") +foreach(file ${files}) + message(STATUS "Uninstalling $ENV{DESTDIR}${file}") + if(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}") + exec_program( + "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\"" + OUTPUT_VARIABLE rm_out + RETURN_VALUE rm_retval + ) + if(NOT "${rm_retval}" STREQUAL 0) + message(FATAL_ERROR "Problem when removing $ENV{DESTDIR}${file}") + endif() + else(IS_SYMLINK 
"$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}") + message(STATUS "File $ENV{DESTDIR}${file} does not exist.") + endif() +endforeach() diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/copying.txt b/submodules/diff-gaussian-rasterization/third_party/glm/copying.txt new file mode 100644 index 0000000000000000000000000000000000000000..779c32fb9afef1180798d46f08b4373e427ac693 --- /dev/null +++ b/submodules/diff-gaussian-rasterization/third_party/glm/copying.txt @@ -0,0 +1,54 @@ +================================================================================ +OpenGL Mathematics (GLM) +-------------------------------------------------------------------------------- +GLM is licensed under The Happy Bunny License or MIT License + +================================================================================ +The Happy Bunny License (Modified MIT License) +-------------------------------------------------------------------------------- +Copyright (c) 2005 - G-Truc Creation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Restrictions: + By making use of the Software for military purposes, you choose to make a + Bunny unhappy. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +================================================================================ +The MIT License +-------------------------------------------------------------------------------- +Copyright (c) 2005 - G-Truc Creation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00001_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00001_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..36d74cebfc73c0f48f1afa7025923f58257a6c14
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00001_source.html
@@ -0,0 +1,493 @@
[Doxygen-generated page "0.9.9 API documentation: _features.hpp Source File": vendored GLM documentation listing glm/detail/_features.hpp (GLM_CXX98_*/GLM_CXX11_* compiler feature-detection macros). HTML markup and navigation chrome omitted.]
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00002_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00002_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..b38783549b04b8e60129273f6e6c2c926bc778c9
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00002_source.html
@@ -0,0 +1,121 @@
[Doxygen-generated page "0.9.9 API documentation: _fixes.hpp Source File": vendored GLM documentation listing glm/detail/_fixes.hpp (undefines leaking max/min/isnan/isinf/log2 macros). HTML markup and navigation chrome omitted.]
+ + + + diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00003_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00003_source.html new file mode 100644 index 0000000000000000000000000000000000000000..4e90ac88bb08d7430ad6972a81c3c8f26b49319c --- /dev/null +++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00003_source.html @@ -0,0 +1,182 @@ + + + + + + +0.9.9 API documentation: _noise.hpp Source File + + + + + + + + + + +
+
+ + + + + + + +
+
0.9.9 API documentation +
+
+
+ + + + + + +
+
+ + +
+ +
+ + +
+
+
+
_noise.hpp
+
+
+
#pragma once

#include "../common.hpp"

namespace glm{
namespace detail
{
    template<typename T>
    GLM_FUNC_QUALIFIER T mod289(T const& x)
    {
        return x - floor(x * (static_cast<T>(1.0) / static_cast<T>(289.0))) * static_cast<T>(289.0);
    }

    template<typename T>
    GLM_FUNC_QUALIFIER T permute(T const& x)
    {
        return mod289(((x * static_cast<T>(34)) + static_cast<T>(1)) * x);
    }

    template<typename T, qualifier Q>
    GLM_FUNC_QUALIFIER vec<2, T, Q> permute(vec<2, T, Q> const& x)
    {
        return mod289(((x * static_cast<T>(34)) + static_cast<T>(1)) * x);
    }

    template<typename T, qualifier Q>
    GLM_FUNC_QUALIFIER vec<3, T, Q> permute(vec<3, T, Q> const& x)
    {
        return mod289(((x * static_cast<T>(34)) + static_cast<T>(1)) * x);
    }

    template<typename T, qualifier Q>
    GLM_FUNC_QUALIFIER vec<4, T, Q> permute(vec<4, T, Q> const& x)
    {
        return mod289(((x * static_cast<T>(34)) + static_cast<T>(1)) * x);
    }

    template<typename T>
    GLM_FUNC_QUALIFIER T taylorInvSqrt(T const& r)
    {
        return static_cast<T>(1.79284291400159) - static_cast<T>(0.85373472095314) * r;
    }

    template<typename T, qualifier Q>
    GLM_FUNC_QUALIFIER vec<2, T, Q> taylorInvSqrt(vec<2, T, Q> const& r)
    {
        return static_cast<T>(1.79284291400159) - static_cast<T>(0.85373472095314) * r;
    }

    template<typename T, qualifier Q>
    GLM_FUNC_QUALIFIER vec<3, T, Q> taylorInvSqrt(vec<3, T, Q> const& r)
    {
        return static_cast<T>(1.79284291400159) - static_cast<T>(0.85373472095314) * r;
    }

    template<typename T, qualifier Q>
    GLM_FUNC_QUALIFIER vec<4, T, Q> taylorInvSqrt(vec<4, T, Q> const& r)
    {
        return static_cast<T>(1.79284291400159) - static_cast<T>(0.85373472095314) * r;
    }

    template<typename T, qualifier Q>
    GLM_FUNC_QUALIFIER vec<2, T, Q> fade(vec<2, T, Q> const& t)
    {
        return (t * t * t) * (t * (t * static_cast<T>(6) - static_cast<T>(15)) + static_cast<T>(10));
    }

    template<typename T, qualifier Q>
    GLM_FUNC_QUALIFIER vec<3, T, Q> fade(vec<3, T, Q> const& t)
    {
        return (t * t * t) * (t * (t * static_cast<T>(6) - static_cast<T>(15)) + static_cast<T>(10));
    }

    template<typename T, qualifier Q>
    GLM_FUNC_QUALIFIER vec<4, T, Q> fade(vec<4, T, Q> const& t)
    {
        return (t * t * t) * (t * (t * static_cast<T>(6) - static_cast<T>(15)) + static_cast<T>(10));
    }
}//namespace detail
}//namespace glm
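These helpers are the standard GLSL-noise building blocks: mod289 and permute form a cheap float-only hash, taylorInvSqrt is a fixed first-order approximation of 1/sqrt(r) near r = 1, and fade is Perlin's quintic 6t^5 - 15t^4 + 10t^3, whose first and second derivatives vanish at t = 0 and t = 1. A minimal 1-D sketch of why the fade curve matters when blending gradient contributions (illustrative, not from the diff):

// Interpolating with fade(t) instead of raw t keeps the result C2-continuous
// across lattice-cell boundaries, which is what hides the grid in Perlin noise.
float fade(float t) { return (t * t * t) * (t * (t * 6.0f - 15.0f) + 10.0f); }

float blend(float g0, float g1, float t)   // g0, g1: gradient contributions at cell corners
{
    return g0 + fade(t) * (g1 - g0);
}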
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00004_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00004_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..a2a5ebb623fe88841ada1abf10b81e7714ed72ce
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00004_source.html
@@ -0,0 +1,905 @@
[0.9.9 API documentation: _swizzle.hpp Source File — doxygen page chrome omitted; source listing follows]
#pragma once

namespace glm{
namespace detail
{
    // Internal class for implementing swizzle operators
    template<typename T, int N>
    struct _swizzle_base0
    {
    protected:
        GLM_FUNC_QUALIFIER T& elem(size_t i){ return (reinterpret_cast<T*>(_buffer))[i]; }
        GLM_FUNC_QUALIFIER T const& elem(size_t i) const{ return (reinterpret_cast<const T*>(_buffer))[i]; }

        // Use an opaque buffer to *ensure* the compiler doesn't call a constructor.
        // The size 1 buffer is assumed to aligned to the actual members so that the
        // elem()
        char    _buffer[1];
    };

    template<int N, typename T, qualifier Q, int E0, int E1, int E2, int E3, bool Aligned>
    struct _swizzle_base1 : public _swizzle_base0<T, N>
    {
    };

    template<typename T, qualifier Q, int E0, int E1, bool Aligned>
    struct _swizzle_base1<2, T, Q, E0,E1,-1,-2, Aligned> : public _swizzle_base0<T, 2>
    {
        GLM_FUNC_QUALIFIER vec<2, T, Q> operator ()() const { return vec<2, T, Q>(this->elem(E0), this->elem(E1)); }
    };

    template<typename T, qualifier Q, int E0, int E1, int E2, bool Aligned>
    struct _swizzle_base1<3, T, Q, E0,E1,E2,-1, Aligned> : public _swizzle_base0<T, 3>
    {
        GLM_FUNC_QUALIFIER vec<3, T, Q> operator ()() const { return vec<3, T, Q>(this->elem(E0), this->elem(E1), this->elem(E2)); }
    };

    template<typename T, qualifier Q, int E0, int E1, int E2, int E3, bool Aligned>
    struct _swizzle_base1<4, T, Q, E0,E1,E2,E3, Aligned> : public _swizzle_base0<T, 4>
    {
        GLM_FUNC_QUALIFIER vec<4, T, Q> operator ()() const { return vec<4, T, Q>(this->elem(E0), this->elem(E1), this->elem(E2), this->elem(E3)); }
    };

    // Internal class for implementing swizzle operators
    /*
        Template parameters:

        T = type of scalar values (e.g. float, double)
        N = number of components in the vector (e.g. 3)
        E0...3 = what index the n-th element of this swizzle refers to in the unswizzled vec

        DUPLICATE_ELEMENTS = 1 if there is a repeated element, 0 otherwise (used to specialize swizzles
            containing duplicate elements so that they cannot be used as r-values).
    */
    template<int N, typename T, qualifier Q, int E0, int E1, int E2, int E3, int DUPLICATE_ELEMENTS>
    struct _swizzle_base2 : public _swizzle_base1<N, T, Q, E0,E1,E2,E3, detail::is_aligned<Q>::value>
    {
        struct op_equal
        {
            GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e = t; }
        };

        struct op_minus
        {
            GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e -= t; }
        };

        struct op_plus
        {
            GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e += t; }
        };

        struct op_mul
        {
            GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e *= t; }
        };

        struct op_div
        {
            GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e /= t; }
        };

    public:
        GLM_FUNC_QUALIFIER _swizzle_base2& operator= (const T& t)
        {
            for (int i = 0; i < N; ++i)
                (*this)[i] = t;
            return *this;
        }

        GLM_FUNC_QUALIFIER _swizzle_base2& operator= (vec<N, T, Q> const& that)
        {
            _apply_op(that, op_equal());
            return *this;
        }

        GLM_FUNC_QUALIFIER void operator -= (vec<N, T, Q> const& that)
        {
            _apply_op(that, op_minus());
        }

        GLM_FUNC_QUALIFIER void operator += (vec<N, T, Q> const& that)
        {
            _apply_op(that, op_plus());
        }

        GLM_FUNC_QUALIFIER void operator *= (vec<N, T, Q> const& that)
        {
            _apply_op(that, op_mul());
        }

        GLM_FUNC_QUALIFIER void operator /= (vec<N, T, Q> const& that)
        {
            _apply_op(that, op_div());
        }

        GLM_FUNC_QUALIFIER T& operator[](size_t i)
        {
            const int offset_dst[4] = { E0, E1, E2, E3 };
            return this->elem(offset_dst[i]);
        }
        GLM_FUNC_QUALIFIER T operator[](size_t i) const
        {
            const int offset_dst[4] = { E0, E1, E2, E3 };
            return this->elem(offset_dst[i]);
        }

    protected:
        template<typename U>
        GLM_FUNC_QUALIFIER void _apply_op(vec<N, T, Q> const& that, const U& op)
        {
            // Make a copy of the data in this == &that.
            // The copier should optimize out the copy in cases where the function is
            // properly inlined and the copy is not necessary.
            T t[N];
            for (int i = 0; i < N; ++i)
                t[i] = that[i];
            for (int i = 0; i < N; ++i)
                op( (*this)[i], t[i] );
        }
    };

    // Specialization for swizzles containing duplicate elements. These cannot be modified.
    template<int N, typename T, qualifier Q, int E0, int E1, int E2, int E3>
    struct _swizzle_base2<N, T, Q, E0,E1,E2,E3, 1> : public _swizzle_base1<N, T, Q, E0,E1,E2,E3, detail::is_aligned<Q>::value>
    {
        struct Stub {};

        GLM_FUNC_QUALIFIER _swizzle_base2& operator= (Stub const&) { return *this; }

        GLM_FUNC_QUALIFIER T operator[] (size_t i) const
        {
            const int offset_dst[4] = { E0, E1, E2, E3 };
            return this->elem(offset_dst[i]);
        }
    };

    template<int N, typename T, qualifier Q, int E0, int E1, int E2, int E3>
    struct _swizzle : public _swizzle_base2<N, T, Q, E0, E1, E2, E3, (E0 == E1 || E0 == E2 || E0 == E3 || E1 == E2 || E1 == E3 || E2 == E3)>
    {
        typedef _swizzle_base2<N, T, Q, E0, E1, E2, E3, (E0 == E1 || E0 == E2 || E0 == E3 || E1 == E2 || E1 == E3 || E2 == E3)> base_type;

        using base_type::operator=;

        GLM_FUNC_QUALIFIER operator vec<N, T, Q> () const { return (*this)(); }
    };

//
// To prevent the C++ syntax from getting entirely overwhelming, define some alias macros
//
#define GLM_SWIZZLE_TEMPLATE1 template<int N, typename T, qualifier Q, int E0, int E1, int E2, int E3>
#define GLM_SWIZZLE_TEMPLATE2 template<int N, typename T, qualifier Q, int E0, int E1, int E2, int E3, int F0, int F1, int F2, int F3>
#define GLM_SWIZZLE_TYPE1 _swizzle<N, T, Q, E0, E1, E2, E3>
#define GLM_SWIZZLE_TYPE2 _swizzle<N, T, Q, F0, F1, F2, F3>

//
// Wrapper for a binary operator (e.g. u.yy + v.zy)
//
#define GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(OPERAND) \
    GLM_SWIZZLE_TEMPLATE2 \
    GLM_FUNC_QUALIFIER vec<N, T, Q> operator OPERAND ( const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE2& b) \
    { \
        return a() OPERAND b(); \
    } \
    GLM_SWIZZLE_TEMPLATE1 \
    GLM_FUNC_QUALIFIER vec<N, T, Q> operator OPERAND ( const GLM_SWIZZLE_TYPE1& a, const vec<N, T, Q>& b) \
    { \
        return a() OPERAND b; \
    } \
    GLM_SWIZZLE_TEMPLATE1 \
    GLM_FUNC_QUALIFIER vec<N, T, Q> operator OPERAND ( const vec<N, T, Q>& a, const GLM_SWIZZLE_TYPE1& b) \
    { \
        return a OPERAND b(); \
    }

//
// Wrapper for a operand between a swizzle and a binary (e.g. 1.0f - u.xyz)
//
#define GLM_SWIZZLE_SCALAR_BINARY_OPERATOR_IMPLEMENTATION(OPERAND) \
    GLM_SWIZZLE_TEMPLATE1 \
    GLM_FUNC_QUALIFIER vec<N, T, Q> operator OPERAND ( const GLM_SWIZZLE_TYPE1& a, const T& b) \
    { \
        return a() OPERAND b; \
    } \
    GLM_SWIZZLE_TEMPLATE1 \
    GLM_FUNC_QUALIFIER vec<N, T, Q> operator OPERAND ( const T& a, const GLM_SWIZZLE_TYPE1& b) \
    { \
        return a OPERAND b(); \
    }

//
// Macro for wrapping a function taking one argument (e.g. abs())
//
#define GLM_SWIZZLE_FUNCTION_1_ARGS(RETURN_TYPE,FUNCTION) \
    GLM_SWIZZLE_TEMPLATE1 \
    GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a) \
    { \
        return FUNCTION(a()); \
    }

//
// Macro for wrapping a function taking two vector arguments (e.g. dot()).
//
#define GLM_SWIZZLE_FUNCTION_2_ARGS(RETURN_TYPE,FUNCTION) \
    GLM_SWIZZLE_TEMPLATE2 \
    GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE2& b) \
    { \
        return FUNCTION(a(), b()); \
    } \
    GLM_SWIZZLE_TEMPLATE1 \
    GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE1& b) \
    { \
        return FUNCTION(a(), b()); \
    } \
    GLM_SWIZZLE_TEMPLATE1 \
    GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const typename V& b) \
    { \
        return FUNCTION(a(), b); \
    } \
    GLM_SWIZZLE_TEMPLATE1 \
    GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const V& a, const GLM_SWIZZLE_TYPE1& b) \
    { \
        return FUNCTION(a, b()); \
    }

//
// Macro for wrapping a function take 2 vec arguments followed by a scalar (e.g. mix()).
//
#define GLM_SWIZZLE_FUNCTION_2_ARGS_SCALAR(RETURN_TYPE,FUNCTION) \
    GLM_SWIZZLE_TEMPLATE2 \
    GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE2& b, const T& c) \
    { \
        return FUNCTION(a(), b(), c); \
    } \
    GLM_SWIZZLE_TEMPLATE1 \
    GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE1& b, const T& c) \
    { \
        return FUNCTION(a(), b(), c); \
    } \
    GLM_SWIZZLE_TEMPLATE1 \
    GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const typename S0::vec_type& b, const T& c)\
    { \
        return FUNCTION(a(), b, c); \
    } \
    GLM_SWIZZLE_TEMPLATE1 \
    GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const typename V& a, const GLM_SWIZZLE_TYPE1& b, const T& c) \
    { \
        return FUNCTION(a, b(), c); \
    }

}//namespace detail
}//namespace glm

namespace glm
{
    namespace detail
    {
        GLM_SWIZZLE_SCALAR_BINARY_OPERATOR_IMPLEMENTATION(-)
        GLM_SWIZZLE_SCALAR_BINARY_OPERATOR_IMPLEMENTATION(*)
        GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(+)
        GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(-)
        GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(*)
        GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(/)
    }

    //
    // Swizzles are distinct types from the unswizzled type. The below macros will
    // provide template specializations for the swizzle types for the given functions
    // so that the compiler does not have any ambiguity to choosing how to handle
    // the function.
    //
    // The alternative is to use the operator()() when calling the function in order
    // to explicitly convert the swizzled type to the unswizzled type.
    //

    //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, abs);
    //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, acos);
    //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, acosh);
    //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, all);
    //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, any);

    //GLM_SWIZZLE_FUNCTION_2_ARGS(value_type, dot);
    //GLM_SWIZZLE_FUNCTION_2_ARGS(vec_type, cross);
    //GLM_SWIZZLE_FUNCTION_2_ARGS(vec_type, step);
    //GLM_SWIZZLE_FUNCTION_2_ARGS_SCALAR(vec_type, mix);
}

#define GLM_SWIZZLE2_2_MEMBERS(T, Q, E0,E1) \
    struct { detail::_swizzle<2, T, Q, 0,0,-1,-2> E0 ## E0; }; \
    struct { detail::_swizzle<2, T, Q, 0,1,-1,-2> E0 ## E1; }; \
    struct { detail::_swizzle<2, T, Q, 1,0,-1,-2> E1 ## E0; }; \
    struct { detail::_swizzle<2, T, Q, 1,1,-1,-2> E1 ## E1; };

#define GLM_SWIZZLE2_3_MEMBERS(T, Q, E0,E1) \
    struct { detail::_swizzle<3,T, Q, 0,0,0,-1> E0 ## E0 ## E0; }; \
    struct { detail::_swizzle<3,T, Q, 0,0,1,-1> E0 ## E0 ## E1; }; \
    struct { detail::_swizzle<3,T, Q, 0,1,0,-1> E0 ## E1 ## E0; }; \
    struct { detail::_swizzle<3,T, Q, 0,1,1,-1> E0 ## E1 ## E1; }; \
    struct { detail::_swizzle<3,T, Q, 1,0,0,-1> E1 ## E0 ## E0; }; \
    struct { detail::_swizzle<3,T, Q, 1,0,1,-1> E1 ## E0 ## E1; }; \
    struct { detail::_swizzle<3,T, Q, 1,1,0,-1> E1 ## E1 ## E0; }; \
    struct { detail::_swizzle<3,T, Q, 1,1,1,-1> E1 ## E1 ## E1; };

#define GLM_SWIZZLE2_4_MEMBERS(T, Q, E0,E1) \
    struct { detail::_swizzle<4,T, Q, 0,0,0,0> E0 ## E0 ## E0 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 0,0,0,1> E0 ## E0 ## E0 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 0,0,1,0> E0 ## E0 ## E1 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 0,0,1,1> E0 ## E0 ## E1 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 0,1,0,0> E0 ## E1 ## E0 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 0,1,0,1> E0 ## E1 ## E0 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 0,1,1,0> E0 ## E1 ## E1 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 0,1,1,1> E0 ## E1 ## E1 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 1,0,0,0> E1 ## E0 ## E0 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 1,0,0,1> E1 ## E0 ## E0 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 1,0,1,0> E1 ## E0 ## E1 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 1,0,1,1> E1 ## E0 ## E1 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 1,1,0,0> E1 ## E1 ## E0 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 1,1,0,1> E1 ## E1 ## E0 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 1,1,1,0> E1 ## E1 ## E1 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 1,1,1,1> E1 ## E1 ## E1 ## E1; };

#define GLM_SWIZZLE3_2_MEMBERS(T, Q, E0,E1,E2) \
    struct { detail::_swizzle<2,T, Q, 0,0,-1,-2> E0 ## E0; }; \
    struct { detail::_swizzle<2,T, Q, 0,1,-1,-2> E0 ## E1; }; \
    struct { detail::_swizzle<2,T, Q, 0,2,-1,-2> E0 ## E2; }; \
    struct { detail::_swizzle<2,T, Q, 1,0,-1,-2> E1 ## E0; }; \
    struct { detail::_swizzle<2,T, Q, 1,1,-1,-2> E1 ## E1; }; \
    struct { detail::_swizzle<2,T, Q, 1,2,-1,-2> E1 ## E2; }; \
    struct { detail::_swizzle<2,T, Q, 2,0,-1,-2> E2 ## E0; }; \
    struct { detail::_swizzle<2,T, Q, 2,1,-1,-2> E2 ## E1; }; \
    struct { detail::_swizzle<2,T, Q, 2,2,-1,-2> E2 ## E2; };

#define GLM_SWIZZLE3_3_MEMBERS(T, Q ,E0,E1,E2) \
    struct { detail::_swizzle<3, T, Q, 0,0,0,-1> E0 ## E0 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 0,0,1,-1> E0 ## E0 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 0,0,2,-1> E0 ## E0 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 0,1,0,-1> E0 ## E1 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 0,1,1,-1> E0 ## E1 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 0,1,2,-1> E0 ## E1 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 0,2,0,-1> E0 ## E2 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 0,2,1,-1> E0 ## E2 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 0,2,2,-1> E0 ## E2 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 1,0,0,-1> E1 ## E0 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 1,0,1,-1> E1 ## E0 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 1,0,2,-1> E1 ## E0 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 1,1,0,-1> E1 ## E1 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 1,1,1,-1> E1 ## E1 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 1,1,2,-1> E1 ## E1 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 1,2,0,-1> E1 ## E2 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 1,2,1,-1> E1 ## E2 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 1,2,2,-1> E1 ## E2 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 2,0,0,-1> E2 ## E0 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 2,0,1,-1> E2 ## E0 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 2,0,2,-1> E2 ## E0 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 2,1,0,-1> E2 ## E1 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 2,1,1,-1> E2 ## E1 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 2,1,2,-1> E2 ## E1 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 2,2,0,-1> E2 ## E2 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 2,2,1,-1> E2 ## E2 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 2,2,2,-1> E2 ## E2 ## E2; };

#define GLM_SWIZZLE3_4_MEMBERS(T, Q, E0,E1,E2) \
    struct { detail::_swizzle<4,T, Q, 0,0,0,0> E0 ## E0 ## E0 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 0,0,0,1> E0 ## E0 ## E0 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 0,0,0,2> E0 ## E0 ## E0 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 0,0,1,0> E0 ## E0 ## E1 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 0,0,1,1> E0 ## E0 ## E1 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 0,0,1,2> E0 ## E0 ## E1 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 0,0,2,0> E0 ## E0 ## E2 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 0,0,2,1> E0 ## E0 ## E2 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 0,0,2,2> E0 ## E0 ## E2 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 0,1,0,0> E0 ## E1 ## E0 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 0,1,0,1> E0 ## E1 ## E0 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 0,1,0,2> E0 ## E1 ## E0 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 0,1,1,0> E0 ## E1 ## E1 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 0,1,1,1> E0 ## E1 ## E1 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 0,1,1,2> E0 ## E1 ## E1 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 0,1,2,0> E0 ## E1 ## E2 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 0,1,2,1> E0 ## E1 ## E2 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 0,1,2,2> E0 ## E1 ## E2 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 0,2,0,0> E0 ## E2 ## E0 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 0,2,0,1> E0 ## E2 ## E0 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 0,2,0,2> E0 ## E2 ## E0 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 0,2,1,0> E0 ## E2 ## E1 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 0,2,1,1> E0 ## E2 ## E1 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 0,2,1,2> E0 ## E2 ## E1 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 0,2,2,0> E0 ## E2 ## E2 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 0,2,2,1> E0 ## E2 ## E2 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 0,2,2,2> E0 ## E2 ## E2 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 1,0,0,0> E1 ## E0 ## E0 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 1,0,0,1> E1 ## E0 ## E0 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 1,0,0,2> E1 ## E0 ## E0 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 1,0,1,0> E1 ## E0 ## E1 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 1,0,1,1> E1 ## E0 ## E1 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 1,0,1,2> E1 ## E0 ## E1 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 1,0,2,0> E1 ## E0 ## E2 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 1,0,2,1> E1 ## E0 ## E2 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 1,0,2,2> E1 ## E0 ## E2 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 1,1,0,0> E1 ## E1 ## E0 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 1,1,0,1> E1 ## E1 ## E0 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 1,1,0,2> E1 ## E1 ## E0 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 1,1,1,0> E1 ## E1 ## E1 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 1,1,1,1> E1 ## E1 ## E1 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 1,1,1,2> E1 ## E1 ## E1 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 1,1,2,0> E1 ## E1 ## E2 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 1,1,2,1> E1 ## E1 ## E2 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 1,1,2,2> E1 ## E1 ## E2 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 1,2,0,0> E1 ## E2 ## E0 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 1,2,0,1> E1 ## E2 ## E0 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 1,2,0,2> E1 ## E2 ## E0 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 1,2,1,0> E1 ## E2 ## E1 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 1,2,1,1> E1 ## E2 ## E1 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 1,2,1,2> E1 ## E2 ## E1 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 1,2,2,0> E1 ## E2 ## E2 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 1,2,2,1> E1 ## E2 ## E2 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 1,2,2,2> E1 ## E2 ## E2 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 2,0,0,0> E2 ## E0 ## E0 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 2,0,0,1> E2 ## E0 ## E0 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 2,0,0,2> E2 ## E0 ## E0 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 2,0,1,0> E2 ## E0 ## E1 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 2,0,1,1> E2 ## E0 ## E1 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 2,0,1,2> E2 ## E0 ## E1 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 2,0,2,0> E2 ## E0 ## E2 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 2,0,2,1> E2 ## E0 ## E2 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 2,0,2,2> E2 ## E0 ## E2 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 2,1,0,0> E2 ## E1 ## E0 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 2,1,0,1> E2 ## E1 ## E0 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 2,1,0,2> E2 ## E1 ## E0 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 2,1,1,0> E2 ## E1 ## E1 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 2,1,1,1> E2 ## E1 ## E1 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 2,1,1,2> E2 ## E1 ## E1 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 2,1,2,0> E2 ## E1 ## E2 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 2,1,2,1> E2 ## E1 ## E2 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 2,1,2,2> E2 ## E1 ## E2 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 2,2,0,0> E2 ## E2 ## E0 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 2,2,0,1> E2 ## E2 ## E0 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 2,2,0,2> E2 ## E2 ## E0 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 2,2,1,0> E2 ## E2 ## E1 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 2,2,1,1> E2 ## E2 ## E1 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 2,2,1,2> E2 ## E2 ## E1 ## E2; }; \
    struct { detail::_swizzle<4,T, Q, 2,2,2,0> E2 ## E2 ## E2 ## E0; }; \
    struct { detail::_swizzle<4,T, Q, 2,2,2,1> E2 ## E2 ## E2 ## E1; }; \
    struct { detail::_swizzle<4,T, Q, 2,2,2,2> E2 ## E2 ## E2 ## E2; };

#define GLM_SWIZZLE4_2_MEMBERS(T, Q, E0,E1,E2,E3) \
    struct { detail::_swizzle<2,T, Q, 0,0,-1,-2> E0 ## E0; }; \
    struct { detail::_swizzle<2,T, Q, 0,1,-1,-2> E0 ## E1; }; \
    struct { detail::_swizzle<2,T, Q, 0,2,-1,-2> E0 ## E2; }; \
    struct { detail::_swizzle<2,T, Q, 0,3,-1,-2> E0 ## E3; }; \
    struct { detail::_swizzle<2,T, Q, 1,0,-1,-2> E1 ## E0; }; \
    struct { detail::_swizzle<2,T, Q, 1,1,-1,-2> E1 ## E1; }; \
    struct { detail::_swizzle<2,T, Q, 1,2,-1,-2> E1 ## E2; }; \
    struct { detail::_swizzle<2,T, Q, 1,3,-1,-2> E1 ## E3; }; \
    struct { detail::_swizzle<2,T, Q, 2,0,-1,-2> E2 ## E0; }; \
    struct { detail::_swizzle<2,T, Q, 2,1,-1,-2> E2 ## E1; }; \
    struct { detail::_swizzle<2,T, Q, 2,2,-1,-2> E2 ## E2; }; \
    struct { detail::_swizzle<2,T, Q, 2,3,-1,-2> E2 ## E3; }; \
    struct { detail::_swizzle<2,T, Q, 3,0,-1,-2> E3 ## E0; }; \
    struct { detail::_swizzle<2,T, Q, 3,1,-1,-2> E3 ## E1; }; \
    struct { detail::_swizzle<2,T, Q, 3,2,-1,-2> E3 ## E2; }; \
    struct { detail::_swizzle<2,T, Q, 3,3,-1,-2> E3 ## E3; };

#define GLM_SWIZZLE4_3_MEMBERS(T, Q, E0,E1,E2,E3) \
    struct { detail::_swizzle<3, T, Q, 0,0,0,-1> E0 ## E0 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 0,0,1,-1> E0 ## E0 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 0,0,2,-1> E0 ## E0 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 0,0,3,-1> E0 ## E0 ## E3; }; \
    struct { detail::_swizzle<3, T, Q, 0,1,0,-1> E0 ## E1 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 0,1,1,-1> E0 ## E1 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 0,1,2,-1> E0 ## E1 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 0,1,3,-1> E0 ## E1 ## E3; }; \
    struct { detail::_swizzle<3, T, Q, 0,2,0,-1> E0 ## E2 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 0,2,1,-1> E0 ## E2 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 0,2,2,-1> E0 ## E2 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 0,2,3,-1> E0 ## E2 ## E3; }; \
    struct { detail::_swizzle<3, T, Q, 0,3,0,-1> E0 ## E3 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 0,3,1,-1> E0 ## E3 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 0,3,2,-1> E0 ## E3 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 0,3,3,-1> E0 ## E3 ## E3; }; \
    struct { detail::_swizzle<3, T, Q, 1,0,0,-1> E1 ## E0 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 1,0,1,-1> E1 ## E0 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 1,0,2,-1> E1 ## E0 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 1,0,3,-1> E1 ## E0 ## E3; }; \
    struct { detail::_swizzle<3, T, Q, 1,1,0,-1> E1 ## E1 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 1,1,1,-1> E1 ## E1 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 1,1,2,-1> E1 ## E1 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 1,1,3,-1> E1 ## E1 ## E3; }; \
    struct { detail::_swizzle<3, T, Q, 1,2,0,-1> E1 ## E2 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 1,2,1,-1> E1 ## E2 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 1,2,2,-1> E1 ## E2 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 1,2,3,-1> E1 ## E2 ## E3; }; \
    struct { detail::_swizzle<3, T, Q, 1,3,0,-1> E1 ## E3 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 1,3,1,-1> E1 ## E3 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 1,3,2,-1> E1 ## E3 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 1,3,3,-1> E1 ## E3 ## E3; }; \
    struct { detail::_swizzle<3, T, Q, 2,0,0,-1> E2 ## E0 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 2,0,1,-1> E2 ## E0 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 2,0,2,-1> E2 ## E0 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 2,0,3,-1> E2 ## E0 ## E3; }; \
    struct { detail::_swizzle<3, T, Q, 2,1,0,-1> E2 ## E1 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 2,1,1,-1> E2 ## E1 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 2,1,2,-1> E2 ## E1 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 2,1,3,-1> E2 ## E1 ## E3; }; \
    struct { detail::_swizzle<3, T, Q, 2,2,0,-1> E2 ## E2 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 2,2,1,-1> E2 ## E2 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 2,2,2,-1> E2 ## E2 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 2,2,3,-1> E2 ## E2 ## E3; }; \
    struct { detail::_swizzle<3, T, Q, 2,3,0,-1> E2 ## E3 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 2,3,1,-1> E2 ## E3 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 2,3,2,-1> E2 ## E3 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 2,3,3,-1> E2 ## E3 ## E3; }; \
    struct { detail::_swizzle<3, T, Q, 3,0,0,-1> E3 ## E0 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 3,0,1,-1> E3 ## E0 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 3,0,2,-1> E3 ## E0 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 3,0,3,-1> E3 ## E0 ## E3; }; \
    struct { detail::_swizzle<3, T, Q, 3,1,0,-1> E3 ## E1 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 3,1,1,-1> E3 ## E1 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 3,1,2,-1> E3 ## E1 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 3,1,3,-1> E3 ## E1 ## E3; }; \
    struct { detail::_swizzle<3, T, Q, 3,2,0,-1> E3 ## E2 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 3,2,1,-1> E3 ## E2 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 3,2,2,-1> E3 ## E2 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 3,2,3,-1> E3 ## E2 ## E3; }; \
    struct { detail::_swizzle<3, T, Q, 3,3,0,-1> E3 ## E3 ## E0; }; \
    struct { detail::_swizzle<3, T, Q, 3,3,1,-1> E3 ## E3 ## E1; }; \
    struct { detail::_swizzle<3, T, Q, 3,3,2,-1> E3 ## E3 ## E2; }; \
    struct { detail::_swizzle<3, T, Q, 3,3,3,-1> E3 ## E3 ## E3; };

#define GLM_SWIZZLE4_4_MEMBERS(T, Q, E0,E1,E2,E3) \
    struct { detail::_swizzle<4, T, Q, 0,0,0,0> E0 ## E0 ## E0 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 0,0,0,1> E0 ## E0 ## E0 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 0,0,0,2> E0 ## E0 ## E0 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 0,0,0,3> E0 ## E0 ## E0 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 0,0,1,0> E0 ## E0 ## E1 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 0,0,1,1> E0 ## E0 ## E1 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 0,0,1,2> E0 ## E0 ## E1 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 0,0,1,3> E0 ## E0 ## E1 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 0,0,2,0> E0 ## E0 ## E2 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 0,0,2,1> E0 ## E0 ## E2 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 0,0,2,2> E0 ## E0 ## E2 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 0,0,2,3> E0 ## E0 ## E2 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 0,0,3,0> E0 ## E0 ## E3 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 0,0,3,1> E0 ## E0 ## E3 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 0,0,3,2> E0 ## E0 ## E3 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 0,0,3,3> E0 ## E0 ## E3 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 0,1,0,0> E0 ## E1 ## E0 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 0,1,0,1> E0 ## E1 ## E0 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 0,1,0,2> E0 ## E1 ## E0 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 0,1,0,3> E0 ## E1 ## E0 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 0,1,1,0> E0 ## E1 ## E1 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 0,1,1,1> E0 ## E1 ## E1 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 0,1,1,2> E0 ## E1 ## E1 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 0,1,1,3> E0 ## E1 ## E1 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 0,1,2,0> E0 ## E1 ## E2 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 0,1,2,1> E0 ## E1 ## E2 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 0,1,2,2> E0 ## E1 ## E2 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 0,1,2,3> E0 ## E1 ## E2 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 0,1,3,0> E0 ## E1 ## E3 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 0,1,3,1> E0 ## E1 ## E3 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 0,1,3,2> E0 ## E1 ## E3 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 0,1,3,3> E0 ## E1 ## E3 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 0,2,0,0> E0 ## E2 ## E0 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 0,2,0,1> E0 ## E2 ## E0 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 0,2,0,2> E0 ## E2 ## E0 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 0,2,0,3> E0 ## E2 ## E0 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 0,2,1,0> E0 ## E2 ## E1 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 0,2,1,1> E0 ## E2 ## E1 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 0,2,1,2> E0 ## E2 ## E1 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 0,2,1,3> E0 ## E2 ## E1 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 0,2,2,0> E0 ## E2 ## E2 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 0,2,2,1> E0 ## E2 ## E2 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 0,2,2,2> E0 ## E2 ## E2 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 0,2,2,3> E0 ## E2 ## E2 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 0,2,3,0> E0 ## E2 ## E3 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 0,2,3,1> E0 ## E2 ## E3 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 0,2,3,2> E0 ## E2 ## E3 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 0,2,3,3> E0 ## E2 ## E3 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 0,3,0,0> E0 ## E3 ## E0 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 0,3,0,1> E0 ## E3 ## E0 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 0,3,0,2> E0 ## E3 ## E0 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 0,3,0,3> E0 ## E3 ## E0 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 0,3,1,0> E0 ## E3 ## E1 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 0,3,1,1> E0 ## E3 ## E1 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 0,3,1,2> E0 ## E3 ## E1 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 0,3,1,3> E0 ## E3 ## E1 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 0,3,2,0> E0 ## E3 ## E2 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 0,3,2,1> E0 ## E3 ## E2 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 0,3,2,2> E0 ## E3 ## E2 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 0,3,2,3> E0 ## E3 ## E2 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 0,3,3,0> E0 ## E3 ## E3 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 0,3,3,1> E0 ## E3 ## E3 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 0,3,3,2> E0 ## E3 ## E3 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 0,3,3,3> E0 ## E3 ## E3 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 1,0,0,0> E1 ## E0 ## E0 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 1,0,0,1> E1 ## E0 ## E0 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 1,0,0,2> E1 ## E0 ## E0 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 1,0,0,3> E1 ## E0 ## E0 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 1,0,1,0> E1 ## E0 ## E1 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 1,0,1,1> E1 ## E0 ## E1 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 1,0,1,2> E1 ## E0 ## E1 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 1,0,1,3> E1 ## E0 ## E1 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 1,0,2,0> E1 ## E0 ## E2 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 1,0,2,1> E1 ## E0 ## E2 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 1,0,2,2> E1 ## E0 ## E2 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 1,0,2,3> E1 ## E0 ## E2 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 1,0,3,0> E1 ## E0 ## E3 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 1,0,3,1> E1 ## E0 ## E3 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 1,0,3,2> E1 ## E0 ## E3 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 1,0,3,3> E1 ## E0 ## E3 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 1,1,0,0> E1 ## E1 ## E0 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 1,1,0,1> E1 ## E1 ## E0 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 1,1,0,2> E1 ## E1 ## E0 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 1,1,0,3> E1 ## E1 ## E0 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 1,1,1,0> E1 ## E1 ## E1 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 1,1,1,1> E1 ## E1 ## E1 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 1,1,1,2> E1 ## E1 ## E1 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 1,1,1,3> E1 ## E1 ## E1 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 1,1,2,0> E1 ## E1 ## E2 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 1,1,2,1> E1 ## E1 ## E2 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 1,1,2,2> E1 ## E1 ## E2 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 1,1,2,3> E1 ## E1 ## E2 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 1,1,3,0> E1 ## E1 ## E3 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 1,1,3,1> E1 ## E1 ## E3 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 1,1,3,2> E1 ## E1 ## E3 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 1,1,3,3> E1 ## E1 ## E3 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 1,2,0,0> E1 ## E2 ## E0 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 1,2,0,1> E1 ## E2 ## E0 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 1,2,0,2> E1 ## E2 ## E0 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 1,2,0,3> E1 ## E2 ## E0 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 1,2,1,0> E1 ## E2 ## E1 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 1,2,1,1> E1 ## E2 ## E1 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 1,2,1,2> E1 ## E2 ## E1 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 1,2,1,3> E1 ## E2 ## E1 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 1,2,2,0> E1 ## E2 ## E2 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 1,2,2,1> E1 ## E2 ## E2 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 1,2,2,2> E1 ## E2 ## E2 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 1,2,2,3> E1 ## E2 ## E2 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 1,2,3,0> E1 ## E2 ## E3 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 1,2,3,1> E1 ## E2 ## E3 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 1,2,3,2> E1 ## E2 ## E3 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 1,2,3,3> E1 ## E2 ## E3 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 1,3,0,0> E1 ## E3 ## E0 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 1,3,0,1> E1 ## E3 ## E0 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 1,3,0,2> E1 ## E3 ## E0 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 1,3,0,3> E1 ## E3 ## E0 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 1,3,1,0> E1 ## E3 ## E1 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 1,3,1,1> E1 ## E3 ## E1 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 1,3,1,2> E1 ## E3 ## E1 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 1,3,1,3> E1 ## E3 ## E1 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 1,3,2,0> E1 ## E3 ## E2 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 1,3,2,1> E1 ## E3 ## E2 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 1,3,2,2> E1 ## E3 ## E2 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 1,3,2,3> E1 ## E3 ## E2 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 1,3,3,0> E1 ## E3 ## E3 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 1,3,3,1> E1 ## E3 ## E3 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 1,3,3,2> E1 ## E3 ## E3 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 1,3,3,3> E1 ## E3 ## E3 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 2,0,0,0> E2 ## E0 ## E0 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 2,0,0,1> E2 ## E0 ## E0 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 2,0,0,2> E2 ## E0 ## E0 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 2,0,0,3> E2 ## E0 ## E0 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 2,0,1,0> E2 ## E0 ## E1 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 2,0,1,1> E2 ## E0 ## E1 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 2,0,1,2> E2 ## E0 ## E1 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 2,0,1,3> E2 ## E0 ## E1 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 2,0,2,0> E2 ## E0 ## E2 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 2,0,2,1> E2 ## E0 ## E2 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 2,0,2,2> E2 ## E0 ## E2 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 2,0,2,3> E2 ## E0 ## E2 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 2,0,3,0> E2 ## E0 ## E3 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 2,0,3,1> E2 ## E0 ## E3 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 2,0,3,2> E2 ## E0 ## E3 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 2,0,3,3> E2 ## E0 ## E3 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 2,1,0,0> E2 ## E1 ## E0 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 2,1,0,1> E2 ## E1 ## E0 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 2,1,0,2> E2 ## E1 ## E0 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 2,1,0,3> E2 ## E1 ## E0 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 2,1,1,0> E2 ## E1 ## E1 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 2,1,1,1> E2 ## E1 ## E1 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 2,1,1,2> E2 ## E1 ## E1 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 2,1,1,3> E2 ## E1 ## E1 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 2,1,2,0> E2 ## E1 ## E2 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 2,1,2,1> E2 ## E1 ## E2 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 2,1,2,2> E2 ## E1 ## E2 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 2,1,2,3> E2 ## E1 ## E2 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 2,1,3,0> E2 ## E1 ## E3 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 2,1,3,1> E2 ## E1 ## E3 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 2,1,3,2> E2 ## E1 ## E3 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 2,1,3,3> E2 ## E1 ## E3 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 2,2,0,0> E2 ## E2 ## E0 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 2,2,0,1> E2 ## E2 ## E0 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 2,2,0,2> E2 ## E2 ## E0 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 2,2,0,3> E2 ## E2 ## E0 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 2,2,1,0> E2 ## E2 ## E1 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 2,2,1,1> E2 ## E2 ## E1 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 2,2,1,2> E2 ## E2 ## E1 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 2,2,1,3> E2 ## E2 ## E1 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 2,2,2,0> E2 ## E2 ## E2 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 2,2,2,1> E2 ## E2 ## E2 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 2,2,2,2> E2 ## E2 ## E2 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 2,2,2,3> E2 ## E2 ## E2 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 2,2,3,0> E2 ## E2 ## E3 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 2,2,3,1> E2 ## E2 ## E3 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 2,2,3,2> E2 ## E2 ## E3 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 2,2,3,3> E2 ## E2 ## E3 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 2,3,0,0> E2 ## E3 ## E0 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 2,3,0,1> E2 ## E3 ## E0 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 2,3,0,2> E2 ## E3 ## E0 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 2,3,0,3> E2 ## E3 ## E0 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 2,3,1,0> E2 ## E3 ## E1 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 2,3,1,1> E2 ## E3 ## E1 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 2,3,1,2> E2 ## E3 ## E1 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 2,3,1,3> E2 ## E3 ## E1 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 2,3,2,0> E2 ## E3 ## E2 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 2,3,2,1> E2 ## E3 ## E2 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 2,3,2,2> E2 ## E3 ## E2 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 2,3,2,3> E2 ## E3 ## E2 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 2,3,3,0> E2 ## E3 ## E3 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 2,3,3,1> E2 ## E3 ## E3 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 2,3,3,2> E2 ## E3 ## E3 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 2,3,3,3> E2 ## E3 ## E3 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 3,0,0,0> E3 ## E0 ## E0 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 3,0,0,1> E3 ## E0 ## E0 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 3,0,0,2> E3 ## E0 ## E0 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 3,0,0,3> E3 ## E0 ## E0 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 3,0,1,0> E3 ## E0 ## E1 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 3,0,1,1> E3 ## E0 ## E1 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 3,0,1,2> E3 ## E0 ## E1 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 3,0,1,3> E3 ## E0 ## E1 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 3,0,2,0> E3 ## E0 ## E2 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 3,0,2,1> E3 ## E0 ## E2 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 3,0,2,2> E3 ## E0 ## E2 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 3,0,2,3> E3 ## E0 ## E2 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 3,0,3,0> E3 ## E0 ## E3 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 3,0,3,1> E3 ## E0 ## E3 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 3,0,3,2> E3 ## E0 ## E3 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 3,0,3,3> E3 ## E0 ## E3 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 3,1,0,0> E3 ## E1 ## E0 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 3,1,0,1> E3 ## E1 ## E0 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 3,1,0,2> E3 ## E1 ## E0 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 3,1,0,3> E3 ## E1 ## E0 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 3,1,1,0> E3 ## E1 ## E1 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 3,1,1,1> E3 ## E1 ## E1 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 3,1,1,2> E3 ## E1 ## E1 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 3,1,1,3> E3 ## E1 ## E1 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 3,1,2,0> E3 ## E1 ## E2 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 3,1,2,1> E3 ## E1 ## E2 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 3,1,2,2> E3 ## E1 ## E2 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 3,1,2,3> E3 ## E1 ## E2 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 3,1,3,0> E3 ## E1 ## E3 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 3,1,3,1> E3 ## E1 ## E3 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 3,1,3,2> E3 ## E1 ## E3 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 3,1,3,3> E3 ## E1 ## E3 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 3,2,0,0> E3 ## E2 ## E0 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 3,2,0,1> E3 ## E2 ## E0 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 3,2,0,2> E3 ## E2 ## E0 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 3,2,0,3> E3 ## E2 ## E0 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 3,2,1,0> E3 ## E2 ## E1 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 3,2,1,1> E3 ## E2 ## E1 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 3,2,1,2> E3 ## E2 ## E1 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 3,2,1,3> E3 ## E2 ## E1 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 3,2,2,0> E3 ## E2 ## E2 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 3,2,2,1> E3 ## E2 ## E2 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 3,2,2,2> E3 ## E2 ## E2 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 3,2,2,3> E3 ## E2 ## E2 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 3,2,3,0> E3 ## E2 ## E3 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 3,2,3,1> E3 ## E2 ## E3 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 3,2,3,2> E3 ## E2 ## E3 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 3,2,3,3> E3 ## E2 ## E3 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 3,3,0,0> E3 ## E3 ## E0 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 3,3,0,1> E3 ## E3 ## E0 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 3,3,0,2> E3 ## E3 ## E0 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 3,3,0,3> E3 ## E3 ## E0 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 3,3,1,0> E3 ## E3 ## E1 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 3,3,1,1> E3 ## E3 ## E1 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 3,3,1,2> E3 ## E3 ## E1 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 3,3,1,3> E3 ## E3 ## E1 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 3,3,2,0> E3 ## E3 ## E2 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 3,3,2,1> E3 ## E3 ## E2 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 3,3,2,2> E3 ## E3 ## E2 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 3,3,2,3> E3 ## E3 ## E2 ## E3; }; \
    struct { detail::_swizzle<4, T, Q, 3,3,3,0> E3 ## E3 ## E3 ## E0; }; \
    struct { detail::_swizzle<4, T, Q, 3,3,3,1> E3 ## E3 ## E3 ## E1; }; \
    struct { detail::_swizzle<4, T, Q, 3,3,3,2> E3 ## E3 ## E3 ## E2; }; \
    struct { detail::_swizzle<4, T, Q, 3,3,3,3> E3 ## E3 ## E3 ## E3; };
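Put together, this machinery is what lets a member like v.xy act as a value: each unioned _swizzle member reinterprets the vector's storage, converts to a plain vec on read, and, when all elements are distinct, writes through operator=. A usage sketch, assuming GLM is compiled with member swizzles enabled via GLM_FORCE_SWIZZLE on a compiler that supports anonymous structs (illustrative, not part of the diff):

#define GLM_FORCE_SWIZZLE   // must precede the first GLM include
#include <glm/glm.hpp>

glm::vec4 v(1.0f, 2.0f, 3.0f, 4.0f);
glm::vec2 a = v.xy;              // _swizzle converts to vec<2> on read
glm::vec3 b = v.zyx;             // (3, 2, 1): indices remapped through elem()
// v.xy = glm::vec2(9.0f, 8.0f); // distinct elements: assignable via _swizzle_base2::operator=
// v.xx = glm::vec2(9.0f, 8.0f); // would not compile: duplicate-element specialization is read-only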
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00005_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00005_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..5ef1455ec2fa5392c87ea69914a47c5bd367ffc9
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00005_source.html
@@ -0,0 +1,781 @@
[0.9.9 API documentation: _swizzle_func.hpp Source File — doxygen page chrome omitted; source listing follows]
#pragma once

#define GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, CONST, A, B) \
    vec<2, T, Q> A ## B() CONST \
    { \
        return vec<2, T, Q>(this->A, this->B); \
    }

#define GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, CONST, A, B, C) \
    vec<3, T, Q> A ## B ## C() CONST \
    { \
        return vec<3, T, Q>(this->A, this->B, this->C); \
    }

#define GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, CONST, A, B, C, D) \
    vec<4, T, Q> A ## B ## C ## D() CONST \
    { \
        return vec<4, T, Q>(this->A, this->B, this->C, this->D); \
    }

#define GLM_SWIZZLE_GEN_VEC2_ENTRY_DEF(T, P, L, CONST, A, B) \
    template<typename T> \
    vec<L, T, Q> vec<L, T, Q>::A ## B() CONST \
    { \
        return vec<2, T, Q>(this->A, this->B); \
    }

#define GLM_SWIZZLE_GEN_VEC3_ENTRY_DEF(T, P, L, CONST, A, B, C) \
    template<typename T> \
    vec<3, T, Q> vec<L, T, Q>::A ## B ## C() CONST \
    { \
        return vec<3, T, Q>(this->A, this->B, this->C); \
    }

#define GLM_SWIZZLE_GEN_VEC4_ENTRY_DEF(T, P, L, CONST, A, B, C, D) \
    template<typename T> \
    vec<4, T, Q> vec<L, T, Q>::A ## B ## C ## D() CONST \
    { \
        return vec<4, T, Q>(this->A, this->B, this->C, this->D); \
    }

#define GLM_MUTABLE

#define GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(T, P, A, B) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, 2, GLM_MUTABLE, A, B) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, 2, GLM_MUTABLE, B, A)

#define GLM_SWIZZLE_GEN_REF_FROM_VEC2(T, P) \
    GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(T, P, x, y) \
    GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(T, P, r, g) \
    GLM_SWIZZLE_GEN_REF2_FROM_VEC2_SWIZZLE(T, P, s, t)

#define GLM_SWIZZLE_GEN_REF2_FROM_VEC3_SWIZZLE(T, P, A, B, C) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, B) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, C) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, A) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, C) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, A) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, B)

#define GLM_SWIZZLE_GEN_REF3_FROM_VEC3_SWIZZLE(T, P, A, B, C) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, A, B, C) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, A, C, B) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, B, A, C) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, B, C, A) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, C, A, B) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, GLM_MUTABLE, C, B, A)

#define GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(T, P, A, B, C) \
    GLM_SWIZZLE_GEN_REF3_FROM_VEC3_SWIZZLE(T, P, A, B, C) \
    GLM_SWIZZLE_GEN_REF2_FROM_VEC3_SWIZZLE(T, P, A, B, C)

#define GLM_SWIZZLE_GEN_REF_FROM_VEC3(T, P) \
    GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(T, P, x, y, z) \
    GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(T, P, r, g, b) \
    GLM_SWIZZLE_GEN_REF_FROM_VEC3_COMP(T, P, s, t, p)

#define GLM_SWIZZLE_GEN_REF2_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, B) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, C) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, A, D) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, A) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, C) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, B, D) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, A) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, B) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, C, D) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, D, A) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, D, B) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, GLM_MUTABLE, D, C)

#define GLM_SWIZZLE_GEN_REF3_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, B, C) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, B, D) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, C, B) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, C, D) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, D, B) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , A, D, C) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, A, C) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, A, D) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, C, A) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, C, D) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, D, A) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , B, D, C) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, A, B) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, A, D) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, B, A) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, B, D) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, D, A) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , C, D, B) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, A, B) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, A, C) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, B, A) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, B, C) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, C, A) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, , D, C, B)

#define GLM_SWIZZLE_GEN_REF4_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, C, B, D) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, C, D, B) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, D, B, C) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, D, C, B) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, B, D, C) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , A, B, C, D) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, C, A, D) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, C, D, A) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, D, A, C) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, D, C, A) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, A, D, C) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , B, A, C, D) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, B, A, D) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, B, D, A) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, D, A, B) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, D, B, A) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, A, D, B) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , C, A, B, D) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, C, B, A) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, C, A, B) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, A, B, C) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, A, C, B) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, B, A, C) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, , D, B, C, A)

#define GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(T, P, A, B, C, D) \
    GLM_SWIZZLE_GEN_REF2_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \
    GLM_SWIZZLE_GEN_REF3_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \
    GLM_SWIZZLE_GEN_REF4_FROM_VEC4_SWIZZLE(T, P, A, B, C, D)

#define GLM_SWIZZLE_GEN_REF_FROM_VEC4(T, P) \
    GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(T, P, x, y, z, w) \
    GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(T, P, r, g, b, a) \
    GLM_SWIZZLE_GEN_REF_FROM_VEC4_COMP(T, P, s, t, p, q)

#define GLM_SWIZZLE_GEN_VEC2_FROM_VEC2_SWIZZLE(T, P, A, B) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, A) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, B) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, A) \
    GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, B)

#define GLM_SWIZZLE_GEN_VEC3_FROM_VEC2_SWIZZLE(T, P, A, B) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, A) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, B) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, A) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, B) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, A) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, B) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, A) \
    GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, B)

#define GLM_SWIZZLE_GEN_VEC4_FROM_VEC2_SWIZZLE(T, P, A, B) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, A) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, B) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, A) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, B) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, A) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, B) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, A) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, B) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, A) \
    GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, B) \
181  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, A) \
+
182  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, B) \
+
183  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, A) \
+
184  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, B) \
+
185  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, A) \
+
186  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, B)
+
187 
+
188 #define GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(T, P, A, B) \
+
189  GLM_SWIZZLE_GEN_VEC2_FROM_VEC2_SWIZZLE(T, P, A, B) \
+
190  GLM_SWIZZLE_GEN_VEC3_FROM_VEC2_SWIZZLE(T, P, A, B) \
+
191  GLM_SWIZZLE_GEN_VEC4_FROM_VEC2_SWIZZLE(T, P, A, B)
+
192 
+
193 #define GLM_SWIZZLE_GEN_VEC_FROM_VEC2(T, P) \
+
194  GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(T, P, x, y) \
+
195  GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(T, P, r, g) \
+
196  GLM_SWIZZLE_GEN_VEC_FROM_VEC2_COMP(T, P, s, t)
+
197 
+
198 #define GLM_SWIZZLE_GEN_VEC2_FROM_VEC3_SWIZZLE(T, P, A, B, C) \
+
199  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, A) \
+
200  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, B) \
+
201  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, C) \
+
202  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, A) \
+
203  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, B) \
+
204  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, C) \
+
205  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, A) \
+
206  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, B) \
+
207  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, C)
+
208 
+
209 #define GLM_SWIZZLE_GEN_VEC3_FROM_VEC3_SWIZZLE(T, P, A, B, C) \
+
210  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, A) \
+
211  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, B) \
+
212  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, C) \
+
213  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, A) \
+
214  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, B) \
+
215  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, C) \
+
216  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, A) \
+
217  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, B) \
+
218  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, C) \
+
219  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, A) \
+
220  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, B) \
+
221  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, C) \
+
222  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, A) \
+
223  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, B) \
+
224  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, C) \
+
225  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, A) \
+
226  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, B) \
+
227  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, C) \
+
228  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, A) \
+
229  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, B) \
+
230  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, C) \
+
231  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, A) \
+
232  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, B) \
+
233  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, C) \
+
234  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, A) \
+
235  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, B) \
+
236  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, C)
+
237 
+
238 #define GLM_SWIZZLE_GEN_VEC4_FROM_VEC3_SWIZZLE(T, P, A, B, C) \
+
239  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, A) \
+
240  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, B) \
+
241  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, C) \
+
242  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, A) \
+
243  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, B) \
+
244  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, C) \
+
245  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, A) \
+
246  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, B) \
+
247  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, C) \
+
248  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, A) \
+
249  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, B) \
+
250  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, C) \
+
251  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, A) \
+
252  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, B) \
+
253  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, C) \
+
254  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, A) \
+
255  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, B) \
+
256  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, C) \
+
257  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, A) \
+
258  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, B) \
+
259  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, C) \
+
260  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, A) \
+
261  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, B) \
+
262  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, C) \
+
263  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, A) \
+
264  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, B) \
+
265  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, C) \
+
266  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, A) \
+
267  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, B) \
+
268  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, C) \
+
269  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, A) \
+
270  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, B) \
+
271  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, C) \
+
272  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, A) \
+
273  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, B) \
+
274  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, C) \
+
275  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, A) \
+
276  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, B) \
+
277  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, C) \
+
278  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, A) \
+
279  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, B) \
+
280  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, C) \
+
281  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, A) \
+
282  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, B) \
+
283  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, C) \
+
284  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, A) \
+
285  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, B) \
+
286  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, C) \
+
287  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, A) \
+
288  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, B) \
+
289  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, C) \
+
290  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, A) \
+
291  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, B) \
+
292  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, C) \
+
293  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, A) \
+
294  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, B) \
+
295  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, C) \
+
296  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, A) \
+
297  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, B) \
+
298  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, C) \
+
299  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, A) \
+
300  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, B) \
+
301  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, C) \
+
302  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, A) \
+
303  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, B) \
+
304  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, C) \
+
305  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, A) \
+
306  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, B) \
+
307  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, C) \
+
308  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, A) \
+
309  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, B) \
+
310  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, C) \
+
311  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, A) \
+
312  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, B) \
+
313  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, C) \
+
314  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, A) \
+
315  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, B) \
+
316  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, C) \
+
317  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, A) \
+
318  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, B) \
+
319  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, C)
+
320 
+
321 #define GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(T, P, A, B, C) \
+
322  GLM_SWIZZLE_GEN_VEC2_FROM_VEC3_SWIZZLE(T, P, A, B, C) \
+
323  GLM_SWIZZLE_GEN_VEC3_FROM_VEC3_SWIZZLE(T, P, A, B, C) \
+
324  GLM_SWIZZLE_GEN_VEC4_FROM_VEC3_SWIZZLE(T, P, A, B, C)
+
325 
+
326 #define GLM_SWIZZLE_GEN_VEC_FROM_VEC3(T, P) \
+
327  GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(T, P, x, y, z) \
+
328  GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(T, P, r, g, b) \
+
329  GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMP(T, P, s, t, p)
+
330 
+
331 #define GLM_SWIZZLE_GEN_VEC2_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \
+
332  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, A) \
+
333  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, B) \
+
334  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, C) \
+
335  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, A, D) \
+
336  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, A) \
+
337  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, B) \
+
338  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, C) \
+
339  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, B, D) \
+
340  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, A) \
+
341  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, B) \
+
342  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, C) \
+
343  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, C, D) \
+
344  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, D, A) \
+
345  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, D, B) \
+
346  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, D, C) \
+
347  GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, const, D, D)
+
348 
+
349 #define GLM_SWIZZLE_GEN_VEC3_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \
+
350  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, A) \
+
351  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, B) \
+
352  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, C) \
+
353  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, A, D) \
+
354  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, A) \
+
355  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, B) \
+
356  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, C) \
+
357  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, B, D) \
+
358  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, A) \
+
359  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, B) \
+
360  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, C) \
+
361  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, C, D) \
+
362  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, D, A) \
+
363  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, D, B) \
+
364  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, D, C) \
+
365  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, A, D, D) \
+
366  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, A) \
+
367  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, B) \
+
368  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, C) \
+
369  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, A, D) \
+
370  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, A) \
+
371  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, B) \
+
372  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, C) \
+
373  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, B, D) \
+
374  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, A) \
+
375  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, B) \
+
376  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, C) \
+
377  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, C, D) \
+
378  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, D, A) \
+
379  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, D, B) \
+
380  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, D, C) \
+
381  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, B, D, D) \
+
382  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, A) \
+
383  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, B) \
+
384  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, C) \
+
385  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, A, D) \
+
386  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, A) \
+
387  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, B) \
+
388  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, C) \
+
389  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, B, D) \
+
390  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, A) \
+
391  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, B) \
+
392  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, C) \
+
393  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, C, D) \
+
394  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, D, A) \
+
395  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, D, B) \
+
396  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, D, C) \
+
397  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, C, D, D) \
+
398  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, A, A) \
+
399  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, A, B) \
+
400  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, A, C) \
+
401  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, A, D) \
+
402  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, B, A) \
+
403  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, B, B) \
+
404  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, B, C) \
+
405  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, B, D) \
+
406  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, C, A) \
+
407  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, C, B) \
+
408  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, C, C) \
+
409  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, C, D) \
+
410  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, D, A) \
+
411  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, D, B) \
+
412  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, D, C) \
+
413  GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, const, D, D, D)
+
414 
+
415 #define GLM_SWIZZLE_GEN_VEC4_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \
+
416  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, A) \
+
417  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, B) \
+
418  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, C) \
+
419  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, A, D) \
+
420  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, A) \
+
421  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, B) \
+
422  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, C) \
+
423  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, B, D) \
+
424  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, A) \
+
425  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, B) \
+
426  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, C) \
+
427  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, C, D) \
+
428  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, D, A) \
+
429  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, D, B) \
+
430  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, D, C) \
+
431  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, A, D, D) \
+
432  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, A) \
+
433  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, B) \
+
434  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, C) \
+
435  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, A, D) \
+
436  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, A) \
+
437  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, B) \
+
438  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, C) \
+
439  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, B, D) \
+
440  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, A) \
+
441  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, B) \
+
442  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, C) \
+
443  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, C, D) \
+
444  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, D, A) \
+
445  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, D, B) \
+
446  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, D, C) \
+
447  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, B, D, D) \
+
448  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, A) \
+
449  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, B) \
+
450  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, C) \
+
451  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, A, D) \
+
452  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, A) \
+
453  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, B) \
+
454  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, C) \
+
455  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, B, D) \
+
456  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, A) \
+
457  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, B) \
+
458  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, C) \
+
459  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, C, D) \
+
460  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, D, A) \
+
461  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, D, B) \
+
462  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, D, C) \
+
463  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, C, D, D) \
+
464  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, A, A) \
+
465  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, A, B) \
+
466  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, A, C) \
+
467  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, A, D) \
+
468  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, B, A) \
+
469  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, B, B) \
+
470  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, B, C) \
+
471  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, B, D) \
+
472  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, C, A) \
+
473  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, C, B) \
+
474  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, C, C) \
+
475  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, C, D) \
+
476  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, D, A) \
+
477  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, D, B) \
+
478  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, D, C) \
+
479  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, A, D, D, D) \
+
480  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, A) \
+
481  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, B) \
+
482  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, C) \
+
483  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, A, D) \
+
484  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, A) \
+
485  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, B) \
+
486  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, C) \
+
487  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, B, D) \
+
488  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, A) \
+
489  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, B) \
+
490  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, C) \
+
491  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, C, D) \
+
492  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, D, A) \
+
493  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, D, B) \
+
494  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, D, C) \
+
495  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, A, D, D) \
+
496  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, A) \
+
497  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, B) \
+
498  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, C) \
+
499  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, A, D) \
+
500  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, A) \
+
501  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, B) \
+
502  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, C) \
+
503  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, B, D) \
+
504  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, A) \
+
505  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, B) \
+
506  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, C) \
+
507  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, C, D) \
+
508  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, D, A) \
+
509  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, D, B) \
+
510  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, D, C) \
+
511  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, B, D, D) \
+
512  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, A) \
+
513  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, B) \
+
514  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, C) \
+
515  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, A, D) \
+
516  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, A) \
+
517  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, B) \
+
518  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, C) \
+
519  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, B, D) \
+
520  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, A) \
+
521  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, B) \
+
522  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, C) \
+
523  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, C, D) \
+
524  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, D, A) \
+
525  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, D, B) \
+
526  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, D, C) \
+
527  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, C, D, D) \
+
528  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, A, A) \
+
529  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, A, B) \
+
530  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, A, C) \
+
531  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, A, D) \
+
532  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, B, A) \
+
533  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, B, B) \
+
534  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, B, C) \
+
535  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, B, D) \
+
536  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, C, A) \
+
537  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, C, B) \
+
538  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, C, C) \
+
539  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, C, D) \
+
540  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, D, A) \
+
541  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, D, B) \
+
542  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, D, C) \
+
543  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, B, D, D, D) \
+
544  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, A) \
+
545  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, B) \
+
546  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, C) \
+
547  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, A, D) \
+
548  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, A) \
+
549  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, B) \
+
550  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, C) \
+
551  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, B, D) \
+
552  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, A) \
+
553  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, B) \
+
554  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, C) \
+
555  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, C, D) \
+
556  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, D, A) \
+
557  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, D, B) \
+
558  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, D, C) \
+
559  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, A, D, D) \
+
560  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, A) \
+
561  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, B) \
+
562  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, C) \
+
563  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, A, D) \
+
564  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, A) \
+
565  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, B) \
+
566  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, C) \
+
567  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, B, D) \
+
568  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, A) \
+
569  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, B) \
+
570  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, C) \
+
571  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, C, D) \
+
572  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, D, A) \
+
573  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, D, B) \
+
574  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, D, C) \
+
575  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, B, D, D) \
+
576  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, A) \
+
577  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, B) \
+
578  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, C) \
+
579  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, A, D) \
+
580  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, A) \
+
581  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, B) \
+
582  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, C) \
+
583  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, B, D) \
+
584  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, A) \
+
585  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, B) \
+
586  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, C) \
+
587  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, C, D) \
+
588  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, D, A) \
+
589  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, D, B) \
+
590  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, D, C) \
+
591  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, C, D, D) \
+
592  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, A, A) \
+
593  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, A, B) \
+
594  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, A, C) \
+
595  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, A, D) \
+
596  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, B, A) \
+
597  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, B, B) \
+
598  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, B, C) \
+
599  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, B, D) \
+
600  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, C, A) \
+
601  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, C, B) \
+
602  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, C, C) \
+
603  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, C, D) \
+
604  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, D, A) \
+
605  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, D, B) \
+
606  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, D, C) \
+
607  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, C, D, D, D) \
+
608  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, A, A) \
+
609  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, A, B) \
+
610  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, A, C) \
+
611  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, A, D) \
+
612  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, B, A) \
+
613  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, B, B) \
+
614  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, B, C) \
+
615  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, B, D) \
+
616  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, C, A) \
+
617  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, C, B) \
+
618  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, C, C) \
+
619  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, C, D) \
+
620  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, D, A) \
+
621  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, D, B) \
+
622  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, D, C) \
+
623  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, A, D, D) \
+
624  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, A, A) \
+
625  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, A, B) \
+
626  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, A, C) \
+
627  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, A, D) \
+
628  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, B, A) \
+
629  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, B, B) \
+
630  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, B, C) \
+
631  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, B, D) \
+
632  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, C, A) \
+
633  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, C, B) \
+
634  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, C, C) \
+
635  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, C, D) \
+
636  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, D, A) \
+
637  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, D, B) \
+
638  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, D, C) \
+
639  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, B, D, D) \
+
640  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, A, A) \
+
641  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, A, B) \
+
642  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, A, C) \
+
643  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, A, D) \
+
644  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, B, A) \
+
645  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, B, B) \
+
646  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, B, C) \
+
647  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, B, D) \
+
648  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, C, A) \
+
649  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, C, B) \
+
650  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, C, C) \
+
651  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, C, D) \
+
652  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, D, A) \
+
653  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, D, B) \
+
654  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, D, C) \
+
655  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, C, D, D) \
+
656  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, A, A) \
+
657  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, A, B) \
+
658  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, A, C) \
+
659  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, A, D) \
+
660  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, B, A) \
+
661  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, B, B) \
+
662  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, B, C) \
+
663  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, B, D) \
+
664  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, C, A) \
+
665  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, C, B) \
+
666  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, C, C) \
+
667  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, C, D) \
+
668  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, D, A) \
+
669  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, D, B) \
+
670  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, D, C) \
+
671  GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, const, D, D, D, D)
+
672 
+
673 #define GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(T, P, A, B, C, D) \
+
674  GLM_SWIZZLE_GEN_VEC2_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \
+
675  GLM_SWIZZLE_GEN_VEC3_FROM_VEC4_SWIZZLE(T, P, A, B, C, D) \
+
676  GLM_SWIZZLE_GEN_VEC4_FROM_VEC4_SWIZZLE(T, P, A, B, C, D)
+
677 
+
678 #define GLM_SWIZZLE_GEN_VEC_FROM_VEC4(T, P) \
+
679  GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(T, P, x, y, z, w) \
+
680  GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(T, P, r, g, b, a) \
+
681  GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMP(T, P, s, t, p, q)
+
682 
+
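What these generators stamp out is easiest to see in a reduced form. The following is a minimal standalone sketch of the pattern a single "const" entry expansion follows; the types and member names here are illustrative only, not GLM's actual GLM_SWIZZLE_GEN_VEC2_ENTRY expansion:

// Minimal sketch: one const accessor per component combination.
#include <iostream>

struct vec2 { float x, y; };

struct vec3
{
    float x, y, z;

    // What a "const" VEC2-entry expansion amounts to for (A, B) = (x, z):
    vec2 xz() const { return vec2{x, z}; }
    // ...and for a repeated-component combination, (A, B) = (y, y):
    vec2 yy() const { return vec2{y, y}; }
};

int main()
{
    vec3 const v{1.0f, 2.0f, 3.0f};
    vec2 const a = v.xz();   // {1, 3}
    vec2 const b = v.yy();   // {2, 2}
    std::cout << a.x << ' ' << a.y << ' ' << b.x << ' ' << b.y << '\n';
}

The mutable (GLM_MUTABLE) generators only emit swizzles with distinct components, since a writable swizzle such as .xx would alias the same component twice.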
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00006_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00006_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..96923b3b4081c76313a7610bfa99f2a59b8bf879
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00006_source.html
@@ -0,0 +1,262 @@
0.9.9 API documentation: _vectorize.hpp Source File

_vectorize.hpp
#pragma once

namespace glm{
namespace detail
{
	// functor1: applies a unary scalar function R(T) to every component of a
	// vec<L, T, Q>, producing a vec<L, R, Q>; specialized for lengths 1-4.
	template<template<length_t L, typename T, qualifier Q> class vec, length_t L, typename R, typename T, qualifier Q>
	struct functor1{};

	template<template<length_t L, typename T, qualifier Q> class vec, typename R, typename T, qualifier Q>
	struct functor1<vec, 1, R, T, Q>
	{
		GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<1, R, Q> call(R (*Func) (T x), vec<1, T, Q> const& v)
		{
			return vec<1, R, Q>(Func(v.x));
		}
	};

	template<template<length_t L, typename T, qualifier Q> class vec, typename R, typename T, qualifier Q>
	struct functor1<vec, 2, R, T, Q>
	{
		GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<2, R, Q> call(R (*Func) (T x), vec<2, T, Q> const& v)
		{
			return vec<2, R, Q>(Func(v.x), Func(v.y));
		}
	};

	template<template<length_t L, typename T, qualifier Q> class vec, typename R, typename T, qualifier Q>
	struct functor1<vec, 3, R, T, Q>
	{
		GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<3, R, Q> call(R (*Func) (T x), vec<3, T, Q> const& v)
		{
			return vec<3, R, Q>(Func(v.x), Func(v.y), Func(v.z));
		}
	};

	template<template<length_t L, typename T, qualifier Q> class vec, typename R, typename T, qualifier Q>
	struct functor1<vec, 4, R, T, Q>
	{
		GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, R, Q> call(R (*Func) (T x), vec<4, T, Q> const& v)
		{
			return vec<4, R, Q>(Func(v.x), Func(v.y), Func(v.z), Func(v.w));
		}
	};

	// functor2: applies a binary function T(T, T) component-wise to two
	// vectors of equal length; specialized for lengths 1-4 on the same pattern.
	template<template<length_t L, typename T, qualifier Q> class vec, length_t L, typename T, qualifier Q>
	struct functor2{};

	template<template<length_t L, typename T, qualifier Q> class vec, typename T, qualifier Q>
	struct functor2<vec, 4, T, Q>
	{
		GLM_FUNC_QUALIFIER static vec<4, T, Q> call(T (*Func) (T x, T y), vec<4, T, Q> const& a, vec<4, T, Q> const& b)
		{
			return vec<4, T, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z), Func(a.w, b.w));
		}
	};
	/* ... the length 1-3 specializations follow the same pattern and are elided here ... */

	// functor2_vec_sca: vector-scalar variant; the scalar second argument is
	// broadcast across all components.
	template<template<length_t L, typename T, qualifier Q> class vec, length_t L, typename T, qualifier Q>
	struct functor2_vec_sca{};

	template<template<length_t L, typename T, qualifier Q> class vec, typename T, qualifier Q>
	struct functor2_vec_sca<vec, 4, T, Q>
	{
		GLM_FUNC_QUALIFIER static vec<4, T, Q> call(T (*Func) (T x, T y), vec<4, T, Q> const& a, T b)
		{
			return vec<4, T, Q>(Func(a.x, b), Func(a.y, b), Func(a.z, b), Func(a.w, b));
		}
	};
	/* ... the length 1-3 specializations follow the same pattern and are elided here ... */

	// functor2_vec_int: component-wise int(T, int) function returning an int vector.
	template<length_t L, typename T, qualifier Q>
	struct functor2_vec_int {};

	template<typename T, qualifier Q>
	struct functor2_vec_int<4, T, Q>
	{
		GLM_FUNC_QUALIFIER static vec<4, int, Q> call(int (*Func) (T x, int y), vec<4, T, Q> const& a, vec<4, int, Q> const& b)
		{
			return vec<4, int, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z), Func(a.w, b.w));
		}
	};
	/* ... the length 1-3 specializations follow the same pattern and are elided here ... */
}//namespace detail
}//namespace glm
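These detail functors exist so GLM's component-wise math functions can be written once per arity instead of once per vector length. A freestanding sketch of the same dispatch idea (plain C++ with simplified stand-in types, not GLM's real vec/qualifier machinery):

// Standalone illustration of the functor1 idea: apply a scalar function to
// every component of a fixed-length vector via a length-specialized functor.
#include <cstdio>

template<int L> struct vecN;
template<> struct vecN<2> { float x, y; };
template<> struct vecN<3> { float x, y, z; };

template<int L> struct functor1;

template<> struct functor1<2>
{
    static vecN<2> call(float (*Func)(float), vecN<2> const& v)
    {
        return vecN<2>{Func(v.x), Func(v.y)};
    }
};

template<> struct functor1<3>
{
    static vecN<3> call(float (*Func)(float), vecN<3> const& v)
    {
        return vecN<3>{Func(v.x), Func(v.y), Func(v.z)};
    }
};

int main()
{
    vecN<3> const v{-1.5f, 2.0f, -3.25f};
    // A non-capturing lambda decays to the function pointer the functor expects.
    vecN<3> const r = functor1<3>::call(
        [](float x) { return x < 0.0f ? -x : x; }, v); // component-wise |x|
    std::printf("%g %g %g\n", r.x, r.y, r.z);          // prints: 1.5 2 3.25
}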
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00007.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00007.html
new file mode 100644
index 0000000000000000000000000000000000000000..bd708c8f97cc84c8aebc1961602d1b06d7b4fc09
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00007.html
@@ -0,0 +1,205 @@
0.9.9 API documentation: associated_min_max.hpp File Reference

associated_min_max.hpp File Reference

GLM_GTX_associated_min_max. More...

Go to the source code of this file.
Functions

Each overload compares the key arguments (x, y, and optionally z, w) and returns the value argument (a, b, c, d) associated with the winning key; scalar/vector mixes are provided for 2, 3, and 4 key/value pairs.

associatedMax — maximum comparison, returning the associated values:

	// 2 key/value pairs
	template<typename T, typename U>
	GLM_FUNC_DECL U associatedMax(T x, U a, T y, U b);

	template<length_t L, typename T, typename U, qualifier Q>
	GLM_FUNC_DECL vec<2, U, Q> associatedMax(vec<L, T, Q> const& x, vec<L, U, Q> const& a, vec<L, T, Q> const& y, vec<L, U, Q> const& b);

	template<length_t L, typename T, typename U, qualifier Q>
	GLM_FUNC_DECL vec<L, T, Q> associatedMax(T x, vec<L, U, Q> const& a, T y, vec<L, U, Q> const& b);

	template<length_t L, typename T, typename U, qualifier Q>
	GLM_FUNC_DECL vec<L, U, Q> associatedMax(vec<L, T, Q> const& x, U a, vec<L, T, Q> const& y, U b);

	// 3 key/value pairs
	template<typename T, typename U>
	GLM_FUNC_DECL U associatedMax(T x, U a, T y, U b, T z, U c);

	template<length_t L, typename T, typename U, qualifier Q>
	GLM_FUNC_DECL vec<L, U, Q> associatedMax(vec<L, T, Q> const& x, vec<L, U, Q> const& a, vec<L, T, Q> const& y, vec<L, U, Q> const& b, vec<L, T, Q> const& z, vec<L, U, Q> const& c);

	template<length_t L, typename T, typename U, qualifier Q>
	GLM_FUNC_DECL vec<L, T, Q> associatedMax(T x, vec<L, U, Q> const& a, T y, vec<L, U, Q> const& b, T z, vec<L, U, Q> const& c);

	template<length_t L, typename T, typename U, qualifier Q>
	GLM_FUNC_DECL vec<L, U, Q> associatedMax(vec<L, T, Q> const& x, U a, vec<L, T, Q> const& y, U b, vec<L, T, Q> const& z, U c);

	// 4 key/value pairs
	template<typename T, typename U>
	GLM_FUNC_DECL U associatedMax(T x, U a, T y, U b, T z, U c, T w, U d);

	template<length_t L, typename T, typename U, qualifier Q>
	GLM_FUNC_DECL vec<L, U, Q> associatedMax(vec<L, T, Q> const& x, vec<L, U, Q> const& a, vec<L, T, Q> const& y, vec<L, U, Q> const& b, vec<L, T, Q> const& z, vec<L, U, Q> const& c, vec<L, T, Q> const& w, vec<L, U, Q> const& d);

	template<length_t L, typename T, typename U, qualifier Q>
	GLM_FUNC_DECL vec<L, U, Q> associatedMax(T x, vec<L, U, Q> const& a, T y, vec<L, U, Q> const& b, T z, vec<L, U, Q> const& c, T w, vec<L, U, Q> const& d);

	template<length_t L, typename T, typename U, qualifier Q>
	GLM_FUNC_DECL vec<L, U, Q> associatedMax(vec<L, T, Q> const& x, U a, vec<L, T, Q> const& y, U b, vec<L, T, Q> const& z, U c, vec<L, T, Q> const& w, U d);

associatedMin — minimum comparison, returning the associated values:

	// 2 key/value pairs
	template<typename T, typename U, qualifier Q>
	GLM_FUNC_DECL U associatedMin(T x, U a, T y, U b);

	template<length_t L, typename T, typename U, qualifier Q>
	GLM_FUNC_DECL vec<2, U, Q> associatedMin(vec<L, T, Q> const& x, vec<L, U, Q> const& a, vec<L, T, Q> const& y, vec<L, U, Q> const& b);

	template<length_t L, typename T, typename U, qualifier Q>
	GLM_FUNC_DECL vec<L, U, Q> associatedMin(T x, const vec<L, U, Q>& a, T y, const vec<L, U, Q>& b);

	template<length_t L, typename T, typename U, qualifier Q>
	GLM_FUNC_DECL vec<L, U, Q> associatedMin(vec<L, T, Q> const& x, U a, vec<L, T, Q> const& y, U b);

	// 3 key/value pairs
	template<typename T, typename U>
	GLM_FUNC_DECL U associatedMin(T x, U a, T y, U b, T z, U c);

	template<length_t L, typename T, typename U, qualifier Q>
	GLM_FUNC_DECL vec<L, U, Q> associatedMin(vec<L, T, Q> const& x, vec<L, U, Q> const& a, vec<L, T, Q> const& y, vec<L, U, Q> const& b, vec<L, T, Q> const& z, vec<L, U, Q> const& c);

	// 4 key/value pairs
	template<typename T, typename U>
	GLM_FUNC_DECL U associatedMin(T x, U a, T y, U b, T z, U c, T w, U d);

	template<length_t L, typename T, typename U, qualifier Q>
	GLM_FUNC_DECL vec<L, U, Q> associatedMin(vec<L, T, Q> const& x, vec<L, U, Q> const& a, vec<L, T, Q> const& y, vec<L, U, Q> const& b, vec<L, T, Q> const& z, vec<L, U, Q> const& c, vec<L, T, Q> const& w, vec<L, U, Q> const& d);

	template<length_t L, typename T, typename U, qualifier Q>
	GLM_FUNC_DECL vec<L, U, Q> associatedMin(T x, vec<L, U, Q> const& a, T y, vec<L, U, Q> const& b, T z, vec<L, U, Q> const& c, T w, vec<L, U, Q> const& d);

	template<length_t L, typename T, typename U, qualifier Q>
	GLM_FUNC_DECL vec<L, U, Q> associatedMin(vec<L, T, Q> const& x, U a, vec<L, T, Q> const& y, U b, vec<L, T, Q> const& z, U c, vec<L, T, Q> const& w, U d);
Detailed Description

GLM_GTX_associated_min_max

See also
	Core features (dependence)
	gtx_extended_min_max (dependence)

Definition in file associated_min_max.hpp.
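A quick illustration of the "associated" idea (a usage sketch, not taken from the GLM docs): the comparison runs on one set of quantities while a paired set is returned. The scalar overload used below matches the 2-pair associatedMax declaration listed above; GLM_ENABLE_EXPERIMENTAL must be defined before including a GTX header.

// Usage sketch for GLM_GTX_associated_min_max: keys and values are separate,
// so the comparison can run on one quantity while returning another.
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/gtx/associated_min_max.hpp>
#include <cstdio>

int main()
{
    float const scoreA = 4.5f, scoreB = 1.25f; // keys that get compared
    int const idA = 10, idB = 20;              // values that get returned

    // scoreA is the larger key, so the associated value idA (10) comes back.
    int const best = glm::associatedMax(scoreA, idA, scoreB, idB);
    std::printf("best id = %d\n", best);
}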
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00007_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00007_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..45d76a2d85ed1ce3dbb8eed838b61a30a47ecf95
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00007_source.html
@@ -0,0 +1,250 @@
0.9.9 API documentation: associated_min_max.hpp Source File

associated_min_max.hpp
Go to the documentation of this file.

#pragma once

// Dependency:
#include "../glm.hpp"

#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
#	ifndef GLM_ENABLE_EXPERIMENTAL
#		pragma message("GLM: GLM_GTX_associated_min_max is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
#	else
#		pragma message("GLM: GLM_GTX_associated_min_max extension included")
#	endif
#endif

namespace glm
{
	template<typename T, typename U, qualifier Q>
	GLM_FUNC_DECL U associatedMin(T x, U a, T y, U b);

	template<length_t L, typename T, typename U, qualifier Q>
	GLM_FUNC_DECL vec<2, U, Q> associatedMin(
		vec<L, T, Q> const& x, vec<L, U, Q> const& a,
		vec<L, T, Q> const& y, vec<L, U, Q> const& b);

	/* ... the remaining associatedMin/associatedMax overloads declared here mirror
	   the function list above: scalar/vector mixes for 2, 3 and 4 key/value pairs ... */

	template<length_t L, typename T, typename U, qualifier Q>
	GLM_FUNC_DECL vec<L, U, Q> associatedMax(
		vec<L, T, Q> const& x, U a,
		vec<L, T, Q> const& y, U b,
		vec<L, T, Q> const& z, U c,
		vec<L, T, Q> const& w, U d);
} //namespace glm

#include "associated_min_max.inl"
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00008.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00008.html
new file mode 100644
index 0000000000000000000000000000000000000000..481484f37b5387e2ffd9c3ee15389128077ffd81
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00008.html
@@ -0,0 +1,149 @@
0.9.9 API documentation: bit.hpp File Reference

bit.hpp File Reference

GLM_GTX_bit. More...

Go to the source code of this file.
Functions

template<typename genIUType>
GLM_FUNC_DECL genIUType highestBitValue(genIUType Value)

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, T, Q> highestBitValue(vec<L, T, Q> const& value)
	Find the highest bit set to 1 in an integer variable and return its value.

template<typename genIUType>
GLM_FUNC_DECL genIUType lowestBitValue(genIUType Value)

template<typename genIUType>
GLM_DEPRECATED GLM_FUNC_DECL genIUType powerOfTwoAbove(genIUType Value)
	Return the power of two whose value is just above the input value.

template<length_t L, typename T, qualifier Q>
GLM_DEPRECATED GLM_FUNC_DECL vec<L, T, Q> powerOfTwoAbove(vec<L, T, Q> const& value)
	Return the power of two whose value is just above the input value.

template<typename genIUType>
GLM_DEPRECATED GLM_FUNC_DECL genIUType powerOfTwoBelow(genIUType Value)
	Return the power of two whose value is just below the input value.

template<length_t L, typename T, qualifier Q>
GLM_DEPRECATED GLM_FUNC_DECL vec<L, T, Q> powerOfTwoBelow(vec<L, T, Q> const& value)
	Return the power of two whose value is just below the input value.

template<typename genIUType>
GLM_DEPRECATED GLM_FUNC_DECL genIUType powerOfTwoNearest(genIUType Value)
	Return the power of two whose value is closest to the input value.

template<length_t L, typename T, qualifier Q>
GLM_DEPRECATED GLM_FUNC_DECL vec<L, T, Q> powerOfTwoNearest(vec<L, T, Q> const& value)
	Return the power of two whose value is closest to the input value.
Detailed Description

GLM_GTX_bit

See also
	Core features (dependence)

Definition in file bit.hpp.
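The semantics are easiest to pin down with concrete values. A usage sketch (my example, not from the GLM docs; GLM_ENABLE_EXPERIMENTAL must be defined before including a GTX header, and the powerOfTwo* helpers are marked deprecated upstream):

// Usage sketch for the GTX bit helpers on a scalar input.
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/gtx/bit.hpp>
#include <cstdio>

int main()
{
    int const v = 100;                            // binary 1100100
    std::printf("%d\n", glm::highestBitValue(v)); // 64: value of the highest set bit
    std::printf("%d\n", glm::lowestBitValue(v));  // 4: value of the lowest set bit
    std::printf("%d\n", glm::powerOfTwoAbove(v)); // 128: power of two just above 100
    std::printf("%d\n", glm::powerOfTwoBelow(v)); // 64: power of two just below 100
}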
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00008_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00008_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..ea56523dd7a86ef0ca796ffc65b38221d81d18f1
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00008_source.html
@@ -0,0 +1,154 @@
0.9.9 API documentation: bit.hpp Source File

bit.hpp
Go to the documentation of this file.

#pragma once

// Dependencies
#include "../gtc/bitfield.hpp"

#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
#	ifndef GLM_ENABLE_EXPERIMENTAL
#		pragma message("GLM: GLM_GTX_bit is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
#	else
#		pragma message("GLM: GLM_GTX_bit extension included")
#	endif
#endif

namespace glm
{
	template<typename genIUType>
	GLM_FUNC_DECL genIUType highestBitValue(genIUType Value);

	template<typename genIUType>
	GLM_FUNC_DECL genIUType lowestBitValue(genIUType Value);

	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_DECL vec<L, T, Q> highestBitValue(vec<L, T, Q> const& value);

	template<typename genIUType>
	GLM_DEPRECATED GLM_FUNC_DECL genIUType powerOfTwoAbove(genIUType Value);

	template<length_t L, typename T, qualifier Q>
	GLM_DEPRECATED GLM_FUNC_DECL vec<L, T, Q> powerOfTwoAbove(vec<L, T, Q> const& value);

	template<typename genIUType>
	GLM_DEPRECATED GLM_FUNC_DECL genIUType powerOfTwoBelow(genIUType Value);

	template<length_t L, typename T, qualifier Q>
	GLM_DEPRECATED GLM_FUNC_DECL vec<L, T, Q> powerOfTwoBelow(vec<L, T, Q> const& value);

	template<typename genIUType>
	GLM_DEPRECATED GLM_FUNC_DECL genIUType powerOfTwoNearest(genIUType Value);

	template<length_t L, typename T, qualifier Q>
	GLM_DEPRECATED GLM_FUNC_DECL vec<L, T, Q> powerOfTwoNearest(vec<L, T, Q> const& value);

} //namespace glm

#include "bit.inl"
GLM_FUNC_DECL vec< L, T, Q > highestBitValue(vec< L, T, Q > const &value)
Find the highest bit set to 1 in an integer variable and return its value.
+
GLM_DEPRECATED GLM_FUNC_DECL vec< L, T, Q > powerOfTwoBelow(vec< L, T, Q > const &value)
Return the power of two number whose value is just lower than the input value.
+
GLM_DEPRECATED GLM_FUNC_DECL vec< L, T, Q > powerOfTwoAbove(vec< L, T, Q > const &value)
Return the power of two number whose value is just higher than the input value.
+
GLM_DEPRECATED GLM_FUNC_DECL vec< L, T, Q > powerOfTwoNearest(vec< L, T, Q > const &value)
Return the power of two number whose value is the closest to the input value.
+
GLM_FUNC_DECL genIUType lowestBitValue(genIUType Value)
+
Definition: common.hpp:20
+
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00009.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00009.html
new file mode 100644
index 0000000000000000000000000000000000000000..429ccf045702d560fb06391efb0f5cedeb86f783
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00009.html
@@ -0,0 +1,223 @@
+0.9.9 API documentation: bitfield.hpp File Reference
bitfield.hpp File Reference
+
+
+ +

GLM_GTC_bitfield +More...

+ +

Go to the source code of this file.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+Functions

GLM_FUNC_DECL glm::u8vec2 bitfieldDeinterleave (glm::uint16 x)
 Deinterleaves the bits of x. More...
 
GLM_FUNC_DECL glm::u16vec2 bitfieldDeinterleave (glm::uint32 x)
 Deinterleaves the bits of x. More...
 
GLM_FUNC_DECL glm::u32vec2 bitfieldDeinterleave (glm::uint64 x)
 Deinterleaves the bits of x. More...
 
template<typename genIUType >
GLM_FUNC_DECL genIUType bitfieldFillOne (genIUType Value, int FirstBit, int BitCount)
 Set a range of bits to 1. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > bitfieldFillOne (vec< L, T, Q > const &Value, int FirstBit, int BitCount)
 Set a range of bits to 1. More...
 
template<typename genIUType >
GLM_FUNC_DECL genIUType bitfieldFillZero (genIUType Value, int FirstBit, int BitCount)
 Set a range of bits to 0. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > bitfieldFillZero (vec< L, T, Q > const &Value, int FirstBit, int BitCount)
 Set a range of bits to 0. More...
 
GLM_FUNC_DECL int16 bitfieldInterleave (int8 x, int8 y)
 Interleaves the bits of x and y. More...
 
GLM_FUNC_DECL uint16 bitfieldInterleave (uint8 x, uint8 y)
 Interleaves the bits of x and y. More...
 
GLM_FUNC_DECL uint16 bitfieldInterleave (u8vec2 const &v)
 Interleaves the bits of x and y. More...
 
GLM_FUNC_DECL int32 bitfieldInterleave (int16 x, int16 y)
 Interleaves the bits of x and y. More...
 
GLM_FUNC_DECL uint32 bitfieldInterleave (uint16 x, uint16 y)
 Interleaves the bits of x and y. More...
 
GLM_FUNC_DECL uint32 bitfieldInterleave (u16vec2 const &v)
 Interleaves the bits of x and y. More...
 
GLM_FUNC_DECL int64 bitfieldInterleave (int32 x, int32 y)
 Interleaves the bits of x and y. More...
 
GLM_FUNC_DECL uint64 bitfieldInterleave (uint32 x, uint32 y)
 Interleaves the bits of x and y. More...
 
GLM_FUNC_DECL uint64 bitfieldInterleave (u32vec2 const &v)
 Interleaves the bits of x and y. More...
 
GLM_FUNC_DECL int32 bitfieldInterleave (int8 x, int8 y, int8 z)
 Interleaves the bits of x, y and z. More...
 
GLM_FUNC_DECL uint32 bitfieldInterleave (uint8 x, uint8 y, uint8 z)
 Interleaves the bits of x, y and z. More...
 
GLM_FUNC_DECL int64 bitfieldInterleave (int16 x, int16 y, int16 z)
 Interleaves the bits of x, y and z. More...
 
GLM_FUNC_DECL uint64 bitfieldInterleave (uint16 x, uint16 y, uint16 z)
 Interleaves the bits of x, y and z. More...
 
GLM_FUNC_DECL int64 bitfieldInterleave (int32 x, int32 y, int32 z)
 Interleaves the bits of x, y and z. More...
 
GLM_FUNC_DECL uint64 bitfieldInterleave (uint32 x, uint32 y, uint32 z)
 Interleaves the bits of x, y and z. More...
 
GLM_FUNC_DECL int32 bitfieldInterleave (int8 x, int8 y, int8 z, int8 w)
 Interleaves the bits of x, y, z and w. More...
 
GLM_FUNC_DECL uint32 bitfieldInterleave (uint8 x, uint8 y, uint8 z, uint8 w)
 Interleaves the bits of x, y, z and w. More...
 
GLM_FUNC_DECL int64 bitfieldInterleave (int16 x, int16 y, int16 z, int16 w)
 Interleaves the bits of x, y, z and w. More...
 
GLM_FUNC_DECL uint64 bitfieldInterleave (uint16 x, uint16 y, uint16 z, uint16 w)
 Interleaves the bits of x, y, z and w. More...
 
template<typename genIUType >
GLM_FUNC_DECL genIUType bitfieldRotateLeft (genIUType In, int Shift)
 Rotate all bits to the left. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > bitfieldRotateLeft (vec< L, T, Q > const &In, int Shift)
 Rotate all bits to the left. More...
 
template<typename genIUType >
GLM_FUNC_DECL genIUType bitfieldRotateRight (genIUType In, int Shift)
 Rotate all bits to the right. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > bitfieldRotateRight (vec< L, T, Q > const &In, int Shift)
 Rotate all bits to the right. More...
 
template<typename genIUType >
GLM_FUNC_DECL genIUType mask (genIUType Bits)
 Build a mask of 'count' bits. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > mask (vec< L, T, Q > const &v)
 Build a mask of 'count' bits. More...
 
+

Detailed Description

+

GLM_GTC_bitfield

+
See also
Core features (dependence)
+
+GLM_GTC_bitfield (dependence)
+ +

Definition in file bitfield.hpp.

+
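bitfieldInterleave and bitfieldDeinterleave are the building blocks of Morton (Z-order) indexing: interleaving two 16-bit coordinates yields one 32-bit cell index, and deinterleaving reverses it. A small sketch under that assumption; the mortonEncode/mortonDecode wrapper names are illustrative, not part of GLM.

```cpp
#include <glm/gtc/bitfield.hpp>

// Encode a 2D cell coordinate as a Morton code: bits of x and y alternate.
glm::uint32 mortonEncode(glm::uint16 x, glm::uint16 y)
{
    return glm::bitfieldInterleave(x, y);
}

// Recover the original (x, y) pair from the code.
glm::u16vec2 mortonDecode(glm::uint32 code)
{
    return glm::bitfieldDeinterleave(code);
}
```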
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00009_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00009_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..ba21496124e311c7be22a7f5cace6aa94bce8dbe
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00009_source.html
@@ -0,0 +1,212 @@
+0.9.9 API documentation: bitfield.hpp Source File
bitfield.hpp
+
+
+Go to the documentation of this file.
1 
+
14 #include "../detail/setup.hpp"
+
15 
+
16 #pragma once
+
17 
+
18 // Dependencies
+
19 #include "../ext/scalar_int_sized.hpp"
+
20 #include "../ext/scalar_uint_sized.hpp"
+
21 #include "../detail/qualifier.hpp"
+
22 #include "../detail/_vectorize.hpp"
+
23 #include "type_precision.hpp"
+
24 #include <limits>
+
25 
+
26 #if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+
27 # pragma message("GLM: GLM_GTC_bitfield extension included")
+
28 #endif
+
29 
+
30 namespace glm
+
31 {
+
34 
+
38  template<typename genIUType>
+
39  GLM_FUNC_DECL genIUType mask(genIUType Bits);
+
40 
+
48  template<length_t L, typename T, qualifier Q>
+
49  GLM_FUNC_DECL vec<L, T, Q> mask(vec<L, T, Q> const& v);
+
50 
+
54  template<typename genIUType>
+
55  GLM_FUNC_DECL genIUType bitfieldRotateRight(genIUType In, int Shift);
+
56 
+
64  template<length_t L, typename T, qualifier Q>
+
65  GLM_FUNC_DECL vec<L, T, Q> bitfieldRotateRight(vec<L, T, Q> const& In, int Shift);
+
66 
+
70  template<typename genIUType>
+
71  GLM_FUNC_DECL genIUType bitfieldRotateLeft(genIUType In, int Shift);
+
72 
+
80  template<length_t L, typename T, qualifier Q>
+
81  GLM_FUNC_DECL vec<L, T, Q> bitfieldRotateLeft(vec<L, T, Q> const& In, int Shift);
+
82 
+
86  template<typename genIUType>
+
87  GLM_FUNC_DECL genIUType bitfieldFillOne(genIUType Value, int FirstBit, int BitCount);
+
88 
+
96  template<length_t L, typename T, qualifier Q>
+
97  GLM_FUNC_DECL vec<L, T, Q> bitfieldFillOne(vec<L, T, Q> const& Value, int FirstBit, int BitCount);
+
98 
+
102  template<typename genIUType>
+
103  GLM_FUNC_DECL genIUType bitfieldFillZero(genIUType Value, int FirstBit, int BitCount);
+
104 
+
112  template<length_t L, typename T, qualifier Q>
+
113  GLM_FUNC_DECL vec<L, T, Q> bitfieldFillZero(vec<L, T, Q> const& Value, int FirstBit, int BitCount);
+
114 
+
120  GLM_FUNC_DECL int16 bitfieldInterleave(int8 x, int8 y);
+
121 
+
127  GLM_FUNC_DECL uint16 bitfieldInterleave(uint8 x, uint8 y);
+
128 
+
134  GLM_FUNC_DECL uint16 bitfieldInterleave(u8vec2 const& v);
+
135 
+ +
140 
+
146  GLM_FUNC_DECL int32 bitfieldInterleave(int16 x, int16 y);
+
147 
+
153  GLM_FUNC_DECL uint32 bitfieldInterleave(uint16 x, uint16 y);
+
154 
+
160  GLM_FUNC_DECL uint32 bitfieldInterleave(u16vec2 const& v);
+
161 
+ +
166 
+
172  GLM_FUNC_DECL int64 bitfieldInterleave(int32 x, int32 y);
+
173 
+
179  GLM_FUNC_DECL uint64 bitfieldInterleave(uint32 x, uint32 y);
+
180 
+
186  GLM_FUNC_DECL uint64 bitfieldInterleave(u32vec2 const& v);
+
187 
+ +
192 
+
198  GLM_FUNC_DECL int32 bitfieldInterleave(int8 x, int8 y, int8 z);
+
199 
+
205  GLM_FUNC_DECL uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z);
+
206 
+
212  GLM_FUNC_DECL int64 bitfieldInterleave(int16 x, int16 y, int16 z);
+
213 
+
219  GLM_FUNC_DECL uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z);
+
220 
+
226  GLM_FUNC_DECL int64 bitfieldInterleave(int32 x, int32 y, int32 z);
+
227 
+
233  GLM_FUNC_DECL uint64 bitfieldInterleave(uint32 x, uint32 y, uint32 z);
+
234 
+
240  GLM_FUNC_DECL int32 bitfieldInterleave(int8 x, int8 y, int8 z, int8 w);
+
241 
+
247  GLM_FUNC_DECL uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z, uint8 w);
+
248 
+
254  GLM_FUNC_DECL int64 bitfieldInterleave(int16 x, int16 y, int16 z, int16 w);
+
255 
+
261  GLM_FUNC_DECL uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z, uint16 w);
+
262 
+
264 } //namespace glm
+
265 
+
266 #include "bitfield.inl"
+
detail::uint32 uint32
32 bit unsigned integer type.
+
GLM_FUNC_DECL uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z, uint16 w)
Interleaves the bits of x, y, z and w.
+
GLM_FUNC_DECL glm::u32vec2 bitfieldDeinterleave(glm::uint64 x)
Deinterleaves the bits of x.
+
GLM_FUNC_DECL vec< L, T, Q > bitfieldFillZero(vec< L, T, Q > const &Value, int FirstBit, int BitCount)
Set a range of bits to 0.
+
detail::uint16 uint16
16 bit unsigned integer type.
+
vec< 2, u8, defaultp > u8vec2
Default qualifier 8 bit unsigned integer vector of 2 components type.
Definition: fwd.hpp:340
+
GLM_FUNC_DECL vec< L, T, Q > bitfieldRotateLeft(vec< L, T, Q > const &In, int Shift)
Rotate all bits to the left.
+
GLM_FUNC_DECL vec< L, T, Q > mask(vec< L, T, Q > const &v)
Build a mask of 'count' bits.
+
detail::uint64 uint64
64 bit unsigned integer type.
+
GLM_FUNC_DECL vec< L, T, Q > bitfieldFillOne(vec< L, T, Q > const &Value, int FirstBit, int BitCount)
Set a range of bits to 1.
+
GLM_GTC_type_precision
+
detail::int64 int64
64 bit signed integer type.
+
vec< 2, u32, defaultp > u32vec2
Default qualifier 32 bit unsigned integer vector of 2 components type.
Definition: fwd.hpp:380
+
GLM_FUNC_DECL vec< L, T, Q > bitfieldRotateRight(vec< L, T, Q > const &In, int Shift)
Rotate all bits to the right.
+
vec< 2, u16, defaultp > u16vec2
Default qualifier 16 bit unsigned integer vector of 2 components type.
Definition: fwd.hpp:360
+
Definition: common.hpp:20
+
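A short sketch of the scalar mask/fill/rotate helpers declared in this header; the expected values in the comments follow the documented semantics for a 32-bit int and are an assumption about the implementation, not quoted from it.

```cpp
#include <glm/gtc/bitfield.hpp>

int main()
{
    int m = glm::mask(8);                            // 0xFF: lowest 8 bits set
    int f = glm::bitfieldFillOne(0, 4, 4);           // 0xF0: bits [4..7] set to 1
    int r = glm::bitfieldRotateLeft(0x000000FF, 8);  // 0x0000FF00
    return (m == 0xFF && f == 0xF0 && r == 0xFF00) ? 0 : 1;
}
```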
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00010.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00010.html
new file mode 100644
index 0000000000000000000000000000000000000000..427c3ad1a25a9de8cb66e8657cf16dc307d39720
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00010.html
@@ -0,0 +1,124 @@
+0.9.9 API documentation: closest_point.hpp File Reference
closest_point.hpp File Reference
+
+
+ +

GLM_GTX_closest_point +More...

+ +

Go to the source code of this file.

+ + + + + + + + + + +

+Functions

template<typename T , qualifier Q>
GLM_FUNC_DECL vec< 3, T, Q > closestPointOnLine (vec< 3, T, Q > const &point, vec< 3, T, Q > const &a, vec< 3, T, Q > const &b)
 Find the point on a straight line that is the closest to a given point. More...
 
+template<typename T , qualifier Q>
GLM_FUNC_DECL vec< 2, T, Q > closestPointOnLine (vec< 2, T, Q > const &point, vec< 2, T, Q > const &a, vec< 2, T, Q > const &b)
 2D lines work as well.
 
+

Detailed Description

+

GLM_GTX_closest_point

+
See also
Core features (dependence)
+ +

Definition in file closest_point.hpp.

+
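A usage sketch for closestPointOnLine; the snapToSegment wrapper name is illustrative. Note that GLM's shipped implementation clamps the result to the segment between a and b, which is an observation about the .inl rather than something stated in the brief above.

```cpp
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/closest_point.hpp>

glm::vec3 snapToSegment(glm::vec3 const& p, glm::vec3 const& a, glm::vec3 const& b)
{
    // Projects p onto the line through a and b; the result is clamped
    // to the endpoints, so it always lies on the segment [a, b].
    return glm::closestPointOnLine(p, a, b);
}
```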
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00010_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00010_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..57de3ceb74e17b7ff8279f2f530cb857b73213a3
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00010_source.html
@@ -0,0 +1,133 @@
+0.9.9 API documentation: closest_point.hpp Source File
closest_point.hpp
+
+
+Go to the documentation of this file.
1 
+
13 #pragma once
+
14 
+
15 // Dependency:
+
16 #include "../glm.hpp"
+
17 
+
18 #if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+
19 # ifndef GLM_ENABLE_EXPERIMENTAL
+
20 # pragma message("GLM: GLM_GTX_closest_point is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+
21 # else
+
22 # pragma message("GLM: GLM_GTX_closest_point extension included")
+
23 # endif
+
24 #endif
+
25 
+
26 namespace glm
+
27 {
+
30 
+
33  template<typename T, qualifier Q>
+
34  GLM_FUNC_DECL vec<3, T, Q> closestPointOnLine(
+
35  vec<3, T, Q> const& point,
+
36  vec<3, T, Q> const& a,
+
37  vec<3, T, Q> const& b);
+
38 
+
40  template<typename T, qualifier Q>
+
41  GLM_FUNC_DECL vec<2, T, Q> closestPointOnLine(
+
42  vec<2, T, Q> const& point,
+
43  vec<2, T, Q> const& a,
+
44  vec<2, T, Q> const& b);
+
45 
+
47 }// namespace glm
+
48 
+
49 #include "closest_point.inl"
+
GLM_FUNC_DECL vec< 2, T, Q > closestPointOnLine(vec< 2, T, Q > const &point, vec< 2, T, Q > const &a, vec< 2, T, Q > const &b)
2D lines work as well.
+
Definition: common.hpp:20
+
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00011.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00011.html
new file mode 100644
index 0000000000000000000000000000000000000000..fc81397e7ea7a268e46571d248a229c2a847a6ce
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00011.html
@@ -0,0 +1,137 @@
+0.9.9 API documentation: color_encoding.hpp File Reference
color_encoding.hpp File Reference
+
+
+ +

GLM_GTX_color_encoding +More...

+ +

Go to the source code of this file.

+ + + + + + + + + + + + + + + + + + +

+Functions

+template<typename T , qualifier Q>
GLM_FUNC_DECL vec< 3, T, Q > convertD65XYZToD50XYZ (vec< 3, T, Q > const &ColorD65XYZ)
 Convert a D65 XYZ color to D50 XYZ.
 
+template<typename T , qualifier Q>
GLM_FUNC_DECL vec< 3, T, Q > convertD65XYZToLinearSRGB (vec< 3, T, Q > const &ColorD65XYZ)
 Convert a D65 XYZ color to linear sRGB.
 
+template<typename T , qualifier Q>
GLM_FUNC_DECL vec< 3, T, Q > convertLinearSRGBToD50XYZ (vec< 3, T, Q > const &ColorLinearSRGB)
 Convert a linear sRGB color to D50 XYZ.
 
+template<typename T , qualifier Q>
GLM_FUNC_DECL vec< 3, T, Q > convertLinearSRGBToD65XYZ (vec< 3, T, Q > const &ColorLinearSRGB)
 Convert a linear sRGB color to D65 XYZ.
 
+

Detailed Description

+

GLM_GTX_color_encoding

+
See also
Core features (dependence)
+
+GLM_GTX_color_encoding (dependence)
+ +

Definition in file color_encoding.hpp.

+
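A sketch chaining the conversions above to go from linear sRGB to D50-adapted CIE XYZ via the D65 white point; the wrapper name is illustrative.

```cpp
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/color_encoding.hpp>

glm::vec3 linearSRGBToD50XYZ(glm::vec3 const& rgb)
{
    // Linear sRGB primaries -> CIE XYZ under the D65 white point ...
    glm::vec3 xyzD65 = glm::convertLinearSRGBToD65XYZ(rgb);
    // ... then chromatically adapt the XYZ values to D50.
    return glm::convertD65XYZToD50XYZ(xyzD65);
}
```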
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00011_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00011_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..0deaac4561b2f7ecdcb70120196f42eca0ef149b
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00011_source.html
@@ -0,0 +1,139 @@
+0.9.9 API documentation: color_encoding.hpp Source File
color_encoding.hpp
+
+
+Go to the documentation of this file.
1 
+
14 #pragma once
+
15 
+
16 // Dependencies
+
17 #include "../detail/setup.hpp"
+
18 #include "../detail/qualifier.hpp"
+
19 #include "../vec3.hpp"
+
20 #include <limits>
+
21 
+
22 #if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+
23 # ifndef GLM_ENABLE_EXPERIMENTAL
+
24 # pragma message("GLM: GLM_GTC_color_encoding is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+
25 # else
+
26 # pragma message("GLM: GLM_GTC_color_encoding extension included")
+
27 # endif
+
28 #endif
+
29 
+
30 namespace glm
+
31 {
+
34 
+
36  template<typename T, qualifier Q>
+
37  GLM_FUNC_DECL vec<3, T, Q> convertLinearSRGBToD65XYZ(vec<3, T, Q> const& ColorLinearSRGB);
+
38 
+
40  template<typename T, qualifier Q>
+
41  GLM_FUNC_DECL vec<3, T, Q> convertLinearSRGBToD50XYZ(vec<3, T, Q> const& ColorLinearSRGB);
+
42 
+
44  template<typename T, qualifier Q>
+
45  GLM_FUNC_DECL vec<3, T, Q> convertD65XYZToLinearSRGB(vec<3, T, Q> const& ColorD65XYZ);
+
46 
+
48  template<typename T, qualifier Q>
+
49  GLM_FUNC_DECL vec<3, T, Q> convertD65XYZToD50XYZ(vec<3, T, Q> const& ColorD65XYZ);
+
50 
+
52 } //namespace glm
+
53 
+
54 #include "color_encoding.inl"
+
GLM_FUNC_DECL vec< 3, T, Q > convertD65XYZToLinearSRGB(vec< 3, T, Q > const &ColorD65XYZ)
Convert a D65 XYZ color to linear sRGB.
+
GLM_FUNC_DECL vec< 3, T, Q > convertLinearSRGBToD50XYZ(vec< 3, T, Q > const &ColorLinearSRGB)
Convert a linear sRGB color to D50 XYZ.
+
GLM_FUNC_DECL vec< 3, T, Q > convertLinearSRGBToD65XYZ(vec< 3, T, Q > const &ColorLinearSRGB)
Convert a linear sRGB color to D65 XYZ.
+
GLM_FUNC_DECL vec< 3, T, Q > convertD65XYZToD50XYZ(vec< 3, T, Q > const &ColorD65XYZ)
Convert a D65 XYZ color to D50 XYZ.
+
Definition: common.hpp:20
+
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00012.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00012.html
new file mode 100644
index 0000000000000000000000000000000000000000..4262e1e95256213c5e04ff9a0ac223f12a46d499
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00012.html
@@ -0,0 +1,134 @@
+0.9.9 API documentation: color_space.hpp File Reference
gtc/color_space.hpp File Reference
+
+
+ +

GLM_GTC_color_space +More...

+ +

Go to the source code of this file.

+ + + + + + + + + + + + + + + + + + +

+Functions

template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > convertLinearToSRGB (vec< L, T, Q > const &ColorLinear)
 Convert a linear color to sRGB color using a standard gamma correction. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > convertLinearToSRGB (vec< L, T, Q > const &ColorLinear, T Gamma)
 Convert a linear color to sRGB color using a custom gamma correction. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > convertSRGBToLinear (vec< L, T, Q > const &ColorSRGB)
 Convert an sRGB color to linear color using a standard gamma correction. More...
 
+template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > convertSRGBToLinear (vec< L, T, Q > const &ColorSRGB, T Gamma)
 Convert an sRGB color to linear color using a custom gamma correction.
 
+

Detailed Description

+

GLM_GTC_color_space

+
See also
Core features (dependence)
+
+GLM_GTC_color_space (dependence)
+ +

Definition in file gtc/color_space.hpp.

+
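A round-trip sketch for GLM_GTC_color_space: decode an sRGB-encoded color to linear light, scale it there, and re-encode with the standard curve. The halveBrightness name is illustrative.

```cpp
#include <glm/glm.hpp>
#include <glm/gtc/color_space.hpp>

glm::vec3 halveBrightness(glm::vec3 const& srgb)
{
    // Standard-gamma decode to linear light, where scaling is
    // physically meaningful, then re-encode for display.
    glm::vec3 linear = glm::convertSRGBToLinear(srgb);
    linear *= 0.5f;
    return glm::convertLinearToSRGB(linear);
}
```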
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00012_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00012_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..8f864b9f51152b704930f5a0a8ea1b2035713a11
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00012_source.html
@@ -0,0 +1,136 @@
+0.9.9 API documentation: color_space.hpp Source File
gtc/color_space.hpp
+
+
+Go to the documentation of this file.
1 
+
14 #pragma once
+
15 
+
16 // Dependencies
+
17 #include "../detail/setup.hpp"
+
18 #include "../detail/qualifier.hpp"
+
19 #include "../exponential.hpp"
+
20 #include "../vec3.hpp"
+
21 #include "../vec4.hpp"
+
22 #include <limits>
+
23 
+
24 #if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+
25 # pragma message("GLM: GLM_GTC_color_space extension included")
+
26 #endif
+
27 
+
28 namespace glm
+
29 {
+
32 
+
35  template<length_t L, typename T, qualifier Q>
+
36  GLM_FUNC_DECL vec<L, T, Q> convertLinearToSRGB(vec<L, T, Q> const& ColorLinear);
+
37 
+
40  template<length_t L, typename T, qualifier Q>
+
41  GLM_FUNC_DECL vec<L, T, Q> convertLinearToSRGB(vec<L, T, Q> const& ColorLinear, T Gamma);
+
42 
+
45  template<length_t L, typename T, qualifier Q>
+
46  GLM_FUNC_DECL vec<L, T, Q> convertSRGBToLinear(vec<L, T, Q> const& ColorSRGB);
+
47 
+
49  // IEC 61966-2-1:1999 / Rec. 709 specification https://www.w3.org/Graphics/Color/srgb
+
50  template<length_t L, typename T, qualifier Q>
+
51  GLM_FUNC_DECL vec<L, T, Q> convertSRGBToLinear(vec<L, T, Q> const& ColorSRGB, T Gamma);
+
52 
+
54 } //namespace glm
+
55 
+
56 #include "color_space.inl"
+
GLM_FUNC_DECL vec< L, T, Q > convertLinearToSRGB(vec< L, T, Q > const &ColorLinear, T Gamma)
Convert a linear color to sRGB color using a custom gamma correction.
+
GLM_FUNC_DECL vec< L, T, Q > convertSRGBToLinear(vec< L, T, Q > const &ColorSRGB, T Gamma)
Convert an sRGB color to linear color using a custom gamma correction.
+
Definition: common.hpp:20
+
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00013.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00013.html
new file mode 100644
index 0000000000000000000000000000000000000000..0c20995098783a442341dd98625ae5dfc03ec23e
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00013.html
@@ -0,0 +1,139 @@
+0.9.9 API documentation: color_space.hpp File Reference
gtx/color_space.hpp File Reference
+
+
+ +

GLM_GTX_color_space +More...

+ +

Go to the source code of this file.

+ + + + + + + + + + + + + + + + + + + + + + + + + + +

+Functions

template<typename T , qualifier Q>
GLM_FUNC_DECL vec< 3, T, Q > hsvColor (vec< 3, T, Q > const &rgbValue)
 Converts a color from RGB color space to its color in HSV color space. More...
 
template<typename T , qualifier Q>
GLM_FUNC_DECL T luminosity (vec< 3, T, Q > const &color)
 Compute color luminosity associating ratios (0.33, 0.59, 0.11) to RGB channels. More...
 
template<typename T , qualifier Q>
GLM_FUNC_DECL vec< 3, T, Q > rgbColor (vec< 3, T, Q > const &hsvValue)
 Converts a color from HSV color space to its color in RGB color space. More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > saturation (T const s)
 Build a saturation matrix. More...
 
template<typename T , qualifier Q>
GLM_FUNC_DECL vec< 3, T, Q > saturation (T const s, vec< 3, T, Q > const &color)
 Modify the saturation of a color. More...
 
template<typename T , qualifier Q>
GLM_FUNC_DECL vec< 4, T, Q > saturation (T const s, vec< 4, T, Q > const &color)
 Modify the saturation of a color. More...
 
+

Detailed Description

+

GLM_GTX_color_space

+
See also
Core features (dependence)
+ +

Definition in file gtx/color_space.hpp.

+
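A sketch of a hue shift built from hsvColor and rgbColor, assuming GLM's convention that the hue component is expressed in degrees; shiftHue is an illustrative name.

```cpp
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/color_space.hpp>

glm::vec3 shiftHue(glm::vec3 const& rgb, float degrees)
{
    glm::vec3 hsv = glm::hsvColor(rgb);           // hue assumed in [0, 360)
    hsv.x = glm::mod(hsv.x + degrees, 360.0f);    // rotate the hue channel
    return glm::rgbColor(hsv);
}
```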
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00013_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00013_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..e85a56589f1232038d495ff7e8da42f14c63f816
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00013_source.html
@@ -0,0 +1,150 @@
+0.9.9 API documentation: color_space.hpp Source File
gtx/color_space.hpp
+
+
+Go to the documentation of this file.
1 
+
13 #pragma once
+
14 
+
15 // Dependency:
+
16 #include "../glm.hpp"
+
17 
+
18 #if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+
19 # ifndef GLM_ENABLE_EXPERIMENTAL
+
20 # pragma message("GLM: GLM_GTX_color_space is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+
21 # else
+
22 # pragma message("GLM: GLM_GTX_color_space extension included")
+
23 # endif
+
24 #endif
+
25 
+
26 namespace glm
+
27 {
+
30 
+
33  template<typename T, qualifier Q>
+
34  GLM_FUNC_DECL vec<3, T, Q> rgbColor(
+
35  vec<3, T, Q> const& hsvValue);
+
36 
+
39  template<typename T, qualifier Q>
+
40  GLM_FUNC_DECL vec<3, T, Q> hsvColor(
+
41  vec<3, T, Q> const& rgbValue);
+
42 
+
45  template<typename T>
+
46  GLM_FUNC_DECL mat<4, 4, T, defaultp> saturation(
+
47  T const s);
+
48 
+
51  template<typename T, qualifier Q>
+
52  GLM_FUNC_DECL vec<3, T, Q> saturation(
+
53  T const s,
+
54  vec<3, T, Q> const& color);
+
55 
+
58  template<typename T, qualifier Q>
+
59  GLM_FUNC_DECL vec<4, T, Q> saturation(
+
60  T const s,
+
61  vec<4, T, Q> const& color);
+
62 
+
65  template<typename T, qualifier Q>
+
66  GLM_FUNC_DECL T luminosity(
+
67  vec<3, T, Q> const& color);
+
68 
+
70 }//namespace glm
+
71 
+
72 #include "color_space.inl"
+
GLM_FUNC_DECL T luminosity(vec< 3, T, Q > const &color)
Compute color luminosity associating ratios (0.33, 0.59, 0.11) to RGB channels.
+
GLM_FUNC_DECL vec< 4, T, Q > saturation(T const s, vec< 4, T, Q > const &color)
Modify the saturation of a color.
+
GLM_FUNC_DECL vec< 3, T, Q > rgbColor(vec< 3, T, Q > const &hsvValue)
Converts a color from HSV color space to its color in RGB color space.
+
GLM_FUNC_DECL vec< 3, T, Q > hsvColor(vec< 3, T, Q > const &rgbValue)
Converts a color from RGB color space to its color in HSV color space.
+
Definition: common.hpp:20
+
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00014.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00014.html
new file mode 100644
index 0000000000000000000000000000000000000000..5e838b7a09eaed5e326e3824e2c8ebf193b2ee1f
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00014.html
@@ -0,0 +1,131 @@
+0.9.9 API documentation: color_space_YCoCg.hpp File Reference
color_space_YCoCg.hpp File Reference
+
+
+ +

GLM_GTX_color_space_YCoCg +More...

+ +

Go to the source code of this file.

+ + + + + + + + + + + + + + + + + + +

+Functions

template<typename T , qualifier Q>
GLM_FUNC_DECL vec< 3, T, Q > rgb2YCoCg (vec< 3, T, Q > const &rgbColor)
 Convert a color from RGB color space to YCoCg color space. More...
 
template<typename T , qualifier Q>
GLM_FUNC_DECL vec< 3, T, Q > rgb2YCoCgR (vec< 3, T, Q > const &rgbColor)
 Convert a color from RGB color space to YCoCgR color space. More...
 
template<typename T , qualifier Q>
GLM_FUNC_DECL vec< 3, T, Q > YCoCg2rgb (vec< 3, T, Q > const &YCoCgColor)
 Convert a color from YCoCg color space to RGB color space. More...
 
template<typename T , qualifier Q>
GLM_FUNC_DECL vec< 3, T, Q > YCoCgR2rgb (vec< 3, T, Q > const &YCoCgColor)
 Convert a color from YCoCgR color space to RGB color space. More...
 
+

Detailed Description

+

GLM_GTX_color_space_YCoCg

+
See also
Core features (dependence)
+ +

Definition in file color_space_YCoCg.hpp.

+
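A round-trip sketch for the YCoCg helpers above, using the YCoCgR variant (the "R" form is designed to be reversible); the roundTrip name is illustrative.

```cpp
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/color_space_YCoCg.hpp>

glm::vec3 roundTrip(glm::vec3 const& rgb)
{
    glm::vec3 ycocg = glm::rgb2YCoCgR(rgb);  // luma in x, chroma in y/z
    return glm::YCoCgR2rgb(ycocg);           // recovers the input color
}
```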
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00014_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00014_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..903a7d9b707e7d77f6b456b9fd3110ed63f8d94f
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00014_source.html
@@ -0,0 +1,141 @@
+0.9.9 API documentation: color_space_YCoCg.hpp Source File
color_space_YCoCg.hpp
+
+
+Go to the documentation of this file.
1 
+
13 #pragma once
+
14 
+
15 // Dependency:
+
16 #include "../glm.hpp"
+
17 
+
18 #if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+
19 # ifndef GLM_ENABLE_EXPERIMENTAL
+
20 # pragma message("GLM: GLM_GTX_color_space_YCoCg is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+
21 # else
+
22 # pragma message("GLM: GLM_GTX_color_space_YCoCg extension included")
+
23 # endif
+
24 #endif
+
25 
+
26 namespace glm
+
27 {
+
30 
+
33  template<typename T, qualifier Q>
+
34  GLM_FUNC_DECL vec<3, T, Q> rgb2YCoCg(
+
35  vec<3, T, Q> const& rgbColor);
+
36 
+
39  template<typename T, qualifier Q>
+
40  GLM_FUNC_DECL vec<3, T, Q> YCoCg2rgb(
+
41  vec<3, T, Q> const& YCoCgColor);
+
42 
+
46  template<typename T, qualifier Q>
+
47  GLM_FUNC_DECL vec<3, T, Q> rgb2YCoCgR(
+
48  vec<3, T, Q> const& rgbColor);
+
49 
+
53  template<typename T, qualifier Q>
+
54  GLM_FUNC_DECL vec<3, T, Q> YCoCgR2rgb(
+
55  vec<3, T, Q> const& YCoCgColor);
+
56 
+
58 }//namespace glm
+
59 
+
60 #include "color_space_YCoCg.inl"
+
GLM_FUNC_DECL vec< 3, T, Q > YCoCgR2rgb(vec< 3, T, Q > const &YCoCgColor)
Convert a color from YCoCgR color space to RGB color space.
+
GLM_FUNC_DECL vec< 3, T, Q > YCoCg2rgb(vec< 3, T, Q > const &YCoCgColor)
Convert a color from YCoCg color space to RGB color space.
+
GLM_FUNC_DECL vec< 3, T, Q > rgbColor(vec< 3, T, Q > const &hsvValue)
Converts a color from HSV color space to its color in RGB color space.
+
GLM_FUNC_DECL vec< 3, T, Q > rgb2YCoCg(vec< 3, T, Q > const &rgbColor)
Convert a color from RGB color space to YCoCg color space.
+
GLM_FUNC_DECL vec< 3, T, Q > rgb2YCoCgR(vec< 3, T, Q > const &rgbColor)
Convert a color from RGB color space to YCoCgR color space.
+
Definition: common.hpp:20
+
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00015.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00015.html
new file mode 100644
index 0000000000000000000000000000000000000000..0f9e2256d5a8e1e636279d89cc972c2ce73e1098
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00015.html
@@ -0,0 +1,267 @@
+0.9.9 API documentation: common.hpp File Reference
common.hpp File Reference
+
+
+ +

Core features +More...

+ +

Go to the source code of this file.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+Functions

template<typename genType >
GLM_FUNC_DECL GLM_CONSTEXPR genType abs (genType x)
 Returns x if x >= 0; otherwise, it returns -x. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL GLM_CONSTEXPR vec< L, T, Q > abs (vec< L, T, Q > const &x)
 Returns x if x >= 0; otherwise, it returns -x. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > ceil (vec< L, T, Q > const &x)
 Returns a value equal to the nearest integer that is greater than or equal to x. More...
 
template<typename genType >
GLM_FUNC_DECL GLM_CONSTEXPR genType clamp (genType x, genType minVal, genType maxVal)
 Returns min(max(x, minVal), maxVal) for each component in x using the floating-point values minVal and maxVal. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL GLM_CONSTEXPR vec< L, T, Q > clamp (vec< L, T, Q > const &x, T minVal, T maxVal)
 Returns min(max(x, minVal), maxVal) for each component in x using the floating-point values minVal and maxVal. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL GLM_CONSTEXPR vec< L, T, Q > clamp (vec< L, T, Q > const &x, vec< L, T, Q > const &minVal, vec< L, T, Q > const &maxVal)
 Returns min(max(x, minVal), maxVal) for each component in x using the floating-point values minVal and maxVal. More...
 
GLM_FUNC_DECL int floatBitsToInt (float const &v)
 Returns a signed integer value representing the encoding of a floating-point value. More...
 
template<length_t L, qualifier Q>
GLM_FUNC_DECL vec< L, int, Q > floatBitsToInt (vec< L, float, Q > const &v)
 Returns a signed integer value representing the encoding of a floating-point value. More...
 
GLM_FUNC_DECL uint floatBitsToUint (float const &v)
 Returns an unsigned integer value representing the encoding of a floating-point value. More...
 
template<length_t L, qualifier Q>
GLM_FUNC_DECL vec< L, uint, Q > floatBitsToUint (vec< L, float, Q > const &v)
 Returns an unsigned integer value representing the encoding of a floating-point value. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > floor (vec< L, T, Q > const &x)
 Returns a value equal to the nearest integer that is less than or equal to x. More...
 
template<typename genType >
GLM_FUNC_DECL genType fma (genType const &a, genType const &b, genType const &c)
 Computes and returns a * b + c. More...
 
template<typename genType >
GLM_FUNC_DECL genType fract (genType x)
 Return x - floor(x). More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > fract (vec< L, T, Q > const &x)
 Return x - floor(x). More...
 
template<typename genType >
GLM_FUNC_DECL genType frexp (genType x, int &exp)
 Splits x into a floating-point significand in the range [0.5, 1.0) and an integral exponent of two, such that: x = significand * exp(2, exponent) More...
 
GLM_FUNC_DECL float intBitsToFloat (int const &v)
 Returns a floating-point value corresponding to a signed integer encoding of a floating-point value. More...
 
template<length_t L, qualifier Q>
GLM_FUNC_DECL vec< L, float, Q > intBitsToFloat (vec< L, int, Q > const &v)
 Returns a floating-point value corresponding to a signed integer encoding of a floating-point value. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, bool, Q > isinf (vec< L, T, Q > const &x)
 Returns true if x holds a positive infinity or negative infinity representation in the underlying implementation's set of floating point representations. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, bool, Q > isnan (vec< L, T, Q > const &x)
 Returns true if x holds a NaN (not a number) representation in the underlying implementation's set of floating point representations. More...
 
template<typename genType >
GLM_FUNC_DECL genType ldexp (genType const &x, int const &exp)
 Builds a floating-point number from x and the corresponding integral exponent of two in exp, returning: significand * exp(2, exponent) More...
 
template<typename genType >
GLM_FUNC_DECL GLM_CONSTEXPR genType max (genType x, genType y)
 Returns y if x < y; otherwise, it returns x. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL GLM_CONSTEXPR vec< L, T, Q > max (vec< L, T, Q > const &x, T y)
 Returns y if x < y; otherwise, it returns x. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL GLM_CONSTEXPR vec< L, T, Q > max (vec< L, T, Q > const &x, vec< L, T, Q > const &y)
 Returns y if x < y; otherwise, it returns x. More...
 
template<typename genType >
GLM_FUNC_DECL GLM_CONSTEXPR genType min (genType x, genType y)
 Returns y if y < x; otherwise, it returns x. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL GLM_CONSTEXPR vec< L, T, Q > min (vec< L, T, Q > const &x, T y)
 Returns y if y < x; otherwise, it returns x. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL GLM_CONSTEXPR vec< L, T, Q > min (vec< L, T, Q > const &x, vec< L, T, Q > const &y)
 Returns y if y < x; otherwise, it returns x. More...
 
template<typename genTypeT , typename genTypeU >
GLM_FUNC_DECL genTypeT mix (genTypeT x, genTypeT y, genTypeU a)
 If genTypeU is a floating scalar or vector: Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > mod (vec< L, T, Q > const &x, vec< L, T, Q > const &y)
 Modulus. More...
 
template<typename genType >
GLM_FUNC_DECL genType modf (genType x, genType &i)
 Returns the fractional part of x and sets i to the integer part (as a whole number floating point value). More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > round (vec< L, T, Q > const &x)
 Returns a value equal to the nearest integer to x. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > roundEven (vec< L, T, Q > const &x)
 Returns a value equal to the nearest integer to x. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > sign (vec< L, T, Q > const &x)
 Returns 1.0 if x > 0, 0.0 if x == 0, or -1.0 if x < 0. More...
 
template<typename genType >
GLM_FUNC_DECL genType smoothstep (genType edge0, genType edge1, genType x)
 Returns 0.0 if x <= edge0 and 1.0 if x >= edge1 and performs smooth Hermite interpolation between 0 and 1 when edge0 < x < edge1. More...
 
template<typename genType >
GLM_FUNC_DECL genType step (genType edge, genType x)
 Returns 0.0 if x < edge, otherwise it returns 1.0 for each component of a genType. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > step (T edge, vec< L, T, Q > const &x)
 Returns 0.0 if x < edge, otherwise it returns 1.0. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > step (vec< L, T, Q > const &edge, vec< L, T, Q > const &x)
 Returns 0.0 if x < edge, otherwise it returns 1.0. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > trunc (vec< L, T, Q > const &x)
 Returns a value equal to the nearest integer to x whose absolute value is not larger than the absolute value of x. More...
 
GLM_FUNC_DECL float uintBitsToFloat (uint const &v)
 Returns a floating-point value corresponding to an unsigned integer encoding of a floating-point value. More...
 
template<length_t L, qualifier Q>
GLM_FUNC_DECL vec< L, float, Q > uintBitsToFloat (vec< L, uint, Q > const &v)
 Returns a floating-point value corresponding to an unsigned integer encoding of a floating-point value. More...
 
+

Detailed Description

+
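A quick tour of a few of the core functions above; each also has the component-wise vector overloads listed in the table.

```cpp
#include <glm/glm.hpp>

int main()
{
    float c = glm::clamp(1.5f, 0.0f, 1.0f);       // 1.0: min(max(x, minVal), maxVal)
    float m = glm::mix(0.0f, 10.0f, 0.25f);       // 2.5: linear blend x*(1-a) + y*a
    float s = glm::smoothstep(0.0f, 1.0f, 0.5f);  // 0.5: Hermite interpolation
    glm::vec3 f = glm::fract(glm::vec3(1.25f));   // (0.25, 0.25, 0.25): x - floor(x)
    return (c == 1.0f && m == 2.5f && s == 0.5f && f.x == 0.25f) ? 0 : 1;
}
```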
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00015_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00015_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..6bc9d1035797ebfc57a53bc8b5f7d3af9fd1f5c4
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00015_source.html
@@ -0,0 +1,276 @@
+0.9.9 API documentation: common.hpp Source File
common.hpp
+
+
+Go to the documentation of this file.
1 
+
15 #pragma once
+
16 
+
17 #include "detail/qualifier.hpp"
+
18 #include "detail/_fixes.hpp"
+
19 
+
20 namespace glm
+
21 {
+
24 
+
31  template<typename genType>
+
32  GLM_FUNC_DECL GLM_CONSTEXPR genType abs(genType x);
+
33 
+
42  template<length_t L, typename T, qualifier Q>
+
43  GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> abs(vec<L, T, Q> const& x);
+
44 
+
53  template<length_t L, typename T, qualifier Q>
+
54  GLM_FUNC_DECL vec<L, T, Q> sign(vec<L, T, Q> const& x);
+
55 
+
64  template<length_t L, typename T, qualifier Q>
+
65  GLM_FUNC_DECL vec<L, T, Q> floor(vec<L, T, Q> const& x);
+
66 
+
76  template<length_t L, typename T, qualifier Q>
+
77  GLM_FUNC_DECL vec<L, T, Q> trunc(vec<L, T, Q> const& x);
+
78 
+
91  template<length_t L, typename T, qualifier Q>
+
92  GLM_FUNC_DECL vec<L, T, Q> round(vec<L, T, Q> const& x);
+
93 
+
105  template<length_t L, typename T, qualifier Q>
+
106  GLM_FUNC_DECL vec<L, T, Q> roundEven(vec<L, T, Q> const& x);
+
107 
+
117  template<length_t L, typename T, qualifier Q>
+
118  GLM_FUNC_DECL vec<L, T, Q> ceil(vec<L, T, Q> const& x);
+
119 
+
126  template<typename genType>
+
127  GLM_FUNC_DECL genType fract(genType x);
+
128 
+
137  template<length_t L, typename T, qualifier Q>
+
138  GLM_FUNC_DECL vec<L, T, Q> fract(vec<L, T, Q> const& x);
+
139 
+
140  template<typename genType>
+
141  GLM_FUNC_DECL genType mod(genType x, genType y);
+
142 
+
143  template<length_t L, typename T, qualifier Q>
+
144  GLM_FUNC_DECL vec<L, T, Q> mod(vec<L, T, Q> const& x, T y);
+
145 
+
155  template<length_t L, typename T, qualifier Q>
+
156  GLM_FUNC_DECL vec<L, T, Q> mod(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
157 
+
167  template<typename genType>
+
168  GLM_FUNC_DECL genType modf(genType x, genType& i);
+
169 
+
176  template<typename genType>
+
177  GLM_FUNC_DECL GLM_CONSTEXPR genType min(genType x, genType y);
+
178 
+
187  template<length_t L, typename T, qualifier Q>
+
188  GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> min(vec<L, T, Q> const& x, T y);
+
189 
+
198  template<length_t L, typename T, qualifier Q>
+
199  GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> min(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
200 
+
207  template<typename genType>
+
208  GLM_FUNC_DECL GLM_CONSTEXPR genType max(genType x, genType y);
+
209 
+
218  template<length_t L, typename T, qualifier Q>
+
219  GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> max(vec<L, T, Q> const& x, T y);
+
220 
+
229  template<length_t L, typename T, qualifier Q>
+
230  GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> max(vec<L, T, Q> const& x, vec<L, T, Q> const& y);
+
231 
+
239  template<typename genType>
+
240  GLM_FUNC_DECL GLM_CONSTEXPR genType clamp(genType x, genType minVal, genType maxVal);
+
241 
+
251  template<length_t L, typename T, qualifier Q>
+
252  GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> clamp(vec<L, T, Q> const& x, T minVal, T maxVal);
+
253 
+
263  template<length_t L, typename T, qualifier Q>
+
264  GLM_FUNC_DECL GLM_CONSTEXPR vec<L, T, Q> clamp(vec<L, T, Q> const& x, vec<L, T, Q> const& minVal, vec<L, T, Q> const& maxVal);
+
265 
+
308  template<typename genTypeT, typename genTypeU>
+
309  GLM_FUNC_DECL genTypeT mix(genTypeT x, genTypeT y, genTypeU a);
+
310 
+
311  template<length_t L, typename T, typename U, qualifier Q>
+
312  GLM_FUNC_DECL vec<L, T, Q> mix(vec<L, T, Q> const& x, vec<L, T, Q> const& y, vec<L, U, Q> const& a);
+
313 
+
314  template<length_t L, typename T, typename U, qualifier Q>
+
315  GLM_FUNC_DECL vec<L, T, Q> mix(vec<L, T, Q> const& x, vec<L, T, Q> const& y, U a);
+
316 
+
321  template<typename genType>
+
322  GLM_FUNC_DECL genType step(genType edge, genType x);
+
323 
+
332  template<length_t L, typename T, qualifier Q>
+
333  GLM_FUNC_DECL vec<L, T, Q> step(T edge, vec<L, T, Q> const& x);
+
334 
+
343  template<length_t L, typename T, qualifier Q>
+
344  GLM_FUNC_DECL vec<L, T, Q> step(vec<L, T, Q> const& edge, vec<L, T, Q> const& x);
+
345 
+
360  template<typename genType>
+
361  GLM_FUNC_DECL genType smoothstep(genType edge0, genType edge1, genType x);
+
362 
+
363  template<length_t L, typename T, qualifier Q>
+
364  GLM_FUNC_DECL vec<L, T, Q> smoothstep(T edge0, T edge1, vec<L, T, Q> const& x);
+
365 
+
366  template<length_t L, typename T, qualifier Q>
+
367  GLM_FUNC_DECL vec<L, T, Q> smoothstep(vec<L, T, Q> const& edge0, vec<L, T, Q> const& edge1, vec<L, T, Q> const& x);
+
368 
+
383  template<length_t L, typename T, qualifier Q>
+
384  GLM_FUNC_DECL vec<L, bool, Q> isnan(vec<L, T, Q> const& x);
+
385 
+
398  template<length_t L, typename T, qualifier Q>
+
399  GLM_FUNC_DECL vec<L, bool, Q> isinf(vec<L, T, Q> const& x);
+
400 
+
407  GLM_FUNC_DECL int floatBitsToInt(float const& v);
+
408 
+
418  template<length_t L, qualifier Q>
+
419  GLM_FUNC_DECL vec<L, int, Q> floatBitsToInt(vec<L, float, Q> const& v);
+
420 
+
427  GLM_FUNC_DECL uint floatBitsToUint(float const& v);
+
428 
+
438  template<length_t L, qualifier Q>
+
439  GLM_FUNC_DECL vec<L, uint, Q> floatBitsToUint(vec<L, float, Q> const& v);
+
440 
+
449  GLM_FUNC_DECL float intBitsToFloat(int const& v);
+
450 
+
462  template<length_t L, qualifier Q>
+
463  GLM_FUNC_DECL vec<L, float, Q> intBitsToFloat(vec<L, int, Q> const& v);
+
464 
+
473  GLM_FUNC_DECL float uintBitsToFloat(uint const& v);
+
474 
+
486  template<length_t L, qualifier Q>
+
487  GLM_FUNC_DECL vec<L, float, Q> uintBitsToFloat(vec<L, uint, Q> const& v);
+
488 
+
495  template<typename genType>
+
496  GLM_FUNC_DECL genType fma(genType const& a, genType const& b, genType const& c);
+
497 
+
512  template<typename genType>
+
513  GLM_FUNC_DECL genType frexp(genType x, int& exp);
+
514 
+
515  template<length_t L, typename T, qualifier Q>
+
516  GLM_FUNC_DECL vec<L, T, Q> frexp(vec<L, T, Q> const& v, vec<L, int, Q>& exp);
+
517 
+
529  template<typename genType>
+
530  GLM_FUNC_DECL genType ldexp(genType const& x, int const& exp);
+
531 
+
532  template<length_t L, typename T, qualifier Q>
+
533  GLM_FUNC_DECL vec<L, T, Q> ldexp(vec<L, T, Q> const& v, vec<L, int, Q> const& exp);
+
534 
+
536 }//namespace glm
+
537 
+
538 #include "detail/func_common.inl"
+
539 
+
GLM_FUNC_DECL vec< L, T, Q > floor(vec< L, T, Q > const &x)
Returns a value equal to the nearest integer that is less than or equal to x.
+
GLM_FUNC_DECL genType fma(genType const &a, genType const &b, genType const &c)
Computes and returns a * b + c.
+
GLM_FUNC_DECL vec< L, T, Q > trunc(vec< L, T, Q > const &x)
Returns a value equal to the nearest integer to x whose absolute value is not larger than the absolut...
+
GLM_FUNC_DECL vec< L, T, Q > mod(vec< L, T, Q > const &x, vec< L, T, Q > const &y)
Modulus.
+
GLM_FUNC_DECL GLM_CONSTEXPR vec< L, T, Q > clamp(vec< L, T, Q > const &x, vec< L, T, Q > const &minVal, vec< L, T, Q > const &maxVal)
Returns min(max(x, minVal), maxVal) for each component in x using the floating-point values minVal an...
+
GLM_FUNC_DECL vec< L, T, Q > round(vec< L, T, Q > const &x)
Returns a value equal to the nearest integer to x.
+
GLM_FUNC_DECL vec< L, float, Q > uintBitsToFloat(vec< L, uint, Q > const &v)
Returns a floating-point value corresponding to an unsigned integer encoding of a floating-point value...
+
GLM_FUNC_DECL vec< L, T, Q > sign(vec< L, T, Q > const &x)
Returns 1.0 if x > 0, 0.0 if x == 0, or -1.0 if x < 0.
+
GLM_FUNC_DECL vec< L, bool, Q > isinf(vec< L, T, Q > const &x)
Returns true if x holds a positive infinity or negative infinity representation in the underlying imp...
+
GLM_FUNC_DECL vec< L, T, Q > roundEven(vec< L, T, Q > const &x)
Returns a value equal to the nearest integer to x.
+
GLM_FUNC_DECL genType modf(genType x, genType &i)
Returns the fractional part of x and sets i to the integer part (as a whole number floating point val...
+
GLM_FUNC_DECL vec< L, T, Q > ceil(vec< L, T, Q > const &x)
Returns a value equal to the nearest integer that is greater than or equal to x.
+
GLM_FUNC_DECL GLM_CONSTEXPR vec< L, T, Q > min(vec< L, T, Q > const &x, vec< L, T, Q > const &y)
Returns y if y < x; otherwise, it returns x.
+
GLM_FUNC_DECL vec< L, float, Q > intBitsToFloat(vec< L, int, Q > const &v)
Returns a floating-point value corresponding to a signed integer encoding of a floating-point value...
+
GLM_FUNC_DECL vec< L, bool, Q > isnan(vec< L, T, Q > const &x)
Returns true if x holds a NaN (not a number) representation in the underlying implementation's set of...
+
GLM_FUNC_DECL vec< L, T, Q > exp(vec< L, T, Q > const &v)
Returns the natural exponentiation of x, i.e., e^x.
+
GLM_FUNC_DECL vec< L, uint, Q > floatBitsToUint(vec< L, float, Q > const &v)
Returns an unsigned integer value representing the encoding of a floating-point value.
+
GLM_FUNC_DECL genType smoothstep(genType edge0, genType edge1, genType x)
Returns 0.0 if x <= edge0 and 1.0 if x >= edge1 and performs smooth Hermite interpolation between 0 a...
+
GLM_FUNC_DECL GLM_CONSTEXPR vec< L, T, Q > abs(vec< L, T, Q > const &x)
Returns x if x >= 0; otherwise, it returns -x.
+
GLM_FUNC_DECL GLM_CONSTEXPR vec< L, T, Q > max(vec< L, T, Q > const &x, vec< L, T, Q > const &y)
Returns y if x < y; otherwise, it returns x.
+
GLM_FUNC_DECL vec< L, T, Q > step(vec< L, T, Q > const &edge, vec< L, T, Q > const &x)
Returns 0.0 if x < edge, otherwise it returns 1.0.
+
GLM_FUNC_DECL vec< L, T, Q > fract(vec< L, T, Q > const &x)
Return x - floor(x).
+
GLM_FUNC_DECL genType ldexp(genType const &x, int const &exp)
Builds a floating-point number from x and the corresponding integral exponent of two in exp...
+
GLM_FUNC_DECL vec< L, int, Q > floatBitsToInt(vec< L, float, Q > const &v)
Returns a signed integer value representing the encoding of a floating-point value.
+
GLM_FUNC_DECL genTypeT mix(genTypeT x, genTypeT y, genTypeU a)
If genTypeU is a floating scalar or vector: Returns x * (1.0 - a) + y * a, i.e., the linear blend of ...
+
GLM_FUNC_DECL genType frexp(genType x, int &exp)
Splits x into a floating-point significand in the range [0.5, 1.0) and an integral exponent of two...
+
Definition: common.hpp:20
+
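A sketch of the floatBitsToInt / intBitsToFloat pair declared above, used here as a bit-level cast; the helper names are illustrative.

```cpp
#include <glm/glm.hpp>

bool isNegativeByBits(float v)
{
    int bits = glm::floatBitsToInt(v);  // raw IEEE-754 encoding of v
    return (bits >> 31) != 0;           // sign bit (also true for -0.0f)
}

float bitRoundTrip(float v)
{
    // Encoding then decoding preserves the bit pattern exactly.
    return glm::intBitsToFloat(glm::floatBitsToInt(v));
}
```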
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00016.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00016.html
new file mode 100644
index 0000000000000000000000000000000000000000..82bb37508646dfcda23fe391bc3f2e311f2b4a8e
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00016.html
@@ -0,0 +1,131 @@
+0.9.9 API documentation: common.hpp File Reference
gtx/common.hpp File Reference
+
+
+ +

GLM_GTX_common +More...

+ +

Go to the source code of this file.

+ + + + + + + + + + + + + + + + + + +

+Functions

template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, bool, Q > closeBounded (vec< L, T, Q > const &Value, vec< L, T, Q > const &Min, vec< L, T, Q > const &Max)
 Returns whether the vector component values are within an interval. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > fmod (vec< L, T, Q > const &v)
 Similar to 'mod' but with a different rounding and integer support. More...
 
template<typename genType >
GLM_FUNC_DECL genType::bool_type isdenormal (genType const &x)
 Returns true if x is a denormalized number. Numbers whose absolute value is too small to be represented in the normal format are represented in an alternate, denormalized format. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, bool, Q > openBounded (vec< L, T, Q > const &Value, vec< L, T, Q > const &Min, vec< L, T, Q > const &Max)
 Returns whether the vector component values are within an interval. More...
 
+

Detailed Description

+

GLM_GTX_common

+
See also
Core features (dependence)
+ +

Definition in file gtx/common.hpp.

+
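A sketch using the interval tests from GLM_GTX_common, assuming openBounded excludes the interval endpoints (as the name suggests, with closeBounded as the inclusive counterpart); strictlyInsideUnitBox is an illustrative name.

```cpp
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/common.hpp>

bool strictlyInsideUnitBox(glm::vec3 const& p)
{
    // Component-wise test against the open interval (0, 1).
    glm::bvec3 inside = glm::openBounded(p, glm::vec3(0.0f), glm::vec3(1.0f));
    return glm::all(inside);  // true only if every component passes
}
```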
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00016_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00016_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..0833436df8a1f2e160ededec3c67233f09eea1b4
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00016_source.html
@@ -0,0 +1,139 @@
+0.9.9 API documentation: common.hpp Source File
gtx/common.hpp
+
+
+Go to the documentation of this file.
1 
+
13 #pragma once
+
14 
+
15 // Dependencies:
+
16 #include "../vec2.hpp"
+
17 #include "../vec3.hpp"
+
18 #include "../vec4.hpp"
+
19 #include "../gtc/vec1.hpp"
+
20 
+
21 #if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+
22 # ifndef GLM_ENABLE_EXPERIMENTAL
+
23 # pragma message("GLM: GLM_GTX_common is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+
24 # else
+
25 # pragma message("GLM: GLM_GTX_common extension included")
+
26 # endif
+
27 #endif
+
28 
+
29 namespace glm
+
30 {
+
33 
+
42  template<typename genType>
+
43  GLM_FUNC_DECL typename genType::bool_type isdenormal(genType const& x);
+
44 
+
50  template<length_t L, typename T, qualifier Q>
+
51  GLM_FUNC_DECL vec<L, T, Q> fmod(vec<L, T, Q> const& v);
+
52 
+
60  template <length_t L, typename T, qualifier Q>
+
61  GLM_FUNC_DECL vec<L, bool, Q> openBounded(vec<L, T, Q> const& Value, vec<L, T, Q> const& Min, vec<L, T, Q> const& Max);
+
62 
+
70  template <length_t L, typename T, qualifier Q>
+
71  GLM_FUNC_DECL vec<L, bool, Q> closeBounded(vec<L, T, Q> const& Value, vec<L, T, Q> const& Min, vec<L, T, Q> const& Max);
+
72 
+
74 }//namespace glm
+
75 
+
76 #include "common.inl"
+
GLM_FUNC_DECL vec< L, T, Q > fmod(vec< L, T, Q > const &v)
Similar to 'mod' but with a different rounding and integer support.
+
GLM_FUNC_DECL vec< L, bool, Q > openBounded(vec< L, T, Q > const &Value, vec< L, T, Q > const &Min, vec< L, T, Q > const &Max)
Returns whether the vector component values are within an interval.
+
GLM_FUNC_DECL genType::bool_type isdenormal(genType const &x)
Returns true if x is a denormalized number. Numbers whose absolute value is too small to be represente...
+
Definition: common.hpp:20
+
GLM_FUNC_DECL vec< L, bool, Q > closeBounded(vec< L, T, Q > const &Value, vec< L, T, Q > const &Min, vec< L, T, Q > const &Max)
Returns whether the vector component values are within an interval.
+
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00017.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00017.html
new file mode 100644
index 0000000000000000000000000000000000000000..f5eda22a3411c5ed20cada98ed9f9104f981582e
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00017.html
@@ -0,0 +1,443 @@
+0.9.9 API documentation: compatibility.hpp File Reference
compatibility.hpp File Reference
+
+
+ +

GLM_GTX_compatibility +More...

+ +

Go to the source code of this file.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+Typedefs

+typedef bool bool1
 boolean type with 1 component. (From GLM_GTX_compatibility extension)
 
+typedef bool bool1x1
 boolean matrix with 1 x 1 component. (From GLM_GTX_compatibility extension)
 
+typedef vec< 2, bool, highp > bool2
 boolean type with 2 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 2, 2, bool, highp > bool2x2
 boolean matrix with 2 x 2 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 2, 3, bool, highp > bool2x3
 boolean matrix with 2 x 3 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 2, 4, bool, highp > bool2x4
 boolean matrix with 2 x 4 components. (From GLM_GTX_compatibility extension)
 
+typedef vec< 3, bool, highp > bool3
 boolean type with 3 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 3, 2, bool, highp > bool3x2
 boolean matrix with 3 x 2 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 3, 3, bool, highp > bool3x3
 boolean matrix with 3 x 3 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 3, 4, bool, highp > bool3x4
 boolean matrix with 3 x 4 components. (From GLM_GTX_compatibility extension)
 
+typedef vec< 4, bool, highp > bool4
 boolean type with 4 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 4, 2, bool, highp > bool4x2
 boolean matrix with 4 x 2 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 4, 3, bool, highp > bool4x3
 boolean matrix with 4 x 3 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 4, 4, bool, highp > bool4x4
 boolean matrix with 4 x 4 components. (From GLM_GTX_compatibility extension)
 
+typedef double double1
 double-qualifier floating-point vector with 1 component. (From GLM_GTX_compatibility extension)
 
+typedef double double1x1
 double-qualifier floating-point matrix with 1 component. (From GLM_GTX_compatibility extension)
 
+typedef vec< 2, double, highp > double2
 double-qualifier floating-point vector with 2 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 2, 2, double, highp > double2x2
 double-qualifier floating-point matrix with 2 x 2 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 2, 3, double, highp > double2x3
 double-qualifier floating-point matrix with 2 x 3 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 2, 4, double, highp > double2x4
 double-qualifier floating-point matrix with 2 x 4 components. (From GLM_GTX_compatibility extension)
 
+typedef vec< 3, double, highp > double3
 double-qualifier floating-point vector with 3 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 3, 2, double, highp > double3x2
 double-qualifier floating-point matrix with 3 x 2 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 3, 3, double, highp > double3x3
 double-qualifier floating-point matrix with 3 x 3 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 3, 4, double, highp > double3x4
 double-qualifier floating-point matrix with 3 x 4 components. (From GLM_GTX_compatibility extension)
 
+typedef vec< 4, double, highp > double4
 double-qualifier floating-point vector with 4 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 4, 2, double, highp > double4x2
 double-qualifier floating-point matrix with 4 x 2 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 4, 3, double, highp > double4x3
 double-qualifier floating-point matrix with 4 x 3 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 4, 4, double, highp > double4x4
 double-qualifier floating-point matrix with 4 x 4 components. (From GLM_GTX_compatibility extension)
 
+typedef float float1
 single-qualifier floating-point vector with 1 component. (From GLM_GTX_compatibility extension)
 
+typedef float float1x1
 single-qualifier floating-point matrix with 1 component. (From GLM_GTX_compatibility extension)
 
+typedef vec< 2, float, highp > float2
 single-qualifier floating-point vector with 2 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 2, 2, float, highp > float2x2
 single-qualifier floating-point matrix with 2 x 2 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 2, 3, float, highp > float2x3
 single-qualifier floating-point matrix with 2 x 3 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 2, 4, float, highp > float2x4
 single-qualifier floating-point matrix with 2 x 4 components. (From GLM_GTX_compatibility extension)
 
+typedef vec< 3, float, highp > float3
 single-qualifier floating-point vector with 3 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 3, 2, float, highp > float3x2
 single-qualifier floating-point matrix with 3 x 2 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 3, 3, float, highp > float3x3
 single-qualifier floating-point matrix with 3 x 3 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 3, 4, float, highp > float3x4
 single-qualifier floating-point matrix with 3 x 4 components. (From GLM_GTX_compatibility extension)
 
+typedef vec< 4, float, highp > float4
 single-qualifier floating-point vector with 4 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 4, 2, float, highp > float4x2
 single-qualifier floating-point matrix with 4 x 2 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 4, 3, float, highp > float4x3
 single-qualifier floating-point matrix with 4 x 3 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 4, 4, float, highp > float4x4
 single-qualifier floating-point matrix with 4 x 4 components. (From GLM_GTX_compatibility extension)
 
+typedef int int1
 integer vector with 1 component. (From GLM_GTX_compatibility extension)
 
+typedef int int1x1
 integer matrix with 1 component. (From GLM_GTX_compatibility extension)
 
+typedef vec< 2, int, highp > int2
 integer vector with 2 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 2, 2, int, highp > int2x2
 integer matrix with 2 x 2 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 2, 3, int, highp > int2x3
 integer matrix with 2 x 3 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 2, 4, int, highp > int2x4
 integer matrix with 2 x 4 components. (From GLM_GTX_compatibility extension)
 
+typedef vec< 3, int, highp > int3
 integer vector with 3 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 3, 2, int, highp > int3x2
 integer matrix with 3 x 2 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 3, 3, int, highp > int3x3
 integer matrix with 3 x 3 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 3, 4, int, highp > int3x4
 integer matrix with 3 x 4 components. (From GLM_GTX_compatibility extension)
 
+typedef vec< 4, int, highp > int4
 integer vector with 4 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 4, 2, int, highp > int4x2
 integer matrix with 4 x 2 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 4, 3, int, highp > int4x3
 integer matrix with 4 x 3 components. (From GLM_GTX_compatibility extension)
 
+typedef mat< 4, 4, int, highp > int4x4
 integer matrix with 4 x 4 components. (From GLM_GTX_compatibility extension)
 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+Functions

+template<typename T , qualifier Q>
GLM_FUNC_QUALIFIER T atan2 (T x, T y)
 Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility)
 
+template<typename T , qualifier Q>
GLM_FUNC_QUALIFIER vec< 2, T, Q > atan2 (const vec< 2, T, Q > &x, const vec< 2, T, Q > &y)
 Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility)
 
+template<typename T , qualifier Q>
GLM_FUNC_QUALIFIER vec< 3, T, Q > atan2 (const vec< 3, T, Q > &x, const vec< 3, T, Q > &y)
 Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility)
 
+template<typename T , qualifier Q>
GLM_FUNC_QUALIFIER vec< 4, T, Q > atan2 (const vec< 4, T, Q > &x, const vec< 4, T, Q > &y)
 Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility)
 
+template<typename genType >
GLM_FUNC_DECL bool isfinite (genType const &x)
 Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility)
 
+template<typename T , qualifier Q>
GLM_FUNC_DECL vec< 1, bool, Q > isfinite (const vec< 1, T, Q > &x)
 Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility)
 
+template<typename T , qualifier Q>
GLM_FUNC_DECL vec< 2, bool, Q > isfinite (const vec< 2, T, Q > &x)
 Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility)
 
+template<typename T , qualifier Q>
GLM_FUNC_DECL vec< 3, bool, Q > isfinite (const vec< 3, T, Q > &x)
 Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility)
 
+template<typename T , qualifier Q>
GLM_FUNC_DECL vec< 4, bool, Q > isfinite (const vec< 4, T, Q > &x)
 Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility)
 
+template<typename T >
GLM_FUNC_QUALIFIER T lerp (T x, T y, T a)
 Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility)
 
+template<typename T , qualifier Q>
GLM_FUNC_QUALIFIER vec< 2, T, Q > lerp (const vec< 2, T, Q > &x, const vec< 2, T, Q > &y, T a)
 Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility)
 
+template<typename T , qualifier Q>
GLM_FUNC_QUALIFIER vec< 3, T, Q > lerp (const vec< 3, T, Q > &x, const vec< 3, T, Q > &y, T a)
 Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility)
 
+template<typename T , qualifier Q>
GLM_FUNC_QUALIFIER vec< 4, T, Q > lerp (const vec< 4, T, Q > &x, const vec< 4, T, Q > &y, T a)
 Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility)
 
+template<typename T , qualifier Q>
GLM_FUNC_QUALIFIER vec< 2, T, Q > lerp (const vec< 2, T, Q > &x, const vec< 2, T, Q > &y, const vec< 2, T, Q > &a)
 Returns the component-wise result of x * (1.0 - a) + y * a, i.e., the linear blend of x and y using vector a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility)
 
+template<typename T , qualifier Q>
GLM_FUNC_QUALIFIER vec< 3, T, Q > lerp (const vec< 3, T, Q > &x, const vec< 3, T, Q > &y, const vec< 3, T, Q > &a)
 Returns the component-wise result of x * (1.0 - a) + y * a, i.e., the linear blend of x and y using vector a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility)
 
+template<typename T , qualifier Q>
GLM_FUNC_QUALIFIER vec< 4, T, Q > lerp (const vec< 4, T, Q > &x, const vec< 4, T, Q > &y, const vec< 4, T, Q > &a)
 Returns the component-wise result of x * (1.0 - a) + y * a, i.e., the linear blend of x and y using vector a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility)
 
+template<typename T , qualifier Q>
GLM_FUNC_QUALIFIER T saturate (T x)
 Returns clamp(x, 0, 1) for each component in x. (From GLM_GTX_compatibility)
 
+template<typename T , qualifier Q>
GLM_FUNC_QUALIFIER vec< 2, T, Q > saturate (const vec< 2, T, Q > &x)
 Returns clamp(x, 0, 1) for each component in x. (From GLM_GTX_compatibility)
 
+template<typename T , qualifier Q>
GLM_FUNC_QUALIFIER vec< 3, T, Q > saturate (const vec< 3, T, Q > &x)
 Returns clamp(x, 0, 1) for each component in x. (From GLM_GTX_compatibility)
 
+template<typename T , qualifier Q>
GLM_FUNC_QUALIFIER vec< 4, T, Q > saturate (const vec< 4, T, Q > &x)
 Returns clamp(x, 0, 1) for each component in x. (From GLM_GTX_compatibility)
 
+

Detailed Description

+

GLM_GTX_compatibility

+
See also
Core features (dependence)
+ +

Definition in file compatibility.hpp.

+
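[Editor's note] A hedged sketch of the HLSL-style helpers documented above (fade_to_white is an illustrative name, not GLM API):

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/compatibility.hpp>

    glm::float3 fade_to_white(glm::float3 const& color, float t)
    {
        // lerp(x, y, a) = x * (1 - a) + y * a; saturate clamps each component to [0, 1].
        glm::float3 blended = glm::lerp(color, glm::float3(1.0f), t);
        return glm::saturate(blended);
    }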
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00017_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00017_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..206d3954f9949fdd18833b6a929bbdbf9f28703f
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00017_source.html
@@ -0,0 +1,282 @@
+0.9.9 API documentation: compatibility.hpp Source File
+
+#pragma once
+
+// Dependency:
+#include "../glm.hpp"
+#include "../gtc/quaternion.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#   ifndef GLM_ENABLE_EXPERIMENTAL
+#       pragma message("GLM: GLM_GTX_compatibility is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+#   else
+#       pragma message("GLM: GLM_GTX_compatibility extension included")
+#   endif
+#endif
+
+#if GLM_COMPILER & GLM_COMPILER_VC
+#   include <cfloat>
+#elif GLM_COMPILER & GLM_COMPILER_GCC
+#   include <cmath>
+#   if(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
+#       undef isfinite
+#   endif
+#endif//GLM_COMPILER
+
+namespace glm
+{
+    // lerp forwards to mix, saturate to clamp, atan2 to atan.
+    template<typename T> GLM_FUNC_QUALIFIER T lerp(T x, T y, T a){return mix(x, y, a);}
+    template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<2, T, Q> lerp(const vec<2, T, Q>& x, const vec<2, T, Q>& y, T a){return mix(x, y, a);}
+    template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<3, T, Q> lerp(const vec<3, T, Q>& x, const vec<3, T, Q>& y, T a){return mix(x, y, a);}
+    template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<4, T, Q> lerp(const vec<4, T, Q>& x, const vec<4, T, Q>& y, T a){return mix(x, y, a);}
+    template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<2, T, Q> lerp(const vec<2, T, Q>& x, const vec<2, T, Q>& y, const vec<2, T, Q>& a){return mix(x, y, a);}
+    template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<3, T, Q> lerp(const vec<3, T, Q>& x, const vec<3, T, Q>& y, const vec<3, T, Q>& a){return mix(x, y, a);}
+    template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<4, T, Q> lerp(const vec<4, T, Q>& x, const vec<4, T, Q>& y, const vec<4, T, Q>& a){return mix(x, y, a);}
+
+    template<typename T, qualifier Q> GLM_FUNC_QUALIFIER T saturate(T x){return clamp(x, T(0), T(1));}
+    template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<2, T, Q> saturate(const vec<2, T, Q>& x){return clamp(x, T(0), T(1));}
+    template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<3, T, Q> saturate(const vec<3, T, Q>& x){return clamp(x, T(0), T(1));}
+    template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<4, T, Q> saturate(const vec<4, T, Q>& x){return clamp(x, T(0), T(1));}
+
+    template<typename T, qualifier Q> GLM_FUNC_QUALIFIER T atan2(T x, T y){return atan(x, y);}
+    template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<2, T, Q> atan2(const vec<2, T, Q>& x, const vec<2, T, Q>& y){return atan(x, y);}
+    template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<3, T, Q> atan2(const vec<3, T, Q>& x, const vec<3, T, Q>& y){return atan(x, y);}
+    template<typename T, qualifier Q> GLM_FUNC_QUALIFIER vec<4, T, Q> atan2(const vec<4, T, Q>& x, const vec<4, T, Q>& y){return atan(x, y);}
+
+    template<typename genType> GLM_FUNC_DECL bool isfinite(genType const& x);
+    template<typename T, qualifier Q> GLM_FUNC_DECL vec<1, bool, Q> isfinite(const vec<1, T, Q>& x);
+    template<typename T, qualifier Q> GLM_FUNC_DECL vec<2, bool, Q> isfinite(const vec<2, T, Q>& x);
+    template<typename T, qualifier Q> GLM_FUNC_DECL vec<3, bool, Q> isfinite(const vec<3, T, Q>& x);
+    template<typename T, qualifier Q> GLM_FUNC_DECL vec<4, bool, Q> isfinite(const vec<4, T, Q>& x);
+
+    typedef bool bool1;
+    typedef vec<2, bool, highp> bool2;
+    typedef vec<3, bool, highp> bool3;
+    typedef vec<4, bool, highp> bool4;
+
+    typedef bool bool1x1;
+    typedef mat<2, 2, bool, highp> bool2x2;
+    typedef mat<2, 3, bool, highp> bool2x3;
+    typedef mat<2, 4, bool, highp> bool2x4;
+    typedef mat<3, 2, bool, highp> bool3x2;
+    typedef mat<3, 3, bool, highp> bool3x3;
+    typedef mat<3, 4, bool, highp> bool3x4;
+    typedef mat<4, 2, bool, highp> bool4x2;
+    typedef mat<4, 3, bool, highp> bool4x3;
+    typedef mat<4, 4, bool, highp> bool4x4;
+
+    typedef int int1;
+    typedef vec<2, int, highp> int2;
+    typedef vec<3, int, highp> int3;
+    typedef vec<4, int, highp> int4;
+
+    typedef int int1x1;
+    typedef mat<2, 2, int, highp> int2x2;
+    typedef mat<2, 3, int, highp> int2x3;
+    typedef mat<2, 4, int, highp> int2x4;
+    typedef mat<3, 2, int, highp> int3x2;
+    typedef mat<3, 3, int, highp> int3x3;
+    typedef mat<3, 4, int, highp> int3x4;
+    typedef mat<4, 2, int, highp> int4x2;
+    typedef mat<4, 3, int, highp> int4x3;
+    typedef mat<4, 4, int, highp> int4x4;
+
+    typedef float float1;
+    typedef vec<2, float, highp> float2;
+    typedef vec<3, float, highp> float3;
+    typedef vec<4, float, highp> float4;
+
+    typedef float float1x1;
+    typedef mat<2, 2, float, highp> float2x2;
+    typedef mat<2, 3, float, highp> float2x3;
+    typedef mat<2, 4, float, highp> float2x4;
+    typedef mat<3, 2, float, highp> float3x2;
+    typedef mat<3, 3, float, highp> float3x3;
+    typedef mat<3, 4, float, highp> float3x4;
+    typedef mat<4, 2, float, highp> float4x2;
+    typedef mat<4, 3, float, highp> float4x3;
+    typedef mat<4, 4, float, highp> float4x4;
+
+    typedef double double1;
+    typedef vec<2, double, highp> double2;
+    typedef vec<3, double, highp> double3;
+    typedef vec<4, double, highp> double4;
+
+    typedef double double1x1;
+    typedef mat<2, 2, double, highp> double2x2;
+    typedef mat<2, 3, double, highp> double2x3;
+    typedef mat<2, 4, double, highp> double2x4;
+    typedef mat<3, 2, double, highp> double3x2;
+    typedef mat<3, 3, double, highp> double3x3;
+    typedef mat<3, 4, double, highp> double3x4;
+    typedef mat<4, 2, double, highp> double4x2;
+    typedef mat<4, 3, double, highp> double4x3;
+    typedef mat<4, 4, double, highp> double4x4;
+}//namespace glm
+
+#include "compatibility.inl"
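[Editor's note] The listing shows these helpers are thin forwards: lerp calls mix, saturate calls clamp, atan2 calls atan. A quick sanity sketch under that reading (illustrative values; the vector overload of saturate is used because its qualifier parameter is deduced from the argument):

    #define GLM_ENABLE_EXPERIMENTAL
    #include <cassert>
    #include <glm/glm.hpp>
    #include <glm/gtx/compatibility.hpp>

    int main()
    {
        // The HLSL names are aliases for the GLSL-style core functions.
        assert(glm::lerp(0.0f, 2.0f, 0.25f) == glm::mix(0.0f, 2.0f, 0.25f));
        glm::vec2 v(1.5f, -0.5f);
        assert(glm::saturate(v) == glm::clamp(v, 0.0f, 1.0f));
        return 0;
    }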
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00018.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00018.html
new file mode 100644
index 0000000000000000000000000000000000000000..5d5fd80405c2821a9ffd30bac56b9bfa9d43fade
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00018.html
@@ -0,0 +1,141 @@
+0.9.9 API documentation: component_wise.hpp File Reference
+
+GLM_GTX_component_wise
+
+Go to the source code of this file.
+
+Functions:
+
+    compAdd(v)        Add all vector components together.
+    compMul(v)        Multiply all vector components together.
+    compMin(v)        Find the minimum value among the vector's components.
+    compMax(v)        Find the maximum value among the vector's components.
+    compNormalize(v)  Convert an integer vector to a normalized float vector.
+    compScale(v)      Convert a normalized float vector to an integer vector.
+
+Detailed Description
+
+GLM_GTX_component_wise
+Date: 2007-05-21 / 2011-06-07
+Author: Christophe Riccio
+See also: Core features (dependence)
+
+Definition in file component_wise.hpp.
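[Editor's note] A short sketch of the component-wise reductions listed above (average and dynamic_range are illustrative names):

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/component_wise.hpp>

    // compAdd sums all components of the vector.
    float average(glm::vec4 const& v) { return glm::compAdd(v) / 4.0f; }

    // compMax/compMin reduce to the largest/smallest component.
    float dynamic_range(glm::vec3 const& c) { return glm::compMax(c) - glm::compMin(c); }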
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00018_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00018_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..81414087d564a2b219ccd627a0e44a2b3a150f6e
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00018_source.html
@@ -0,0 +1,145 @@
+0.9.9 API documentation: component_wise.hpp Source File
+
+#pragma once
+
+// Dependencies
+#include "../detail/setup.hpp"
+#include "../detail/qualifier.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#   ifndef GLM_ENABLE_EXPERIMENTAL
+#       pragma message("GLM: GLM_GTX_component_wise is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+#   else
+#       pragma message("GLM: GLM_GTX_component_wise extension included")
+#   endif
+#endif
+
+namespace glm
+{
+    // Convert an integer vector to a normalized float vector.
+    template<typename floatType, length_t L, typename T, qualifier Q>
+    GLM_FUNC_DECL vec<L, floatType, Q> compNormalize(vec<L, T, Q> const& v);
+
+    // Convert a normalized float vector to an integer vector.
+    template<length_t L, typename T, typename floatType, qualifier Q>
+    GLM_FUNC_DECL vec<L, T, Q> compScale(vec<L, floatType, Q> const& v);
+
+    // Add all vector components together.
+    template<typename genType>
+    GLM_FUNC_DECL typename genType::value_type compAdd(genType const& v);
+
+    // Multiply all vector components together.
+    template<typename genType>
+    GLM_FUNC_DECL typename genType::value_type compMul(genType const& v);
+
+    // Find the minimum value among the vector's components.
+    template<typename genType>
+    GLM_FUNC_DECL typename genType::value_type compMin(genType const& v);
+
+    // Find the maximum value among the vector's components.
+    template<typename genType>
+    GLM_FUNC_DECL typename genType::value_type compMax(genType const& v);
+}//namespace glm
+
+#include "component_wise.inl"
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00019_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00019_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..d2fd66f722e3726d067af8fe407daa7d0bfeb668
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00019_source.html
@@ -0,0 +1,150 @@
+0.9.9 API documentation: compute_common.hpp Source File
+
+#pragma once
+
+#include "setup.hpp"
+#include <limits>
+
+namespace glm{
+namespace detail
+{
+    template<typename genFIType, bool /*signed*/>
+    struct compute_abs
+    {};
+
+    template<typename genFIType>
+    struct compute_abs<genFIType, true>
+    {
+        GLM_FUNC_QUALIFIER GLM_CONSTEXPR static genFIType call(genFIType x)
+        {
+            GLM_STATIC_ASSERT(
+                std::numeric_limits<genFIType>::is_iec559 || std::numeric_limits<genFIType>::is_signed,
+                "'abs' only accept floating-point and integer scalar or vector inputs");
+
+            return x >= genFIType(0) ? x : -x;
+            // TODO, perf comp with: *(((int *) &x) + 1) &= 0x7fffffff;
+        }
+    };
+
+#if GLM_COMPILER & GLM_COMPILER_CUDA
+    template<>
+    struct compute_abs<float, true>
+    {
+        GLM_FUNC_QUALIFIER GLM_CONSTEXPR static float call(float x)
+        {
+            return fabsf(x);
+        }
+    };
+#endif
+
+    template<typename genFIType>
+    struct compute_abs<genFIType, false>
+    {
+        GLM_FUNC_QUALIFIER GLM_CONSTEXPR static genFIType call(genFIType x)
+        {
+            GLM_STATIC_ASSERT(
+                (!std::numeric_limits<genFIType>::is_signed && std::numeric_limits<genFIType>::is_integer),
+                "'abs' only accept floating-point and integer scalar or vector inputs");
+            return x;
+        }
+    };
+}//namespace detail
+}//namespace glm
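[Editor's note] For readers unfamiliar with the pattern above: compute_abs dispatches on a bool template parameter filled in from numeric_limits, so signed and unsigned types select different specializations at compile time. A standalone re-creation without the GLM macros (my_abs and abs_dispatch are illustrative names):

    #include <limits>

    template<typename T, bool IsSigned>
    struct my_abs;

    // Signed types: negate when below zero.
    template<typename T>
    struct my_abs<T, true> { static constexpr T call(T x) { return x >= T(0) ? x : -x; } };

    // Unsigned types: abs is the identity.
    template<typename T>
    struct my_abs<T, false> { static constexpr T call(T x) { return x; } };

    template<typename T>
    constexpr T abs_dispatch(T x)
    {
        return my_abs<T, std::numeric_limits<T>::is_signed>::call(x);
    }

    static_assert(abs_dispatch(-3) == 3, "signed path");
    static_assert(abs_dispatch(3u) == 3u, "unsigned path");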
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00020_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00020_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..049fde6eae2850dd8cc8638daa45dfdbc75c54fb
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00020_source.html
@@ -0,0 +1,130 @@
+0.9.9 API documentation: compute_vector_relational.hpp Source File
+
+#pragma once
+
+//#include "compute_common.hpp"
+#include "setup.hpp"
+#include <limits>
+
+namespace glm{
+namespace detail
+{
+    template <typename T, bool isFloat>
+    struct compute_equal
+    {
+        GLM_FUNC_QUALIFIER GLM_CONSTEXPR static bool call(T a, T b)
+        {
+            return a == b;
+        }
+    };
+/*
+    template <typename T>
+    struct compute_equal<T, true>
+    {
+        GLM_FUNC_QUALIFIER GLM_CONSTEXPR static bool call(T a, T b)
+        {
+            return detail::compute_abs<T, std::numeric_limits<T>::is_signed>::call(b - a) <= static_cast<T>(0);
+            //return std::memcmp(&a, &b, sizeof(T)) == 0;
+        }
+    };
+*/
+}//namespace detail
+}//namespace glm
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00021.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00021.html
new file mode 100644
index 0000000000000000000000000000000000000000..0203aaad51abc80e657a6dfdfaac4aadb040e757
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00021.html
@@ -0,0 +1,223 @@
+0.9.9 API documentation: constants.hpp File Reference
+
+GLM_GTC_constants
+
+Go to the source code of this file.
+
+Functions -- each is a GLM_FUNC_DECL GLM_CONSTEXPR genType constant generator:
+
+    zero()               Return 0.
+    one()                Return 1.
+    third()              Return 1 / 3.
+    two_thirds()         Return 2 / 3.
+    e()                  Return e constant.
+    euler()              Return Euler's constant.
+    golden_ratio()       Return the golden ratio constant.
+    two_pi()             Return pi * 2.
+    half_pi()            Return pi / 2.
+    quarter_pi()         Return pi / 4.
+    three_over_two_pi()  Return pi / 2 * 3.
+    one_over_pi()        Return 1 / pi.
+    one_over_two_pi()    Return 1 / (pi * 2).
+    two_over_pi()        Return 2 / pi.
+    four_over_pi()       Return 4 / pi.
+    two_over_root_pi()   Return 2 / sqrt(pi).
+    root_pi()            Return square root of pi.
+    root_half_pi()       Return sqrt(pi / 2).
+    root_two_pi()        Return sqrt(2 * pi).
+    root_two()           Return sqrt(2).
+    one_over_root_two()  Return 1 / sqrt(2).
+    root_three()         Return sqrt(3).
+    root_five()          Return sqrt(5).
+    ln_two()             Return ln(2).
+    ln_ten()             Return ln(10).
+    ln_ln_two()          Return ln(ln(2)).
+    root_ln_four()       Return sqrt(ln(4)).
+
+Detailed Description
+
+GLM_GTC_constants
+See also: Core features (dependence)
+
+Definition in file constants.hpp.
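[Editor's note] The constants are templated on the return type, so precision is chosen at the call site. A two-line sketch (function names are illustrative; glm::pi comes from ext/scalar_constants.hpp, which this header includes):

    #include <glm/gtc/constants.hpp>

    float  rad_to_deg(float r)      { return r * (180.0f / glm::pi<float>()); }
    double golden_section(double w) { return w / glm::golden_ratio<double>(); }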
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00021_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00021_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..67c7767d74c304ea63a31f9ea1dbabdf45f89b77
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00021_source.html
@@ -0,0 +1,224 @@
+0.9.9 API documentation: constants.hpp Source File
+
+#pragma once
+
+// Dependencies
+#include "../ext/scalar_constants.hpp"
+
+#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+#   pragma message("GLM: GLM_GTC_constants extension included")
+#endif
+
+namespace glm
+{
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType zero();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType one();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType two_pi();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType root_pi();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType half_pi();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType three_over_two_pi();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType quarter_pi();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType one_over_pi();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType one_over_two_pi();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType two_over_pi();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType four_over_pi();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType two_over_root_pi();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType one_over_root_two();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType root_half_pi();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType root_two_pi();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType root_ln_four();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType e();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType euler();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType root_two();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType root_three();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType root_five();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType ln_two();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType ln_ten();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType ln_ln_two();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType third();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType two_thirds();
+    template<typename genType> GLM_FUNC_DECL GLM_CONSTEXPR genType golden_ratio();
+} //namespace glm
+
+#include "constants.inl"
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00022.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00022.html
new file mode 100644
index 0000000000000000000000000000000000000000..450161283be50f67638a10be708d41d94b8e7cc1
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00022.html
@@ -0,0 +1,192 @@
+0.9.9 API documentation: dual_quaternion.hpp File Reference
+
+GLM_GTX_dual_quaternion
+
+Go to the source code of this file.
+
+Typedefs -- dual quaternions at three precision qualifiers:
+
+    lowp_dualquat  / mediump_dualquat  / highp_dualquat     tdualquat<float, lowp|mediump|highp>
+    lowp_fdualquat / mediump_fdualquat / highp_fdualquat    tdualquat<float, lowp|mediump|highp>
+    lowp_ddualquat / mediump_ddualquat / highp_ddualquat    tdualquat<double, lowp|mediump|highp>
+    dualquat, fdualquat    default single-precision dual quaternion (highp_fdualquat
+                           unless a GLM_PRECISION_* macro selects otherwise)
+    ddualquat              default double-precision dual quaternion (highp_ddualquat by default)
+
+Functions:
+
+    dual_quat_identity()  Creates an identity dual quaternion.
+    normalize(q)          Returns the normalized dual quaternion.
+    lerp(x, y, a)         Returns the linear interpolation of two dual quaternions.
+    inverse(q)            Returns the inverse of q.
+    mat2x4_cast(x)        Converts a dual quaternion to a 2 x 4 matrix.
+    mat3x4_cast(x)        Converts a dual quaternion to a 3 x 4 matrix.
+    dualquat_cast(x)      Converts a 2 x 4 matrix (holding real and dual parts) or a
+                          3 x 4 matrix (augmented rotation + translation) to a dual quaternion.
+
+Detailed Description
+
+GLM_GTX_dual_quaternion
+Author: Maksim Vorobiev (msomeone@gmail.com)
+See also: Core features (dependence), GLM_GTC_constants (dependence), GLM_GTC_quaternion (dependence)
+
+Definition in file dual_quaternion.hpp.
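[Editor's note] A hedged sketch of building a dual quaternion from a rotation and translation and applying it to a point. transform_point is an illustrative name, and operator* on (tdualquat, vec3) is assumed to apply the rigid transform, as the declarations suggest:

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>
    #include <glm/gtx/dual_quaternion.hpp>

    glm::vec3 transform_point(glm::quat const& R, glm::vec3 const& t, glm::vec3 const& p)
    {
        // tdualquat has an (orientation, translation) constructor.
        glm::dualquat dq(R, t);
        return dq * p;
    }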
+ + + + diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00022_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00022_source.html new file mode 100644 index 0000000000000000000000000000000000000000..6be65eec58c4a59b70d7bdb3529ac820050eb2bb --- /dev/null +++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00022_source.html @@ -0,0 +1,317 @@ + + + + + + +0.9.9 API documentation: dual_quaternion.hpp Source File + + + + + + + + + + +
+
+ + + + + + + +
+
0.9.9 API documentation +
+
+
+ + + + + + +
+
+ + +
+ +
+ + +
+
+
+
dual_quaternion.hpp
+
+
+Go to the documentation of this file.
1 
+
16 #pragma once
+
17 
+
18 // Dependency:
+
19 #include "../glm.hpp"
+
20 #include "../gtc/constants.hpp"
+
21 #include "../gtc/quaternion.hpp"
+
22 
+
23 #if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+
24 # ifndef GLM_ENABLE_EXPERIMENTAL
+
25 # pragma message("GLM: GLM_GTX_dual_quaternion is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+
26 # else
+
27 # pragma message("GLM: GLM_GTX_dual_quaternion extension included")
+
28 # endif
+
29 #endif
+
30 
+
31 namespace glm
+
32 {
+
35 
+
36  template<typename T, qualifier Q = defaultp>
+
37  struct tdualquat
+
38  {
+
39  // -- Implementation detail --
+
40 
+
41  typedef T value_type;
+
42  typedef qua<T, Q> part_type;
+
43 
+
44  // -- Data --
+
45 
+
46  qua<T, Q> real, dual;
+
47 
+
48  // -- Component accesses --
+
49 
+
50  typedef length_t length_type;
+
52  GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 2;}
+
53 
+
54  GLM_FUNC_DECL part_type & operator[](length_type i);
+
55  GLM_FUNC_DECL part_type const& operator[](length_type i) const;
+
56 
+
57  // -- Implicit basic constructors --
+
58 
+
59  GLM_FUNC_DECL GLM_CONSTEXPR tdualquat() GLM_DEFAULT;
+
60  GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(tdualquat<T, Q> const& d) GLM_DEFAULT;
+
61  template<qualifier P>
+
62  GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(tdualquat<T, P> const& d);
+
63 
+
64  // -- Explicit basic constructors --
+
65 
+
66  GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(qua<T, Q> const& real);
+
67  GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(qua<T, Q> const& orientation, vec<3, T, Q> const& translation);
+
68  GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(qua<T, Q> const& real, qua<T, Q> const& dual);
+
69 
+
70  // -- Conversion constructors --
+
71 
+
72  template<typename U, qualifier P>
+
73  GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT tdualquat(tdualquat<U, P> const& q);
+
74 
+
75  GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR tdualquat(mat<2, 4, T, Q> const& holder_mat);
+
76  GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR tdualquat(mat<3, 4, T, Q> const& aug_mat);
+
77 
+
78  // -- Unary arithmetic operators --
+
79 
+
80  GLM_FUNC_DECL tdualquat<T, Q> & operator=(tdualquat<T, Q> const& m) GLM_DEFAULT;
+
81 
+
82  template<typename U>
+
83  GLM_FUNC_DECL tdualquat<T, Q> & operator=(tdualquat<U, Q> const& m);
+
84  template<typename U>
+
85  GLM_FUNC_DECL tdualquat<T, Q> & operator*=(U s);
+
86  template<typename U>
+
87  GLM_FUNC_DECL tdualquat<T, Q> & operator/=(U s);
+
88  };
+
89 
+
90  // -- Unary bit operators --
+
91 
+
92  template<typename T, qualifier Q>
+
93  GLM_FUNC_DECL tdualquat<T, Q> operator+(tdualquat<T, Q> const& q);
+
94 
+
95  template<typename T, qualifier Q>
+
96  GLM_FUNC_DECL tdualquat<T, Q> operator-(tdualquat<T, Q> const& q);
+
97 
+
98  // -- Binary operators --
+
99 
+
100  template<typename T, qualifier Q>
+
101  GLM_FUNC_DECL tdualquat<T, Q> operator+(tdualquat<T, Q> const& q, tdualquat<T, Q> const& p);
+
102 
+
103  template<typename T, qualifier Q>
+
104  GLM_FUNC_DECL tdualquat<T, Q> operator*(tdualquat<T, Q> const& q, tdualquat<T, Q> const& p);
+
105 
+
106  template<typename T, qualifier Q>
+
107  GLM_FUNC_DECL vec<3, T, Q> operator*(tdualquat<T, Q> const& q, vec<3, T, Q> const& v);
+
108 
+
109  template<typename T, qualifier Q>
+
110  GLM_FUNC_DECL vec<3, T, Q> operator*(vec<3, T, Q> const& v, tdualquat<T, Q> const& q);
+
111 
+
112  template<typename T, qualifier Q>
+
113  GLM_FUNC_DECL vec<4, T, Q> operator*(tdualquat<T, Q> const& q, vec<4, T, Q> const& v);
+
114 
+
115  template<typename T, qualifier Q>
+
116  GLM_FUNC_DECL vec<4, T, Q> operator*(vec<4, T, Q> const& v, tdualquat<T, Q> const& q);
+
117 
+
118  template<typename T, qualifier Q>
+
119  GLM_FUNC_DECL tdualquat<T, Q> operator*(tdualquat<T, Q> const& q, T const& s);
+
120 
+
121  template<typename T, qualifier Q>
+
122  GLM_FUNC_DECL tdualquat<T, Q> operator*(T const& s, tdualquat<T, Q> const& q);
+
123 
+
124  template<typename T, qualifier Q>
+
125  GLM_FUNC_DECL tdualquat<T, Q> operator/(tdualquat<T, Q> const& q, T const& s);
+
126 
+
127  // -- Boolean operators --
+
128 
+
129  template<typename T, qualifier Q>
+
130  GLM_FUNC_DECL bool operator==(tdualquat<T, Q> const& q1, tdualquat<T, Q> const& q2);
+
131 
+
132  template<typename T, qualifier Q>
+
133  GLM_FUNC_DECL bool operator!=(tdualquat<T, Q> const& q1, tdualquat<T, Q> const& q2);
+
134 
+
138  template <typename T, qualifier Q>
+
139  GLM_FUNC_DECL tdualquat<T, Q> dual_quat_identity();
+
140 
+
144  template<typename T, qualifier Q>
+
145  GLM_FUNC_DECL tdualquat<T, Q> normalize(tdualquat<T, Q> const& q);
+
146 
+
150  template<typename T, qualifier Q>
+
151  GLM_FUNC_DECL tdualquat<T, Q> lerp(tdualquat<T, Q> const& x, tdualquat<T, Q> const& y, T const& a);
+
152 
+
156  template<typename T, qualifier Q>
+
157  GLM_FUNC_DECL tdualquat<T, Q> inverse(tdualquat<T, Q> const& q);
+
158 
+
162  template<typename T, qualifier Q>
+
163  GLM_FUNC_DECL mat<2, 4, T, Q> mat2x4_cast(tdualquat<T, Q> const& x);
+
164 
+
168  template<typename T, qualifier Q>
+
169  GLM_FUNC_DECL mat<3, 4, T, Q> mat3x4_cast(tdualquat<T, Q> const& x);
+
170 
+
174  template<typename T, qualifier Q>
+
175  GLM_FUNC_DECL tdualquat<T, Q> dualquat_cast(mat<2, 4, T, Q> const& x);
+
176 
+
180  template<typename T, qualifier Q>
+
181  GLM_FUNC_DECL tdualquat<T, Q> dualquat_cast(mat<3, 4, T, Q> const& x);
+
182 
+
183 
+
187  typedef tdualquat<float, lowp> lowp_dualquat;
+
188 
+
192  typedef tdualquat<float, mediump> mediump_dualquat;
+
193 
+
197  typedef tdualquat<float, highp> highp_dualquat;
+
198 
+
199 
+
203  typedef tdualquat<float, lowp> lowp_fdualquat;
+
204 
+
208  typedef tdualquat<float, mediump> mediump_fdualquat;
+
209 
+
213  typedef tdualquat<float, highp> highp_fdualquat;
+
214 
+
215 
+
219  typedef tdualquat<double, lowp> lowp_ddualquat;
+
220 
+
224  typedef tdualquat<double, mediump> mediump_ddualquat;
+
225 
+
229  typedef tdualquat<double, highp> highp_ddualquat;
+
230 
+
231 
+
232 #if(!defined(GLM_PRECISION_HIGHP_FLOAT) && !defined(GLM_PRECISION_MEDIUMP_FLOAT) && !defined(GLM_PRECISION_LOWP_FLOAT))
+
233  typedef highp_fdualquat dualquat;
+
237 
+
241  typedef highp_fdualquat fdualquat;
+
242 #elif(defined(GLM_PRECISION_HIGHP_FLOAT) && !defined(GLM_PRECISION_MEDIUMP_FLOAT) && !defined(GLM_PRECISION_LOWP_FLOAT))
+
243  typedef highp_fdualquat dualquat;
+
244  typedef highp_fdualquat fdualquat;
+
245 #elif(!defined(GLM_PRECISION_HIGHP_FLOAT) && defined(GLM_PRECISION_MEDIUMP_FLOAT) && !defined(GLM_PRECISION_LOWP_FLOAT))
+
246  typedef mediump_fdualquat dualquat;
+
247  typedef mediump_fdualquat fdualquat;
+
248 #elif(!defined(GLM_PRECISION_HIGHP_FLOAT) && !defined(GLM_PRECISION_MEDIUMP_FLOAT) && defined(GLM_PRECISION_LOWP_FLOAT))
+
249  typedef lowp_fdualquat dualquat;
+
250  typedef lowp_fdualquat fdualquat;
+
251 #else
+
252 # error "GLM error: multiple default precision requested for single-precision floating-point types"
+
253 #endif
+
254 
+
255 
+
256 #if(!defined(GLM_PRECISION_HIGHP_DOUBLE) && !defined(GLM_PRECISION_MEDIUMP_DOUBLE) && !defined(GLM_PRECISION_LOWP_DOUBLE))
+
257  typedef highp_ddualquat ddualquat;
+
261 #elif(defined(GLM_PRECISION_HIGHP_DOUBLE) && !defined(GLM_PRECISION_MEDIUMP_DOUBLE) && !defined(GLM_PRECISION_LOWP_DOUBLE))
+
262  typedef highp_ddualquat ddualquat;
+
263 #elif(!defined(GLM_PRECISION_HIGHP_DOUBLE) && defined(GLM_PRECISION_MEDIUMP_DOUBLE) && !defined(GLM_PRECISION_LOWP_DOUBLE))
+
264  typedef mediump_ddualquat ddualquat;
+
265 #elif(!defined(GLM_PRECISION_HIGHP_DOUBLE) && !defined(GLM_PRECISION_MEDIUMP_DOUBLE) && defined(GLM_PRECISION_LOWP_DOUBLE))
+
266  typedef lowp_ddualquat ddualquat;
+
267 #else
+
268 # error "GLM error: Multiple default precision requested for double-precision floating-point types"
+
269 #endif
+
270 
+
272 } //namespace glm
+
273 
+
274 #include "dual_quaternion.inl"
+
highp_ddualquat ddualquat
Dual-quaternion of default double-qualifier floating-point numbers.
+
highp_fdualquat fdualquat
Dual-quaternion of default single-qualifier floating-point numbers.
+
GLM_FUNC_DECL mat< 2, 4, T, Q > mat2x4_cast(tdualquat< T, Q > const &x)
Converts a dual quaternion to a 2 * 4 matrix.
+
tdualquat< double, highp > highp_ddualquat
Dual-quaternion of high double-qualifier floating-point numbers.
+
GLM_FUNC_DECL tdualquat< T, Q > normalize(tdualquat< T, Q > const &q)
Returns the normalized dual quaternion.
+
GLM_FUNC_DECL tdualquat< T, Q > dual_quat_identity()
Creates an identity dual quaternion.
+
GLM_FUNC_DECL tdualquat< T, Q > inverse(tdualquat< T, Q > const &q)
Returns the inverse of the dual quaternion q.
+
GLM_FUNC_DECL tdualquat< T, Q > lerp(tdualquat< T, Q > const &x, tdualquat< T, Q > const &y, T const &a)
Returns the linear interpolation of two dual quaternions.
+
tdualquat< float, lowp > lowp_dualquat
Dual-quaternion of low single-qualifier floating-point numbers.
+
tdualquat< float, lowp > lowp_fdualquat
Dual-quaternion of low single-qualifier floating-point numbers.
+
GLM_FUNC_DECL T length(qua< T, Q > const &q)
Returns the norm of a quaternion.
+
tdualquat< double, lowp > lowp_ddualquat
Dual-quaternion of low double-qualifier floating-point numbers.
+
GLM_FUNC_DECL mat< 3, 4, T, Q > mat3x4_cast(tdualquat< T, Q > const &x)
Converts a dual quaternion to a 3 * 4 matrix.
+
highp_fdualquat dualquat
Dual-quaternion of floating-point numbers.
+
tdualquat< float, highp > highp_fdualquat
Dual-quaternion of high single-qualifier floating-point numbers.
+
GLM_FUNC_DECL mat< 4, 4, T, Q > orientation(vec< 3, T, Q > const &Normal, vec< 3, T, Q > const &Up)
Builds a rotation matrix from a normal and an up vector.
+
tdualquat< float, mediump > mediump_dualquat
Dual-quaternion of medium single-qualifier floating-point numbers.
+
tdualquat< float, mediump > mediump_fdualquat
Dual-quaternion of medium single-qualifier floating-point numbers.
+
tdualquat< double, mediump > mediump_ddualquat
Dual-quaternion of medium double-qualifier floating-point numbers.
+
GLM_FUNC_DECL tdualquat< T, Q > dualquat_cast(mat< 3, 4, T, Q > const &x)
Converts a 3 * 4 matrix (rotation + translation augmented matrix) to a dual quaternion.
+
tdualquat< float, highp > highp_dualquat
Dual-quaternion of high single-qualifier floating-point numbers.
+
Definition: common.hpp:20
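A minimal usage sketch tying these declarations together (my own example, not part of the generated docs; it assumes GLM_ENABLE_EXPERIMENTAL since GLM_GTX_dual_quaternion is experimental, the (orientation, translation) constructor declared in this header, and illustrative transform values):

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>
#include <glm/gtx/dual_quaternion.hpp>

void blend_rigid_transforms() // hypothetical demo function
{
    // Encode a rigid transform (rotation + translation) as a dual quaternion.
    glm::quat r = glm::angleAxis(glm::radians(90.0f), glm::vec3(0.0f, 0.0f, 1.0f));
    glm::dualquat dq(r, glm::vec3(1.0f, 2.0f, 3.0f));

    // Blend with the identity and renormalize, as in dual-quaternion skinning.
    glm::dualquat id = glm::dual_quat_identity<float, glm::defaultp>();
    glm::dualquat half = glm::normalize(glm::lerp(id, dq, 0.5f));

    // Round-trip through the 3 * 4 augmented-matrix form.
    glm::mat3x4 m = glm::mat3x4_cast(half);
    glm::dualquat back = glm::dualquat_cast(m);
    (void)back;
}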
+
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00023.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00023.html
new file mode 100644
index 0000000000000000000000000000000000000000..61002b829fa3f7384ec1fdd1a4f577237d8a02c1
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00023.html
@@ -0,0 +1,244 @@
0.9.9 API documentation: easing.hpp File Reference
easing.hpp File Reference
+
+
+ +

GLM_GTX_easing

Functions

template<typename genType >
GLM_FUNC_DECL genType backEaseIn (genType const &a)
 
template<typename genType >
GLM_FUNC_DECL genType backEaseIn (genType const &a, genType const &o)
 
template<typename genType >
GLM_FUNC_DECL genType backEaseInOut (genType const &a)
 
template<typename genType >
GLM_FUNC_DECL genType backEaseInOut (genType const &a, genType const &o)
 
template<typename genType >
GLM_FUNC_DECL genType backEaseOut (genType const &a)
 
template<typename genType >
GLM_FUNC_DECL genType backEaseOut (genType const &a, genType const &o)
 
template<typename genType >
GLM_FUNC_DECL genType bounceEaseIn (genType const &a)
 
template<typename genType >
GLM_FUNC_DECL genType bounceEaseInOut (genType const &a)
 
template<typename genType >
GLM_FUNC_DECL genType bounceEaseOut (genType const &a)
 
template<typename genType >
GLM_FUNC_DECL genType circularEaseIn (genType const &a)
 Modelled after shifted quadrant IV of unit circle. More...
 
template<typename genType >
GLM_FUNC_DECL genType circularEaseInOut (genType const &a)
 Modelled after the piecewise circular function y = (1/2)(1 - sqrt(1 - 4x^2)) ; [0, 0.5) y = (1/2)(sqrt(-(2x - 3)*(2x - 1)) + 1) ; [0.5, 1]. More...
 
template<typename genType >
GLM_FUNC_DECL genType circularEaseOut (genType const &a)
 Modelled after shifted quadrant II of unit circle. More...
 
+template<typename genType >
GLM_FUNC_DECL genType cubicEaseIn (genType const &a)
 Modelled after the cubic y = x^3.
 
template<typename genType >
GLM_FUNC_DECL genType cubicEaseInOut (genType const &a)
 Modelled after the piecewise cubic y = (1/2)((2x)^3) ; [0, 0.5) y = (1/2)((2x-2)^3 + 2) ; [0.5, 1]. More...
 
template<typename genType >
GLM_FUNC_DECL genType cubicEaseOut (genType const &a)
 Modelled after the cubic y = (x - 1)^3 + 1. More...
 
template<typename genType >
GLM_FUNC_DECL genType elasticEaseIn (genType const &a)
 Modelled after the damped sine wave y = sin(13pi/2*x)*pow(2, 10 * (x - 1)) More...
 
template<typename genType >
GLM_FUNC_DECL genType elasticEaseInOut (genType const &a)
 Modelled after the piecewise exponentially-damped sine wave: y = (1/2)*sin(13pi/2*(2*x))*pow(2, 10 * ((2*x) - 1)) ; [0,0.5) y = (1/2)*(sin(-13pi/2*((2x-1)+1))*pow(2,-10(2*x-1)) + 2) ; [0.5, 1]. More...
 
template<typename genType >
GLM_FUNC_DECL genType elasticEaseOut (genType const &a)
 Modelled after the damped sine wave y = sin(-13pi/2*(x + 1))*pow(2, -10x) + 1. More...
 
template<typename genType >
GLM_FUNC_DECL genType exponentialEaseIn (genType const &a)
 Modelled after the exponential function y = 2^(10(x - 1)) More...
 
template<typename genType >
GLM_FUNC_DECL genType exponentialEaseInOut (genType const &a)
 Modelled after the piecewise exponential y = (1/2)2^(10(2x - 1)) ; [0,0.5) y = -(1/2)*2^(-10(2x - 1))) + 1 ; [0.5,1]. More...
 
template<typename genType >
GLM_FUNC_DECL genType exponentialEaseOut (genType const &a)
 Modelled after the exponential function y = -2^(-10x) + 1. More...
 
template<typename genType >
GLM_FUNC_DECL genType linearInterpolation (genType const &a)
 Modelled after the line y = x. More...
 
template<typename genType >
GLM_FUNC_DECL genType quadraticEaseIn (genType const &a)
 Modelled after the parabola y = x^2. More...
 
template<typename genType >
GLM_FUNC_DECL genType quadraticEaseInOut (genType const &a)
 Modelled after the piecewise quadratic y = (1/2)((2x)^2) ; [0, 0.5) y = -(1/2)((2x-1)*(2x-3) - 1) ; [0.5, 1]. More...
 
template<typename genType >
GLM_FUNC_DECL genType quadraticEaseOut (genType const &a)
 Modelled after the parabola y = -x^2 + 2x. More...
 
template<typename genType >
GLM_FUNC_DECL genType quarticEaseIn (genType const &a)
 Modelled after the quartic y = x^4. More...
 
template<typename genType >
GLM_FUNC_DECL genType quarticEaseInOut (genType const &a)
 Modelled after the piecewise quartic y = (1/2)((2x)^4) ; [0, 0.5) y = -(1/2)((2x-2)^4 - 2) ; [0.5, 1]. More...
 
template<typename genType >
GLM_FUNC_DECL genType quarticEaseOut (genType const &a)
 Modelled after the quartic y = 1 - (x - 1)^4. More...
 
template<typename genType >
GLM_FUNC_DECL genType quinticEaseIn (genType const &a)
 Modelled after the quintic y = x^5. More...
 
template<typename genType >
GLM_FUNC_DECL genType quinticEaseInOut (genType const &a)
 Modelled after the piecewise quintic y = (1/2)((2x)^5) ; [0, 0.5) y = (1/2)((2x-2)^5 + 2) ; [0.5, 1]. More...
 
template<typename genType >
GLM_FUNC_DECL genType quinticEaseOut (genType const &a)
 Modelled after the quintic y = (x - 1)^5 + 1. More...
 
template<typename genType >
GLM_FUNC_DECL genType sineEaseIn (genType const &a)
 Modelled after quarter-cycle of sine wave. More...
 
template<typename genType >
GLM_FUNC_DECL genType sineEaseInOut (genType const &a)
 Modelled after half sine wave. More...
 
template<typename genType >
GLM_FUNC_DECL genType sineEaseOut (genType const &a)
 Modelled after quarter-cycle of sine wave (different phase) More...
 
+

Detailed Description

+

GLM_GTX_easing

+
Author
Robert Chisholm
+
See also
Core features (dependence)
+ +

Definition in file easing.hpp.
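To make the function table above concrete, a small sketch (my own, with illustrative values; the header requires GLM_ENABLE_EXPERIMENTAL):

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/gtx/easing.hpp>

float ease_demo() // hypothetical demo function
{
    float t = 0.25f;                            // normalized time in [0, 1]
    float smooth = glm::quadraticEaseInOut(t);  // 2 * t * t = 0.125 for t < 0.5
    float springy = glm::elasticEaseOut(t);     // overshoots, then settles toward 1
    return smooth + springy;
}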

+
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00023_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00023_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..92e1529b94e62b1a252ab6ab52cd01f9c2184b03
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00023_source.html
@@ -0,0 +1,254 @@
0.9.9 API documentation: easing.hpp Source File
easing.hpp
1 
+
17 #pragma once
+
18 
+
19 // Dependency:
+
20 #include "../glm.hpp"
+
21 #include "../gtc/constants.hpp"
+
22 #include "../detail/qualifier.hpp"
+
23 
+
24 #if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+
25 # ifndef GLM_ENABLE_EXPERIMENTAL
+
26 # pragma message("GLM: GLM_GTX_easing is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+
27 # else
+
28 # pragma message("GLM: GLM_GTX_easing extension included")
+
29 # endif
+
30 #endif
+
31 
+
32 namespace glm{
+
35 
+
38  template <typename genType>
+
39  GLM_FUNC_DECL genType linearInterpolation(genType const & a);
+
40 
+
43  template <typename genType>
+
44  GLM_FUNC_DECL genType quadraticEaseIn(genType const & a);
+
45 
+
48  template <typename genType>
+
49  GLM_FUNC_DECL genType quadraticEaseOut(genType const & a);
+
50 
+
55  template <typename genType>
+
56  GLM_FUNC_DECL genType quadraticEaseInOut(genType const & a);
+
57 
+
59  template <typename genType>
+
60  GLM_FUNC_DECL genType cubicEaseIn(genType const & a);
+
61 
+
64  template <typename genType>
+
65  GLM_FUNC_DECL genType cubicEaseOut(genType const & a);
+
66 
+
71  template <typename genType>
+
72  GLM_FUNC_DECL genType cubicEaseInOut(genType const & a);
+
73 
+
76  template <typename genType>
+
77  GLM_FUNC_DECL genType quarticEaseIn(genType const & a);
+
78 
+
81  template <typename genType>
+
82  GLM_FUNC_DECL genType quarticEaseOut(genType const & a);
+
83 
+
88  template <typename genType>
+
89  GLM_FUNC_DECL genType quarticEaseInOut(genType const & a);
+
90 
+
93  template <typename genType>
+
94  GLM_FUNC_DECL genType quinticEaseIn(genType const & a);
+
95 
+
98  template <typename genType>
+
99  GLM_FUNC_DECL genType quinticEaseOut(genType const & a);
+
100 
+
105  template <typename genType>
+
106  GLM_FUNC_DECL genType quinticEaseInOut(genType const & a);
+
107 
+
110  template <typename genType>
+
111  GLM_FUNC_DECL genType sineEaseIn(genType const & a);
+
112 
+
115  template <typename genType>
+
116  GLM_FUNC_DECL genType sineEaseOut(genType const & a);
+
117 
+
120  template <typename genType>
+
121  GLM_FUNC_DECL genType sineEaseInOut(genType const & a);
+
122 
+
125  template <typename genType>
+
126  GLM_FUNC_DECL genType circularEaseIn(genType const & a);
+
127 
+
130  template <typename genType>
+
131  GLM_FUNC_DECL genType circularEaseOut(genType const & a);
+
132 
+
137  template <typename genType>
+
138  GLM_FUNC_DECL genType circularEaseInOut(genType const & a);
+
139 
+
142  template <typename genType>
+
143  GLM_FUNC_DECL genType exponentialEaseIn(genType const & a);
+
144 
+
147  template <typename genType>
+
148  GLM_FUNC_DECL genType exponentialEaseOut(genType const & a);
+
149 
+
154  template <typename genType>
+
155  GLM_FUNC_DECL genType exponentialEaseInOut(genType const & a);
+
156 
+
159  template <typename genType>
+
160  GLM_FUNC_DECL genType elasticEaseIn(genType const & a);
+
161 
+
164  template <typename genType>
+
165  GLM_FUNC_DECL genType elasticEaseOut(genType const & a);
+
166 
+
171  template <typename genType>
+
172  GLM_FUNC_DECL genType elasticEaseInOut(genType const & a);
+
173 
+
175  template <typename genType>
+
176  GLM_FUNC_DECL genType backEaseIn(genType const& a);
+
177 
+
179  template <typename genType>
+
180  GLM_FUNC_DECL genType backEaseOut(genType const& a);
+
181 
+
183  template <typename genType>
+
184  GLM_FUNC_DECL genType backEaseInOut(genType const& a);
+
185 
+
189  template <typename genType>
+
190  GLM_FUNC_DECL genType backEaseIn(genType const& a, genType const& o);
+
191 
+
195  template <typename genType>
+
196  GLM_FUNC_DECL genType backEaseOut(genType const& a, genType const& o);
+
197 
+
201  template <typename genType>
+
202  GLM_FUNC_DECL genType backEaseInOut(genType const& a, genType const& o);
+
203 
+
205  template <typename genType>
+
206  GLM_FUNC_DECL genType bounceEaseIn(genType const& a);
+
207 
+
209  template <typename genType>
+
210  GLM_FUNC_DECL genType bounceEaseOut(genType const& a);
+
211 
+
213  template <typename genType>
+
214  GLM_FUNC_DECL genType bounceEaseInOut(genType const& a);
+
215 
+
217 }//namespace glm
+
218 
+
219 #include "easing.inl"
+
GLM_FUNC_DECL genType bounceEaseIn(genType const &a)
+
GLM_FUNC_DECL genType circularEaseInOut(genType const &a)
Modelled after the piecewise circular function y = (1/2)(1 - sqrt(1 - 4x^2)) ; [0, 0.5) y = (1/2)(sqrt(-(2x - 3)*(2x - 1)) + 1) ; [0.5, 1].
+
GLM_FUNC_DECL genType cubicEaseIn(genType const &a)
Modelled after the cubic y = x^3.
+
GLM_FUNC_DECL genType elasticEaseIn(genType const &a)
Modelled after the damped sine wave y = sin(13pi/2*x)*pow(2, 10 * (x - 1))
+
GLM_FUNC_DECL genType quinticEaseIn(genType const &a)
Modelled after the quintic y = x^5.
+
GLM_FUNC_DECL genType sineEaseInOut(genType const &a)
Modelled after half sine wave.
+
GLM_FUNC_DECL genType circularEaseOut(genType const &a)
Modelled after shifted quadrant II of unit circle.
+
GLM_FUNC_DECL genType elasticEaseOut(genType const &a)
Modelled after the damped sine wave y = sin(-13pi/2*(x + 1))*pow(2, -10x) + 1.
+
GLM_FUNC_DECL genType elasticEaseInOut(genType const &a)
Modelled after the piecewise exponentially-damped sine wave: y = (1/2)*sin(13pi/2*(2*x))*pow(2, 10 * ((2*x) - 1)) ; [0,0.5) y = (1/2)*(sin(-13pi/2*((2x-1)+1))*pow(2,-10(2*x-1)) + 2) ; [0.5, 1].
+
GLM_FUNC_DECL genType sineEaseIn(genType const &a)
Modelled after quarter-cycle of sine wave.
+
GLM_FUNC_DECL genType linearInterpolation(genType const &a)
Modelled after the line y = x.
+
GLM_FUNC_DECL genType quarticEaseIn(genType const &a)
Modelled after the quartic y = x^4.
+
GLM_FUNC_DECL genType quarticEaseOut(genType const &a)
Modelled after the quartic y = 1 - (x - 1)^4.
+
GLM_FUNC_DECL genType sineEaseOut(genType const &a)
Modelled after quarter-cycle of sine wave (different phase)
+
GLM_FUNC_DECL genType quadraticEaseInOut(genType const &a)
Modelled after the piecewise quadratic y = (1/2)((2x)^2) ; [0, 0.5) y = -(1/2)((2x-1)*(2x-3) - 1) ; [...
+
GLM_FUNC_DECL genType circularEaseIn(genType const &a)
Modelled after shifted quadrant IV of unit circle.
+
GLM_FUNC_DECL genType quadraticEaseOut(genType const &a)
Modelled after the parabola y = -x^2 + 2x.
+
GLM_FUNC_DECL genType exponentialEaseOut(genType const &a)
Modelled after the exponential function y = -2^(-10x) + 1.
+
GLM_FUNC_DECL genType quinticEaseOut(genType const &a)
Modelled after the quintic y = (x - 1)^5 + 1.
+
GLM_FUNC_DECL genType cubicEaseOut(genType const &a)
Modelled after the cubic y = (x - 1)^3 + 1.
+
GLM_FUNC_DECL genType exponentialEaseInOut(genType const &a)
Modelled after the piecewise exponential y = (1/2)2^(10(2x - 1)) ; [0,0.5) y = -(1/2)*2^(-10(2x - 1))...
+
GLM_FUNC_DECL genType bounceEaseOut(genType const &a)
+
GLM_FUNC_DECL genType quinticEaseInOut(genType const &a)
Modelled after the piecewise quintic y = (1/2)((2x)^5) ; [0, 0.5) y = (1/2)((2x-2)^5 + 2) ; [0...
+
GLM_FUNC_DECL genType backEaseIn(genType const &a, genType const &o)
+
GLM_FUNC_DECL genType exponentialEaseIn(genType const &a)
Modelled after the exponential function y = 2^(10(x - 1))
+
GLM_FUNC_DECL genType quadraticEaseIn(genType const &a)
Modelled after the parabola y = x^2.
+
GLM_FUNC_DECL genType quarticEaseInOut(genType const &a)
Modelled after the piecewise quartic y = (1/2)((2x)^4) ; [0, 0.5) y = -(1/2)((2x-2)^4 - 2) ; [0...
+
GLM_FUNC_DECL genType cubicEaseInOut(genType const &a)
Modelled after the piecewise cubic y = (1/2)((2x)^3) ; [0, 0.5) y = (1/2)((2x-2)^3 + 2) ; [0...
+
GLM_FUNC_DECL genType bounceEaseInOut(genType const &a)
+
GLM_FUNC_DECL genType backEaseInOut(genType const &a, genType const &o)
+
GLM_FUNC_DECL genType backEaseOut(genType const &a, genType const &o)
+
Definition: common.hpp:20
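The backEase* functions above also take an optional overshoot parameter o; a short sketch of both forms (my own example values):

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/gtx/easing.hpp>

float back_demo() // hypothetical demo function
{
    float a = 0.3f;
    float d = glm::backEaseIn(a);        // default overshoot
    float e = glm::backEaseIn(a, 2.0f);  // explicit overshoot factor o
    return d + e;
}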
+
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00024.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00024.html
new file mode 100644
index 0000000000000000000000000000000000000000..8a392d24a24890ef75f0329214bf2c6e3a135996
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00024.html
@@ -0,0 +1,133 @@
0.9.9 API documentation: epsilon.hpp File Reference
epsilon.hpp File Reference
+
+
+ +

GLM_GTC_epsilon

Functions

template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, bool, Q > epsilonEqual (vec< L, T, Q > const &x, vec< L, T, Q > const &y, T const &epsilon)
 Returns the component-wise comparison of |x - y| < epsilon. More...
 
template<typename genType >
GLM_FUNC_DECL bool epsilonEqual (genType const &x, genType const &y, genType const &epsilon)
 Returns the component-wise comparison of |x - y| < epsilon. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, bool, Q > epsilonNotEqual (vec< L, T, Q > const &x, vec< L, T, Q > const &y, T const &epsilon)
 Returns the component-wise comparison of |x - y| >= epsilon. More...
 
template<typename genType >
GLM_FUNC_DECL bool epsilonNotEqual (genType const &x, genType const &y, genType const &epsilon)
 Returns the component-wise comparison of |x - y| >= epsilon. More...
 
+

Detailed Description

+

GLM_GTC_epsilon

+
See also
Core features (dependence)
+
+GLM_GTC_quaternion (dependence)
+ +

Definition in file epsilon.hpp.
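A brief sketch of the scalar and vector overloads (my own example; the tolerance values are illustrative):

#include <glm/glm.hpp>
#include <glm/gtc/epsilon.hpp>
#include <glm/gtc/constants.hpp> // glm::epsilon<T>()

bool epsilon_demo() // hypothetical demo function
{
    // Scalar form: robust float comparison instead of operator==.
    bool close = glm::epsilonEqual(0.1f + 0.2f, 0.3f, 1e-6f); // true

    // Vector form: component-wise result as a bvec.
    glm::bvec3 cmp = glm::epsilonNotEqual(glm::vec3(1.0f),
                                          glm::vec3(1.0f, 1.0f + 1e-3f, 1.0f),
                                          glm::epsilon<float>());
    return close && glm::any(cmp); // only the y component differs
}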

+
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00024_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00024_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..a1da38389e05ae52089a5d5226491650bd1fc6da
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00024_source.html
@@ -0,0 +1,132 @@
0.9.9 API documentation: epsilon.hpp Source File
epsilon.hpp
1 
+
14 #pragma once
+
15 
+
16 // Dependencies
+
17 #include "../detail/setup.hpp"
+
18 #include "../detail/qualifier.hpp"
+
19 
+
20 #if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+
21 # pragma message("GLM: GLM_GTC_epsilon extension included")
+
22 #endif
+
23 
+
24 namespace glm
+
25 {
+
28 
+
33  template<length_t L, typename T, qualifier Q>
+
34  GLM_FUNC_DECL vec<L, bool, Q> epsilonEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, T const& epsilon);
+
35 
+
40  template<typename genType>
+
41  GLM_FUNC_DECL bool epsilonEqual(genType const& x, genType const& y, genType const& epsilon);
+
42 
+
47  template<length_t L, typename T, qualifier Q>
+
48  GLM_FUNC_DECL vec<L, bool, Q> epsilonNotEqual(vec<L, T, Q> const& x, vec<L, T, Q> const& y, T const& epsilon);
+
49 
+
54  template<typename genType>
+
55  GLM_FUNC_DECL bool epsilonNotEqual(genType const& x, genType const& y, genType const& epsilon);
+
56 
+
58 }//namespace glm
+
59 
+
60 #include "epsilon.inl"
+
GLM_FUNC_DECL bool epsilonEqual(genType const &x, genType const &y, genType const &epsilon)
Returns the component-wise comparison of |x - y| < epsilon.
+
GLM_FUNC_DECL bool epsilonNotEqual(genType const &x, genType const &y, genType const &epsilon)
Returns the component-wise comparison of |x - y| >= epsilon.
+
GLM_FUNC_DECL GLM_CONSTEXPR genType epsilon()
Return the epsilon constant for floating point types.
+
Definition: common.hpp:20
+
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00025.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00025.html
new file mode 100644
index 0000000000000000000000000000000000000000..2904e622e7427e3db429f2b504c16a2953a1af51
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00025.html
@@ -0,0 +1,279 @@
0.9.9 API documentation: euler_angles.hpp File Reference
euler_angles.hpp File Reference
+
+
+ +

GLM_GTX_euler_angles

Functions

template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > derivedEulerAngleX (T const &angleX, T const &angularVelocityX)
 Creates a 3D 4 * 4 homogeneous derived matrix from the rotation matrix about X-axis. More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > derivedEulerAngleY (T const &angleY, T const &angularVelocityY)
 Creates a 3D 4 * 4 homogeneous derived matrix from the rotation matrix about Y-axis. More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > derivedEulerAngleZ (T const &angleZ, T const &angularVelocityZ)
 Creates a 3D 4 * 4 homogeneous derived matrix from the rotation matrix about Z-axis. More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleX (T const &angleX)
 Creates a 3D 4 * 4 homogeneous rotation matrix from an euler angle X. More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleXY (T const &angleX, T const &angleY)
 Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Y). More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleXYX (T const &t1, T const &t2, T const &t3)
 Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Y * X). More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleXYZ (T const &t1, T const &t2, T const &t3)
 Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Y * Z). More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleXZ (T const &angleX, T const &angleZ)
 Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Z). More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleXZX (T const &t1, T const &t2, T const &t3)
 Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Z * X). More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleXZY (T const &t1, T const &t2, T const &t3)
 Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Z * Y). More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleY (T const &angleY)
 Creates a 3D 4 * 4 homogeneous rotation matrix from an euler angle Y. More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleYX (T const &angleY, T const &angleX)
 Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X). More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleYXY (T const &t1, T const &t2, T const &t3)
 Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Y). More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleYXZ (T const &yaw, T const &pitch, T const &roll)
 Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Z). More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleYZ (T const &angleY, T const &angleZ)
 Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * Z). More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleYZX (T const &t1, T const &t2, T const &t3)
 Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * Z * X). More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleYZY (T const &t1, T const &t2, T const &t3)
 Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * Z * Y). More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleZ (T const &angleZ)
 Creates a 3D 4 * 4 homogeneous rotation matrix from an euler angle Z. More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleZX (T const &angle, T const &angleX)
 Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * X). More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleZXY (T const &t1, T const &t2, T const &t3)
 Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * X * Y). More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleZXZ (T const &t1, T const &t2, T const &t3)
 Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * X * Z). More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleZY (T const &angleZ, T const &angleY)
 Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * Y). More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleZYX (T const &t1, T const &t2, T const &t3)
 Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * Y * X). More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleZYZ (T const &t1, T const &t2, T const &t3)
 Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * Y * Z). More...
 
template<typename T >
GLM_FUNC_DECL void extractEulerAngleXYX (mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
 Extracts the (X * Y * X) Euler angles from the rotation matrix M. More...
 
template<typename T >
GLM_FUNC_DECL void extractEulerAngleXYZ (mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
 Extracts the (X * Y * Z) Euler angles from the rotation matrix M. More...
 
template<typename T >
GLM_FUNC_DECL void extractEulerAngleXZX (mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
 Extracts the (X * Z * X) Euler angles from the rotation matrix M. More...
 
template<typename T >
GLM_FUNC_DECL void extractEulerAngleXZY (mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
 Extracts the (X * Z * Y) Euler angles from the rotation matrix M. More...
 
template<typename T >
GLM_FUNC_DECL void extractEulerAngleYXY (mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
 Extracts the (Y * X * Y) Euler angles from the rotation matrix M. More...
 
template<typename T >
GLM_FUNC_DECL void extractEulerAngleYXZ (mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
 Extracts the (Y * X * Z) Euler angles from the rotation matrix M. More...
 
template<typename T >
GLM_FUNC_DECL void extractEulerAngleYZX (mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
 Extracts the (Y * Z * X) Euler angles from the rotation matrix M. More...
 
template<typename T >
GLM_FUNC_DECL void extractEulerAngleYZY (mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
 Extracts the (Y * Z * Y) Euler angles from the rotation matrix M. More...
 
template<typename T >
GLM_FUNC_DECL void extractEulerAngleZXY (mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
 Extracts the (Z * X * Y) Euler angles from the rotation matrix M. More...
 
template<typename T >
GLM_FUNC_DECL void extractEulerAngleZXZ (mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
 Extracts the (Z * X * Z) Euler angles from the rotation matrix M. More...
 
template<typename T >
GLM_FUNC_DECL void extractEulerAngleZYX (mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
 Extracts the (Z * Y * X) Euler angles from the rotation matrix M. More...
 
template<typename T >
GLM_FUNC_DECL void extractEulerAngleZYZ (mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
 Extracts the (Z * Y * Z) Euler angles from the rotation matrix M. More...
 
template<typename T >
GLM_FUNC_DECL mat< 2, 2, T, defaultp > orientate2 (T const &angle)
 Creates a 2D 2 * 2 rotation matrix from an euler angle. More...
 
template<typename T >
GLM_FUNC_DECL mat< 3, 3, T, defaultp > orientate3 (T const &angle)
 Creates a 2D 3 * 3 homogeneous rotation matrix from an euler angle. More...
 
template<typename T , qualifier Q>
GLM_FUNC_DECL mat< 3, 3, T, Q > orientate3 (vec< 3, T, Q > const &angles)
 Creates a 3D 3 * 3 rotation matrix from euler angles (Y * X * Z). More...
 
template<typename T , qualifier Q>
GLM_FUNC_DECL mat< 4, 4, T, Q > orientate4 (vec< 3, T, Q > const &angles)
 Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Z). More...
 
template<typename T >
GLM_FUNC_DECL mat< 4, 4, T, defaultp > yawPitchRoll (T const &yaw, T const &pitch, T const &roll)
 Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Z). More...
 
+

Detailed Description

+

GLM_GTX_euler_angles

+
See also
Core features (dependence)
+ +

Definition in file euler_angles.hpp.
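A round-trip sketch for the compose/extract pairs above (my own example; requires GLM_ENABLE_EXPERIMENTAL, angle values illustrative):

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/euler_angles.hpp>

void euler_demo() // hypothetical demo function
{
    // Compose a rotation from yaw/pitch/roll (Y * X * Z order)...
    glm::mat4 M = glm::yawPitchRoll(0.3f, 0.2f, 0.1f);

    // ...and recover the same angles from the matrix.
    float yaw, pitch, roll;
    glm::extractEulerAngleYXZ(M, yaw, pitch, roll); // ~0.3, ~0.2, ~0.1
}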

+
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00025_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00025_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..5c6402e7e45298b561490c7817bb09d50a395157
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00025_source.html
@@ -0,0 +1,380 @@
0.9.9 API documentation: euler_angles.hpp Source File
euler_angles.hpp
1 
+
16 #pragma once
+
17 
+
18 // Dependency:
+
19 #include "../glm.hpp"
+
20 
+
21 #if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+
22 # ifndef GLM_ENABLE_EXPERIMENTAL
+
23 # pragma message("GLM: GLM_GTX_euler_angles is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+
24 # else
+
25 # pragma message("GLM: GLM_GTX_euler_angles extension included")
+
26 # endif
+
27 #endif
+
28 
+
29 namespace glm
+
30 {
+
33 
+
36  template<typename T>
+
37  GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleX(
+
38  T const& angleX);
+
39 
+
42  template<typename T>
+
43  GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleY(
+
44  T const& angleY);
+
45 
+
48  template<typename T>
+
49  GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZ(
+
50  T const& angleZ);
+
51 
+
54  template <typename T>
+
55  GLM_FUNC_DECL mat<4, 4, T, defaultp> derivedEulerAngleX(
+
56  T const & angleX, T const & angularVelocityX);
+
57 
+
60  template <typename T>
+
61  GLM_FUNC_DECL mat<4, 4, T, defaultp> derivedEulerAngleY(
+
62  T const & angleY, T const & angularVelocityY);
+
63 
+
66  template <typename T>
+
67  GLM_FUNC_DECL mat<4, 4, T, defaultp> derivedEulerAngleZ(
+
68  T const & angleZ, T const & angularVelocityZ);
+
69 
+
72  template<typename T>
+
73  GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXY(
+
74  T const& angleX,
+
75  T const& angleY);
+
76 
+
79  template<typename T>
+
80  GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYX(
+
81  T const& angleY,
+
82  T const& angleX);
+
83 
+
86  template<typename T>
+
87  GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXZ(
+
88  T const& angleX,
+
89  T const& angleZ);
+
90 
+
93  template<typename T>
+
94  GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZX(
+
95  T const& angle,
+
96  T const& angleX);
+
97 
+
100  template<typename T>
+
101  GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYZ(
+
102  T const& angleY,
+
103  T const& angleZ);
+
104 
+
107  template<typename T>
+
108  GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZY(
+
109  T const& angleZ,
+
110  T const& angleY);
+
111 
+
114  template<typename T>
+
115  GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXYZ(
+
116  T const& t1,
+
117  T const& t2,
+
118  T const& t3);
+
119 
+
122  template<typename T>
+
123  GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYXZ(
+
124  T const& yaw,
+
125  T const& pitch,
+
126  T const& roll);
+
127 
+
130  template <typename T>
+
131  GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXZX(
+
132  T const & t1,
+
133  T const & t2,
+
134  T const & t3);
+
135 
+
138  template <typename T>
+
139  GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXYX(
+
140  T const & t1,
+
141  T const & t2,
+
142  T const & t3);
+
143 
+
146  template <typename T>
+
147  GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYXY(
+
148  T const & t1,
+
149  T const & t2,
+
150  T const & t3);
+
151 
+
154  template <typename T>
+
155  GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYZY(
+
156  T const & t1,
+
157  T const & t2,
+
158  T const & t3);
+
159 
+
162  template <typename T>
+
163  GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZYZ(
+
164  T const & t1,
+
165  T const & t2,
+
166  T const & t3);
+
167 
+
170  template <typename T>
+
171  GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZXZ(
+
172  T const & t1,
+
173  T const & t2,
+
174  T const & t3);
+
175 
+
178  template <typename T>
+
179  GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXZY(
+
180  T const & t1,
+
181  T const & t2,
+
182  T const & t3);
+
183 
+
186  template <typename T>
+
187  GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYZX(
+
188  T const & t1,
+
189  T const & t2,
+
190  T const & t3);
+
191 
+
194  template <typename T>
+
195  GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZYX(
+
196  T const & t1,
+
197  T const & t2,
+
198  T const & t3);
+
199 
+
202  template <typename T>
+
203  GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZXY(
+
204  T const & t1,
+
205  T const & t2,
+
206  T const & t3);
+
207 
+
210  template<typename T>
+
211  GLM_FUNC_DECL mat<4, 4, T, defaultp> yawPitchRoll(
+
212  T const& yaw,
+
213  T const& pitch,
+
214  T const& roll);
+
215 
+
218  template<typename T>
+
219  GLM_FUNC_DECL mat<2, 2, T, defaultp> orientate2(T const& angle);
+
220 
+
223  template<typename T>
+
224  GLM_FUNC_DECL mat<3, 3, T, defaultp> orientate3(T const& angle);
+
225 
+
228  template<typename T, qualifier Q>
+
229  GLM_FUNC_DECL mat<3, 3, T, Q> orientate3(vec<3, T, Q> const& angles);
+
230 
+
233  template<typename T, qualifier Q>
+
234  GLM_FUNC_DECL mat<4, 4, T, Q> orientate4(vec<3, T, Q> const& angles);
+
235 
+
238  template<typename T>
+
239  GLM_FUNC_DECL void extractEulerAngleXYZ(mat<4, 4, T, defaultp> const& M,
+
240  T & t1,
+
241  T & t2,
+
242  T & t3);
+
243 
+
246  template <typename T>
+
247  GLM_FUNC_DECL void extractEulerAngleYXZ(mat<4, 4, T, defaultp> const & M,
+
248  T & t1,
+
249  T & t2,
+
250  T & t3);
+
251 
+
254  template <typename T>
+
255  GLM_FUNC_DECL void extractEulerAngleXZX(mat<4, 4, T, defaultp> const & M,
+
256  T & t1,
+
257  T & t2,
+
258  T & t3);
+
259 
+
262  template <typename T>
+
263  GLM_FUNC_DECL void extractEulerAngleXYX(mat<4, 4, T, defaultp> const & M,
+
264  T & t1,
+
265  T & t2,
+
266  T & t3);
+
267 
+
270  template <typename T>
+
271  GLM_FUNC_DECL void extractEulerAngleYXY(mat<4, 4, T, defaultp> const & M,
+
272  T & t1,
+
273  T & t2,
+
274  T & t3);
+
275 
+
278  template <typename T>
+
279  GLM_FUNC_DECL void extractEulerAngleYZY(mat<4, 4, T, defaultp> const & M,
+
280  T & t1,
+
281  T & t2,
+
282  T & t3);
+
283 
+
286  template <typename T>
+
287  GLM_FUNC_DECL void extractEulerAngleZYZ(mat<4, 4, T, defaultp> const & M,
+
288  T & t1,
+
289  T & t2,
+
290  T & t3);
+
291 
+
294  template <typename T>
+
295  GLM_FUNC_DECL void extractEulerAngleZXZ(mat<4, 4, T, defaultp> const & M,
+
296  T & t1,
+
297  T & t2,
+
298  T & t3);
+
299 
+
302  template <typename T>
+
303  GLM_FUNC_DECL void extractEulerAngleXZY(mat<4, 4, T, defaultp> const & M,
+
304  T & t1,
+
305  T & t2,
+
306  T & t3);
+
307 
+
310  template <typename T>
+
311  GLM_FUNC_DECL void extractEulerAngleYZX(mat<4, 4, T, defaultp> const & M,
+
312  T & t1,
+
313  T & t2,
+
314  T & t3);
+
315 
+
318  template <typename T>
+
319  GLM_FUNC_DECL void extractEulerAngleZYX(mat<4, 4, T, defaultp> const & M,
+
320  T & t1,
+
321  T & t2,
+
322  T & t3);
+
323 
+
326  template <typename T>
+
327  GLM_FUNC_DECL void extractEulerAngleZXY(mat<4, 4, T, defaultp> const & M,
+
328  T & t1,
+
329  T & t2,
+
330  T & t3);
+
331 
+
333 }//namespace glm
+
334 
+
335 #include "euler_angles.inl"
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleXY(T const &angleX, T const &angleY)
Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Y).
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleYZY(T const &t1, T const &t2, T const &t3)
Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * Z * Y).
+
GLM_FUNC_DECL void extractEulerAngleYXZ(mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
Extracts the (Y * X * Z) Euler angles from the rotation matrix M.
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleXYZ(T const &t1, T const &t2, T const &t3)
Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Y * Z).
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleXZY(T const &t1, T const &t2, T const &t3)
Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Z * Y).
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > derivedEulerAngleZ(T const &angleZ, T const &angularVelocityZ)
Creates a 3D 4 * 4 homogeneous derived matrix from the rotation matrix about Z-axis.
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleYX(T const &angleY, T const &angleX)
Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X).
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleY(T const &angleY)
Creates a 3D 4 * 4 homogeneous rotation matrix from an euler angle Y.
+
GLM_FUNC_DECL T angle(qua< T, Q > const &x)
Returns the quaternion rotation angle.
+
GLM_FUNC_DECL void extractEulerAngleZYZ(mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
Extracts the (Z * Y * Z) Euler angles from the rotation matrix M.
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > derivedEulerAngleX(T const &angleX, T const &angularVelocityX)
Creates a 3D 4 * 4 homogeneous derived matrix from the rotation matrix about X-axis.
+
GLM_FUNC_DECL void extractEulerAngleXYX(mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
Extracts the (X * Y * X) Euler angles from the rotation matrix M.
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleZXY(T const &t1, T const &t2, T const &t3)
Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * X * Y).
+
GLM_FUNC_DECL T roll(qua< T, Q > const &x)
Returns roll value of euler angles expressed in radians.
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleX(T const &angleX)
Creates a 3D 4 * 4 homogeneous rotation matrix from an euler angle X.
+
GLM_FUNC_DECL mat< 2, 2, T, defaultp > orientate2(T const &angle)
Creates a 2D 2 * 2 rotation matrix from an euler angle.
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleXYX(T const &t1, T const &t2, T const &t3)
Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Y * X).
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleYXZ(T const &yaw, T const &pitch, T const &roll)
Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Z).
+
GLM_FUNC_DECL void extractEulerAngleXZX(mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
Extracts the (X * Z * X) Euler angles from the rotation matrix M.
+
GLM_FUNC_DECL T yaw(qua< T, Q > const &x)
Returns yaw value of euler angles expressed in radians.
+
GLM_FUNC_DECL void extractEulerAngleYXY(mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
Extracts the (Y * X * Y) Euler angles from the rotation matrix M.
+
GLM_FUNC_DECL void extractEulerAngleZXY(mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
Extracts the (Z * X * Y) Euler angles from the rotation matrix M.
+
GLM_FUNC_DECL void extractEulerAngleXZY(mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
Extracts the (X * Z * Y) Euler angles from the rotation matrix M.
+
GLM_FUNC_DECL void extractEulerAngleYZX(mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
Extracts the (Y * Z * X) Euler angles from the rotation matrix M.
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleXZX(T const &t1, T const &t2, T const &t3)
Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Z * X).
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleZYX(T const &t1, T const &t2, T const &t3)
Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * Y * X).
+
GLM_FUNC_DECL mat< 4, 4, T, Q > orientate4(vec< 3, T, Q > const &angles)
Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Z).
+
GLM_FUNC_DECL void extractEulerAngleZYX(mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
Extracts the (Z * Y * X) Euler angles from the rotation matrix M.
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleZ(T const &angleZ)
Creates a 3D 4 * 4 homogeneous rotation matrix from an euler angle Z.
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleYXY(T const &t1, T const &t2, T const &t3)
Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Y).
+
GLM_FUNC_DECL void extractEulerAngleYZY(mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
Extracts the (Y * Z * Y) Euler angles from the rotation matrix M.
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > yawPitchRoll(T const &yaw, T const &pitch, T const &roll)
Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Z).
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleXZ(T const &angleX, T const &angleZ)
Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Z).
+
GLM_FUNC_DECL void extractEulerAngleXYZ(mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
Extracts the (X * Y * Z) Euler angles from the rotation matrix M.
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleZXZ(T const &t1, T const &t2, T const &t3)
Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * X * Z).
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleYZX(T const &t1, T const &t2, T const &t3)
Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * Z * X).
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleZY(T const &angleZ, T const &angleY)
Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * Y).
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleZYZ(T const &t1, T const &t2, T const &t3)
Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * Y * Z).
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleYZ(T const &angleY, T const &angleZ)
Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * Z).
+
GLM_FUNC_DECL mat< 3, 3, T, Q > orientate3(vec< 3, T, Q > const &angles)
Creates a 3D 3 * 3 rotation matrix from euler angles (Y * X * Z).
+
GLM_FUNC_DECL void extractEulerAngleZXZ(mat< 4, 4, T, defaultp > const &M, T &t1, T &t2, T &t3)
Extracts the (Z * X * Z) Euler angles from the rotation matrix M.
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > derivedEulerAngleY(T const &angleY, T const &angularVelocityY)
Creates a 3D 4 * 4 homogeneous derived matrix from the rotation matrix about Y-axis.
+
GLM_FUNC_DECL T pitch(qua< T, Q > const &x)
Returns pitch value of euler angles expressed in radians.
+
GLM_FUNC_DECL mat< 4, 4, T, defaultp > eulerAngleZX(T const &angle, T const &angleX)
Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * X).
+
Definition: common.hpp:20
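The same pattern works for any of the compose/extract pairs declared above, e.g. the X * Y * Z order (again my own sketch with illustrative angles):

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/euler_angles.hpp>

void euler_xyz_demo() // hypothetical demo function
{
    glm::mat4 R = glm::eulerAngleXYZ(0.1f, 0.2f, 0.3f);
    float t1, t2, t3;
    glm::extractEulerAngleXYZ(R, t1, t2, t3); // recovers ~0.1, ~0.2, ~0.3
}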
+
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00026.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00026.html
new file mode 100644
index 0000000000000000000000000000000000000000..552b6ed247df1b5a700a1a854bec7d7518a76a2c
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00026.html
@@ -0,0 +1,143 @@
0.9.9 API documentation: exponential.hpp File Reference
exponential.hpp File Reference
+
+
+ +

Core features

Functions

template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > exp (vec< L, T, Q > const &v)
 Returns the natural exponentiation of v, i.e., e^v. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > exp2 (vec< L, T, Q > const &v)
 Returns 2 raised to the v power. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > inversesqrt (vec< L, T, Q > const &v)
 Returns the reciprocal of the positive square root of v. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > log (vec< L, T, Q > const &v)
 Returns the natural logarithm of v, i.e., returns the value y which satisfies the equation v = e^y. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > log2 (vec< L, T, Q > const &v)
 Returns the base 2 log of v, i.e., returns the value y, which satisfies the equation v = 2 ^ y. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > pow (vec< L, T, Q > const &base, vec< L, T, Q > const &exponent)
 Returns 'base' raised to the power 'exponent'. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > sqrt (vec< L, T, Q > const &v)
 Returns the positive square root of v. More...
 
+

Detailed Description
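These core functions apply component-wise to vectors; a small sketch (my own, values illustrative):

#include <glm/glm.hpp>

glm::vec3 exponential_demo() // hypothetical demo function
{
    glm::vec3 v(1.0f, 4.0f, 9.0f);
    glm::vec3 roots   = glm::sqrt(v);                  // (1, 2, 3)
    glm::vec3 recip   = glm::inversesqrt(v);           // (1, 0.5, 1/3)
    glm::vec3 squares = glm::pow(v, glm::vec3(2.0f));  // component-wise v^2
    return roots + recip + squares;
}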

+
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00026_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00026_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..56d929d8d957182431f771bb8c7fe7cf03c5a7d0
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00026_source.html
@@ -0,0 +1,147 @@
0.9.9 API documentation: exponential.hpp Source File
exponential.hpp
1 
+
15 #pragma once
+
16 
+
17 #include "detail/type_vec1.hpp"
+
18 #include "detail/type_vec2.hpp"
+
19 #include "detail/type_vec3.hpp"
+
20 #include "detail/type_vec4.hpp"
+
21 #include <cmath>
+
22 
+
23 namespace glm
+
24 {
+
27 
+
35  template<length_t L, typename T, qualifier Q>
+
36  GLM_FUNC_DECL vec<L, T, Q> pow(vec<L, T, Q> const& base, vec<L, T, Q> const& exponent);
+
37 
+
46  template<length_t L, typename T, qualifier Q>
+
47  GLM_FUNC_DECL vec<L, T, Q> exp(vec<L, T, Q> const& v);
+
48 
+
59  template<length_t L, typename T, qualifier Q>
+
60  GLM_FUNC_DECL vec<L, T, Q> log(vec<L, T, Q> const& v);
+
61 
+
70  template<length_t L, typename T, qualifier Q>
+
71  GLM_FUNC_DECL vec<L, T, Q> exp2(vec<L, T, Q> const& v);
+
72 
+
82  template<length_t L, typename T, qualifier Q>
+
83  GLM_FUNC_DECL vec<L, T, Q> log2(vec<L, T, Q> const& v);
+
84 
+
93  template<length_t L, typename T, qualifier Q>
+
94  GLM_FUNC_DECL vec<L, T, Q> sqrt(vec<L, T, Q> const& v);
+
95 
+
104  template<length_t L, typename T, qualifier Q>
+
105  GLM_FUNC_DECL vec<L, T, Q> inversesqrt(vec<L, T, Q> const& v);
+
106 
+
108 }//namespace glm
+
109 
+
110 #include "detail/func_exponential.inl"
+
Core features
+
GLM_FUNC_DECL vec< L, T, Q > sqrt(vec< L, T, Q > const &v)
Returns the positive square root of v.
+
GLM_FUNC_DECL vec< L, T, Q > exp2(vec< L, T, Q > const &v)
Returns 2 raised to the v power.
+
GLM_FUNC_DECL vec< L, T, Q > inversesqrt(vec< L, T, Q > const &v)
Returns the reciprocal of the positive square root of v.
+
Core features
+
Core features
+
GLM_FUNC_DECL vec< L, T, Q > pow(vec< L, T, Q > const &base, vec< L, T, Q > const &exponent)
Returns 'base' raised to the power 'exponent'.
+
GLM_FUNC_DECL vec< L, T, Q > exp(vec< L, T, Q > const &v)
Returns the natural exponentiation of v, i.e., e^v.
+
GLM_FUNC_DECL vec< L, T, Q > log(vec< L, T, Q > const &v)
Returns the natural logarithm of v, i.e., returns the value y which satisfies the equation v = e^y...
+
Core features
+
GLM_FUNC_DECL vec< L, T, Q > log2(vec< L, T, Q > const &v)
Returns the base 2 log of v, i.e., returns the value y, which satisfies the equation v = 2 ^ y...
+
Definition: common.hpp:20
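As the descriptions above imply, exp/log and exp2/log2 are inverse pairs, which is easy to check component-wise (my own sketch):

#include <glm/glm.hpp>

glm::vec2 exp_log_demo() // hypothetical demo function
{
    glm::vec2 x(0.5f, 2.0f);
    glm::vec2 a = glm::log(glm::exp(x));   // ~x
    glm::vec2 b = glm::exp2(glm::log2(x)); // ~x
    return a + b;
}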
+
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00027.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00027.html
new file mode 100644
index 0000000000000000000000000000000000000000..d70d9442df27d598547452fe7036b119fe898f06
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00027.html
@@ -0,0 +1,108 @@
0.9.9 API documentation: ext.hpp File Reference
ext.hpp File Reference
+
+
+ +

Core features (Dependence)
Detailed Description

+

Core features (Dependence)

+ +

Definition in file ext.hpp.
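In practice this umbrella header is a single include; note that the listing's own pragma message flags pulling in everything as "not recommended" for compile times, and defining GLM_ENABLE_EXPERIMENTAL first additionally includes the GTX extensions:

// One include for core GLM plus all stable extensions.
// #define GLM_ENABLE_EXPERIMENTAL  // uncomment to also pull in the GTX extensions
#include <glm/ext.hpp>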

+
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00027_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00027_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..4142831aae1e3698a8b4e546dbd5c096762be488
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00027_source.html
@@ -0,0 +1,449 @@
0.9.9 API documentation: ext.hpp Source File
ext.hpp
1 
+
5 #include "detail/setup.hpp"
+
6 
+
7 #pragma once
+
8 
+
9 #include "glm.hpp"
+
10 
+
11 #if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_MESSAGE_EXT_INCLUDED_DISPLAYED)
+
12 # define GLM_MESSAGE_EXT_INCLUDED_DISPLAYED
+
13 # pragma message("GLM: All extensions included (not recommended)")
+
14 #endif//GLM_MESSAGES
+
15 
+ + + + + + + + + + + + + + + + + + +
34 
+ + + + + + + + + + + + + +
48 #include "./ext/matrix_float4x2_precision.hpp"
+ + + + +
53 
+ +
55 
+ + + + + + +
62 
+ + + +
66 
+
67 #include "./ext/vector_bool1.hpp"
+ +
69 #include "./ext/vector_bool2.hpp"
+ +
71 #include "./ext/vector_bool3.hpp"
+ +
73 #include "./ext/vector_bool4.hpp"
+ +
75 
+
76 #include "./ext/vector_double1.hpp"
+ +
78 #include "./ext/vector_double2.hpp"
+ +
80 #include "./ext/vector_double3.hpp"
+ +
82 #include "./ext/vector_double4.hpp"
+ +
84 
+
85 #include "./ext/vector_float1.hpp"
+ +
87 #include "./ext/vector_float2.hpp"
+ +
89 #include "./ext/vector_float3.hpp"
+ +
91 #include "./ext/vector_float4.hpp"
+ +
93 
+
94 #include "./ext/vector_int1.hpp"
+ +
96 #include "./ext/vector_int2.hpp"
+ +
98 #include "./ext/vector_int3.hpp"
+ +
100 #include "./ext/vector_int4.hpp"
+ +
102 
+ +
104 
+
105 #include "./ext/vector_uint1.hpp"
+ +
107 #include "./ext/vector_uint2.hpp"
+ +
109 #include "./ext/vector_uint3.hpp"
+ +
111 #include "./ext/vector_uint4.hpp"
+ +
113 
+
114 #include "./gtc/bitfield.hpp"
+
115 #include "./gtc/color_space.hpp"
+
116 #include "./gtc/constants.hpp"
+
117 #include "./gtc/epsilon.hpp"
+
118 #include "./gtc/integer.hpp"
+
119 #include "./gtc/matrix_access.hpp"
+
120 #include "./gtc/matrix_integer.hpp"
+
121 #include "./gtc/matrix_inverse.hpp"
+ +
123 #include "./gtc/noise.hpp"
+
124 #include "./gtc/packing.hpp"
+
125 #include "./gtc/quaternion.hpp"
+
126 #include "./gtc/random.hpp"
+
127 #include "./gtc/reciprocal.hpp"
+
128 #include "./gtc/round.hpp"
+
129 #include "./gtc/type_precision.hpp"
+
130 #include "./gtc/type_ptr.hpp"
+
131 #include "./gtc/ulp.hpp"
+
132 #include "./gtc/vec1.hpp"
+
133 #if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE
+
134 # include "./gtc/type_aligned.hpp"
+
135 #endif
+
136 
+
137 #ifdef GLM_ENABLE_EXPERIMENTAL
+ +
139 #include "./gtx/bit.hpp"
+
140 #include "./gtx/closest_point.hpp"
+
141 #include "./gtx/color_encoding.hpp"
+
142 #include "./gtx/color_space.hpp"
+ +
144 #include "./gtx/compatibility.hpp"
+
145 #include "./gtx/component_wise.hpp"
+
146 #include "./gtx/dual_quaternion.hpp"
+
147 #include "./gtx/euler_angles.hpp"
+
148 #include "./gtx/extend.hpp"
+ + + + +
153 #include "./gtx/functions.hpp"
+
154 #include "./gtx/gradient_paint.hpp"
+ +
156 #include "./gtx/integer.hpp"
+
157 #include "./gtx/intersect.hpp"
+
158 #include "./gtx/log_base.hpp"
+ + + + +
163 #include "./gtx/matrix_query.hpp"
+
164 #include "./gtx/mixed_product.hpp"
+
165 #include "./gtx/norm.hpp"
+
166 #include "./gtx/normal.hpp"
+
167 #include "./gtx/normalize_dot.hpp"
+ +
169 #include "./gtx/optimum_pow.hpp"
+
170 #include "./gtx/orthonormalize.hpp"
+
171 #include "./gtx/perpendicular.hpp"
+ +
173 #include "./gtx/projection.hpp"
+
174 #include "./gtx/quaternion.hpp"
+
175 #include "./gtx/raw_data.hpp"
+
176 #include "./gtx/rotate_vector.hpp"
+
177 #include "./gtx/spline.hpp"
+
178 #include "./gtx/std_based_type.hpp"
+
179 #if !(GLM_COMPILER & GLM_COMPILER_CUDA)
+
180 # include "./gtx/string_cast.hpp"
+
181 #endif
+
182 #include "./gtx/transform.hpp"
+
183 #include "./gtx/transform2.hpp"
+
184 #include "./gtx/vec_swizzle.hpp"
+
185 #include "./gtx/vector_angle.hpp"
+
186 #include "./gtx/vector_query.hpp"
+
187 #include "./gtx/wrap.hpp"
+
188 
+
189 #if GLM_HAS_TEMPLATE_ALIASES
+ +
191 #endif
+
192 
+
193 #if GLM_HAS_RANGE_FOR
+
194 # include "./gtx/range.hpp"
+
195 #endif
+
196 #endif//GLM_ENABLE_EXPERIMENTAL
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00028.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00028.html
new file mode 100644
index 0000000000000000000000000000000000000000..99d4646d54ad11886a8dd648c8c9e80befb43f0b
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00028.html
@@ -0,0 +1,119 @@
extend.hpp File Reference

GLM_GTX_extend  More...

Go to the source code of this file.

Functions

template<typename genType>
GLM_FUNC_DECL genType extend (genType const &Origin, genType const &Source, typename genType::value_type const Length)
    Extends the Origin position by Length along the (Source - Origin) direction.  More...

Detailed Description

GLM_GTX_extend

See also
    Core features (dependence)

Definition in file extend.hpp.
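
A usage sketch (values illustrative): per the description above, extend returns Origin + (Source - Origin) * Length, so Length values above 1 overshoot Source while values in [0, 1] interpolate toward it.

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/extend.hpp>

    glm::vec3 const origin(0.0f);
    glm::vec3 const source(0.0f, 0.0f, 1.0f);
    // Twice as far from origin as source along the same direction: (0, 0, 2).
    glm::vec3 const p = glm::extend(origin, source, 2.0f);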
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00028_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00028_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..71f6bf9a5528029edef45039ec34ce23372d82e6
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00028_source.html
@@ -0,0 +1,127 @@
extend.hpp

Go to the documentation of this file.

#pragma once

// Dependency:
#include "../glm.hpp"

#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
#	ifndef GLM_ENABLE_EXPERIMENTAL
#		pragma message("GLM: GLM_GTX_extend is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
#	else
#		pragma message("GLM: GLM_GTX_extend extension included")
#	endif
#endif

namespace glm
{
    template<typename genType>
    GLM_FUNC_DECL genType extend(
        genType const& Origin,
        genType const& Source,
        typename genType::value_type const Length);
}//namespace glm

#include "extend.inl"
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00029.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00029.html
new file mode 100644
index 0000000000000000000000000000000000000000..892f596aa321f020cedb5d42b432e85060ea4c18
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00029.html
@@ -0,0 +1,183 @@
extended_min_max.hpp File Reference

GLM_GTX_extented_min_max  More...

Go to the source code of this file.

Functions

template<typename genType>
GLM_FUNC_DECL genType fclamp (genType x, genType minVal, genType maxVal)
    Returns min(max(x, minVal), maxVal) for each component in x.  More...

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, T, Q> fclamp (vec<L, T, Q> const &x, T minVal, T maxVal)
    Returns min(max(x, minVal), maxVal) for each component in x.  More...

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, T, Q> fclamp (vec<L, T, Q> const &x, vec<L, T, Q> const &minVal, vec<L, T, Q> const &maxVal)
    Returns min(max(x, minVal), maxVal) for each component in x.  More...

template<typename genType>
GLM_FUNC_DECL genType fmax (genType x, genType y)
    Returns y if x < y; otherwise, it returns x.  More...

template<typename genType>
GLM_FUNC_DECL genType fmin (genType x, genType y)
    Returns y if y < x; otherwise, it returns x.  More...

template<typename T>
GLM_FUNC_DECL T max (T const &x, T const &y, T const &z)
    Return the maximum component-wise values of 3 inputs.  More...

template<typename T, template<typename> class C>
GLM_FUNC_DECL C<T> max (C<T> const &x, typename C<T>::T const &y, typename C<T>::T const &z)
    Return the maximum component-wise values of 3 inputs.  More...

template<typename T, template<typename> class C>
GLM_FUNC_DECL C<T> max (C<T> const &x, C<T> const &y, C<T> const &z)
    Return the maximum component-wise values of 3 inputs.  More...

template<typename T>
GLM_FUNC_DECL T max (T const &x, T const &y, T const &z, T const &w)
    Return the maximum component-wise values of 4 inputs.  More...

template<typename T, template<typename> class C>
GLM_FUNC_DECL C<T> max (C<T> const &x, typename C<T>::T const &y, typename C<T>::T const &z, typename C<T>::T const &w)
    Return the maximum component-wise values of 4 inputs.  More...

template<typename T, template<typename> class C>
GLM_FUNC_DECL C<T> max (C<T> const &x, C<T> const &y, C<T> const &z, C<T> const &w)
    Return the maximum component-wise values of 4 inputs.  More...

template<typename T>
GLM_FUNC_DECL T min (T const &x, T const &y, T const &z)
    Return the minimum component-wise values of 3 inputs.  More...

template<typename T, template<typename> class C>
GLM_FUNC_DECL C<T> min (C<T> const &x, typename C<T>::T const &y, typename C<T>::T const &z)
    Return the minimum component-wise values of 3 inputs.  More...

template<typename T, template<typename> class C>
GLM_FUNC_DECL C<T> min (C<T> const &x, C<T> const &y, C<T> const &z)
    Return the minimum component-wise values of 3 inputs.  More...

template<typename T>
GLM_FUNC_DECL T min (T const &x, T const &y, T const &z, T const &w)
    Return the minimum component-wise values of 4 inputs.  More...

template<typename T, template<typename> class C>
GLM_FUNC_DECL C<T> min (C<T> const &x, typename C<T>::T const &y, typename C<T>::T const &z, typename C<T>::T const &w)
    Return the minimum component-wise values of 4 inputs.  More...

template<typename T, template<typename> class C>
GLM_FUNC_DECL C<T> min (C<T> const &x, C<T> const &y, C<T> const &z, C<T> const &w)
    Return the minimum component-wise values of 4 inputs.  More...

Detailed Description

GLM_GTX_extented_min_max

See also
    Core features (dependence)

Definition in file extended_min_max.hpp.
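
A usage sketch for the overloads above (values illustrative). The three- and four-argument min/max avoid nested calls, and fclamp follows the min(max(x, minVal), maxVal) definition given in the briefs:

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/extended_min_max.hpp>

    float const lo = glm::min(3.0f, 1.0f, 2.0f);        // 1: three-way min
    float const hi = glm::max(3.0f, 1.0f, 2.0f, 4.0f);  // 4: four-way max
    glm::vec2 const c = glm::fclamp(glm::vec2(-0.5f, 1.5f), 0.0f, 1.0f); // (0, 1)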
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00029_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00029_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..2cb43baf335cced1c4e64d845e80f444a6ffd12a
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00029_source.html
@@ -0,0 +1,219 @@
extended_min_max.hpp

Go to the documentation of this file.

#pragma once

// Dependency:
#include "../glm.hpp"

#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
#	ifndef GLM_ENABLE_EXPERIMENTAL
#		pragma message("GLM: GLM_GTX_extented_min_max is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
#	else
#		pragma message("GLM: GLM_GTX_extented_min_max extension included")
#	endif
#endif

namespace glm
{
    template<typename T>
    GLM_FUNC_DECL T min(
        T const& x,
        T const& y,
        T const& z);

    template<typename T, template<typename> class C>
    GLM_FUNC_DECL C<T> min(
        C<T> const& x,
        typename C<T>::T const& y,
        typename C<T>::T const& z);

    template<typename T, template<typename> class C>
    GLM_FUNC_DECL C<T> min(
        C<T> const& x,
        C<T> const& y,
        C<T> const& z);

    template<typename T>
    GLM_FUNC_DECL T min(
        T const& x,
        T const& y,
        T const& z,
        T const& w);

    template<typename T, template<typename> class C>
    GLM_FUNC_DECL C<T> min(
        C<T> const& x,
        typename C<T>::T const& y,
        typename C<T>::T const& z,
        typename C<T>::T const& w);

    template<typename T, template<typename> class C>
    GLM_FUNC_DECL C<T> min(
        C<T> const& x,
        C<T> const& y,
        C<T> const& z,
        C<T> const& w);

    template<typename T>
    GLM_FUNC_DECL T max(
        T const& x,
        T const& y,
        T const& z);

    template<typename T, template<typename> class C>
    GLM_FUNC_DECL C<T> max(
        C<T> const& x,
        typename C<T>::T const& y,
        typename C<T>::T const& z);

    template<typename T, template<typename> class C>
    GLM_FUNC_DECL C<T> max(
        C<T> const& x,
        C<T> const& y,
        C<T> const& z);

    template<typename T>
    GLM_FUNC_DECL T max(
        T const& x,
        T const& y,
        T const& z,
        T const& w);

    template<typename T, template<typename> class C>
    GLM_FUNC_DECL C<T> max(
        C<T> const& x,
        typename C<T>::T const& y,
        typename C<T>::T const& z,
        typename C<T>::T const& w);

    template<typename T, template<typename> class C>
    GLM_FUNC_DECL C<T> max(
        C<T> const& x,
        C<T> const& y,
        C<T> const& z,
        C<T> const& w);

    template<typename genType>
    GLM_FUNC_DECL genType fmin(genType x, genType y);

    template<typename genType>
    GLM_FUNC_DECL genType fmax(genType x, genType y);

    template<typename genType>
    GLM_FUNC_DECL genType fclamp(genType x, genType minVal, genType maxVal);

    template<length_t L, typename T, qualifier Q>
    GLM_FUNC_DECL vec<L, T, Q> fclamp(vec<L, T, Q> const& x, T minVal, T maxVal);

    template<length_t L, typename T, qualifier Q>
    GLM_FUNC_DECL vec<L, T, Q> fclamp(vec<L, T, Q> const& x, vec<L, T, Q> const& minVal, vec<L, T, Q> const& maxVal);

}//namespace glm

#include "extended_min_max.inl"
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00030.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00030.html
new file mode 100644
index 0000000000000000000000000000000000000000..5f1b5b83c993064b26b4b06d0ece9b55fc1a51e2
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00030.html
@@ -0,0 +1,121 @@
exterior_product.hpp File Reference

GLM_GTX_exterior_product  More...

Go to the source code of this file.

Functions

template<typename T, qualifier Q>
GLM_FUNC_DECL T cross (vec<2, T, Q> const &v, vec<2, T, Q> const &u)
    Returns the cross product of v and u.  More...

Detailed Description

GLM_GTX_exterior_product

See also
    Core features (dependence)
    GLM_GTX_exterior_product (dependence)

Definition in file exterior_product.hpp.
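
A usage sketch (the algebra here is stated for illustration): the 2D exterior product is the scalar v.x * u.y - v.y * u.x, i.e. the signed area of the parallelogram spanned by v and u, positive when u lies counter-clockwise from v.

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/exterior_product.hpp>

    // Unit axes span a unit square, so the exterior product is 1.
    float const area = glm::cross(glm::vec2(1.0f, 0.0f), glm::vec2(0.0f, 1.0f));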
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00030_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00030_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..9bdf622e7b57a79b72ddc1a5396f6496fa87a04a
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00030_source.html
@@ -0,0 +1,125 @@
exterior_product.hpp

Go to the documentation of this file.

#pragma once

// Dependencies
#include "../detail/setup.hpp"
#include "../detail/qualifier.hpp"

#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
#	ifndef GLM_ENABLE_EXPERIMENTAL
#		pragma message("GLM: GLM_GTX_exterior_product is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
#	else
#		pragma message("GLM: GLM_GTX_exterior_product extension included")
#	endif
#endif

namespace glm
{
    template<typename T, qualifier Q>
    GLM_FUNC_DECL T cross(vec<2, T, Q> const& v, vec<2, T, Q> const& u);
} //namespace glm

#include "exterior_product.inl"
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00031.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00031.html
new file mode 100644
index 0000000000000000000000000000000000000000..40ce828f460d081bd85a6f58d4a5987de8a5cb3b
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00031.html
@@ -0,0 +1,165 @@
fast_exponential.hpp File Reference

GLM_GTX_fast_exponential  More...

Go to the source code of this file.

Functions

template<typename T>
GLM_FUNC_DECL T fastExp (T x)
    Faster than the common exp function but less accurate.  More...

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, T, Q> fastExp (vec<L, T, Q> const &x)
    Faster than the common exp function but less accurate.  More...

template<typename T>
GLM_FUNC_DECL T fastExp2 (T x)
    Faster than the common exp2 function but less accurate.  More...

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, T, Q> fastExp2 (vec<L, T, Q> const &x)
    Faster than the common exp2 function but less accurate.  More...

template<typename T>
GLM_FUNC_DECL T fastLog (T x)
    Faster than the common log function but less accurate.  More...

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, T, Q> fastLog (vec<L, T, Q> const &x)
    Faster than the common log function but less accurate.  More...

template<typename T>
GLM_FUNC_DECL T fastLog2 (T x)
    Faster than the common log2 function but less accurate.  More...

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, T, Q> fastLog2 (vec<L, T, Q> const &x)
    Faster than the common log2 function but less accurate.  More...

template<typename genType>
GLM_FUNC_DECL genType fastPow (genType x, genType y)
    Faster than the common pow function but less accurate.  More...

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, T, Q> fastPow (vec<L, T, Q> const &x, vec<L, T, Q> const &y)
    Faster than the common pow function but less accurate.  More...

template<typename genTypeT, typename genTypeU>
GLM_FUNC_DECL genTypeT fastPow (genTypeT x, genTypeU y)
    Faster than the common pow function but less accurate.  More...

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, T, Q> fastPow (vec<L, T, Q> const &x)
    Faster than the common pow function but less accurate.  More...

Detailed Description

GLM_GTX_fast_exponential

See also
    Core features (dependence)
    gtx_half_float (dependence)

Definition in file fast_exponential.hpp.
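
A usage sketch: the fast* functions trade accuracy for speed, so comparing against the exact standard-library result is a reasonable sanity check (tolerance illustrative):

    #define GLM_ENABLE_EXPERIMENTAL
    #include <cmath>
    #include <glm/glm.hpp>
    #include <glm/gtx/fast_exponential.hpp>

    float const approx = glm::fastPow(2.0f, 10.0f); // close to, not exactly, 1024
    float const exact  = std::pow(2.0f, 10.0f);
    float const err    = std::abs(approx - exact);  // small but generally non-zero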
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00031_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00031_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..40945b5be14fd3fdc589085f901cd07e1153d7a4
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00031_source.html
@@ -0,0 +1,161 @@
fast_exponential.hpp

Go to the documentation of this file.

#pragma once

// Dependency:
#include "../glm.hpp"

#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
#	ifndef GLM_ENABLE_EXPERIMENTAL
#		pragma message("GLM: GLM_GTX_fast_exponential is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
#	else
#		pragma message("GLM: GLM_GTX_fast_exponential extension included")
#	endif
#endif

namespace glm
{
    template<typename genType>
    GLM_FUNC_DECL genType fastPow(genType x, genType y);

    template<length_t L, typename T, qualifier Q>
    GLM_FUNC_DECL vec<L, T, Q> fastPow(vec<L, T, Q> const& x, vec<L, T, Q> const& y);

    template<typename genTypeT, typename genTypeU>
    GLM_FUNC_DECL genTypeT fastPow(genTypeT x, genTypeU y);

    template<length_t L, typename T, qualifier Q>
    GLM_FUNC_DECL vec<L, T, Q> fastPow(vec<L, T, Q> const& x);

    template<typename T>
    GLM_FUNC_DECL T fastExp(T x);

    template<length_t L, typename T, qualifier Q>
    GLM_FUNC_DECL vec<L, T, Q> fastExp(vec<L, T, Q> const& x);

    template<typename T>
    GLM_FUNC_DECL T fastLog(T x);

    template<length_t L, typename T, qualifier Q>
    GLM_FUNC_DECL vec<L, T, Q> fastLog(vec<L, T, Q> const& x);

    template<typename T>
    GLM_FUNC_DECL T fastExp2(T x);

    template<length_t L, typename T, qualifier Q>
    GLM_FUNC_DECL vec<L, T, Q> fastExp2(vec<L, T, Q> const& x);

    template<typename T>
    GLM_FUNC_DECL T fastLog2(T x);

    template<length_t L, typename T, qualifier Q>
    GLM_FUNC_DECL vec<L, T, Q> fastLog2(vec<L, T, Q> const& x);

}//namespace glm

#include "fast_exponential.inl"
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00032.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00032.html
new file mode 100644
index 0000000000000000000000000000000000000000..7ba4fe4fa15cced54c3c5a6170444c67ca1760be
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00032.html
@@ -0,0 +1,151 @@
fast_square_root.hpp File Reference

GLM_GTX_fast_square_root  More...

Go to the source code of this file.

Functions

template<typename genType>
GLM_FUNC_DECL genType fastDistance (genType x, genType y)
    Faster than the common distance function but less accurate.  More...

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL T fastDistance (vec<L, T, Q> const &x, vec<L, T, Q> const &y)
    Faster than the common distance function but less accurate.  More...

template<typename genType>
GLM_FUNC_DECL genType fastInverseSqrt (genType x)
    Faster than the common inversesqrt function but less accurate.  More...

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, T, Q> fastInverseSqrt (vec<L, T, Q> const &x)
    Faster than the common inversesqrt function but less accurate.  More...

template<typename genType>
GLM_FUNC_DECL genType fastLength (genType x)
    Faster than the common length function but less accurate.  More...

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL T fastLength (vec<L, T, Q> const &x)
    Faster than the common length function but less accurate.  More...

template<typename genType>
GLM_FUNC_DECL genType fastNormalize (genType const &x)
    Faster than the common normalize function but less accurate.  More...

template<typename genType>
GLM_FUNC_DECL genType fastSqrt (genType x)
    Faster than the common sqrt function but less accurate.  More...

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, T, Q> fastSqrt (vec<L, T, Q> const &x)
    Faster than the common sqrt function but less accurate.  More...

Detailed Description

GLM_GTX_fast_square_root

See also
    Core features (dependence)

Definition in file fast_square_root.hpp.
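
A usage sketch: fastNormalize is a drop-in for normalize where a small length error is acceptable, e.g. per-frame direction vectors (values illustrative):

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/fast_square_root.hpp>

    glm::vec3 const v(3.0f, 4.0f, 0.0f);
    float const len   = glm::fastLength(v);    // roughly 5
    glm::vec3 const n = glm::fastNormalize(v); // roughly (0.6, 0.8, 0)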
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00032_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00032_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..36328925d009a8213fc24871c721623574c4a2c5
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00032_source.html
@@ -0,0 +1,154 @@
fast_square_root.hpp

Go to the documentation of this file.

#pragma once

// Dependency:
#include "../common.hpp"
#include "../exponential.hpp"
#include "../geometric.hpp"

#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
#	ifndef GLM_ENABLE_EXPERIMENTAL
#		pragma message("GLM: GLM_GTX_fast_square_root is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
#	else
#		pragma message("GLM: GLM_GTX_fast_square_root extension included")
#	endif
#endif

namespace glm
{
    template<typename genType>
    GLM_FUNC_DECL genType fastSqrt(genType x);

    template<length_t L, typename T, qualifier Q>
    GLM_FUNC_DECL vec<L, T, Q> fastSqrt(vec<L, T, Q> const& x);

    template<typename genType>
    GLM_FUNC_DECL genType fastInverseSqrt(genType x);

    template<length_t L, typename T, qualifier Q>
    GLM_FUNC_DECL vec<L, T, Q> fastInverseSqrt(vec<L, T, Q> const& x);

    template<typename genType>
    GLM_FUNC_DECL genType fastLength(genType x);

    template<length_t L, typename T, qualifier Q>
    GLM_FUNC_DECL T fastLength(vec<L, T, Q> const& x);

    template<typename genType>
    GLM_FUNC_DECL genType fastDistance(genType x, genType y);

    template<length_t L, typename T, qualifier Q>
    GLM_FUNC_DECL T fastDistance(vec<L, T, Q> const& x, vec<L, T, Q> const& y);

    template<typename genType>
    GLM_FUNC_DECL genType fastNormalize(genType const& x);

}// namespace glm

#include "fast_square_root.inl"
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00033.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00033.html
new file mode 100644
index 0000000000000000000000000000000000000000..3b49687028d6f56e5c4ce67fc31f8160c8662044
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00033.html
@@ -0,0 +1,147 @@
fast_trigonometry.hpp File Reference

GLM_GTX_fast_trigonometry  More...

Go to the source code of this file.

Functions

template<typename T>
GLM_FUNC_DECL T fastAcos (T angle)
    Faster than the common acos function but less accurate.  More...

template<typename T>
GLM_FUNC_DECL T fastAsin (T angle)
    Faster than the common asin function but less accurate.  More...

template<typename T>
GLM_FUNC_DECL T fastAtan (T y, T x)
    Faster than the common atan function but less accurate.  More...

template<typename T>
GLM_FUNC_DECL T fastAtan (T angle)
    Faster than the common atan function but less accurate.  More...

template<typename T>
GLM_FUNC_DECL T fastCos (T angle)
    Faster than the common cos function but less accurate.  More...

template<typename T>
GLM_FUNC_DECL T fastSin (T angle)
    Faster than the common sin function but less accurate.  More...

template<typename T>
GLM_FUNC_DECL T fastTan (T angle)
    Faster than the common tan function but less accurate.  More...

template<typename T>
GLM_FUNC_DECL T wrapAngle (T angle)
    Wraps an angle to the interval [0, 2pi). From GLM_GTX_fast_trigonometry extension.  More...

Detailed Description

GLM_GTX_fast_trigonometry

See also
    Core features (dependence)

Definition in file fast_trigonometry.hpp.
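
A usage sketch: since the approximations assume a bounded input range, wrapAngle pairs naturally with fastSin and friends (values illustrative):

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/fast_trigonometry.hpp>

    float const a = glm::wrapAngle(7.0f); // 7 - 2*pi, back in [0, 2pi)
    float const s = glm::fastSin(a);      // approximates std::sin(7.0f)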
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00033_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00033_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..c02ae841f0963c1a2e6e89e88f4d9256bce90de5
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00033_source.html
@@ -0,0 +1,152 @@
fast_trigonometry.hpp

Go to the documentation of this file.

#pragma once

// Dependency:
#include "../gtc/constants.hpp"

#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
#	ifndef GLM_ENABLE_EXPERIMENTAL
#		pragma message("GLM: GLM_GTX_fast_trigonometry is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
#	else
#		pragma message("GLM: GLM_GTX_fast_trigonometry extension included")
#	endif
#endif

namespace glm
{
    template<typename T>
    GLM_FUNC_DECL T wrapAngle(T angle);

    template<typename T>
    GLM_FUNC_DECL T fastSin(T angle);

    template<typename T>
    GLM_FUNC_DECL T fastCos(T angle);

    template<typename T>
    GLM_FUNC_DECL T fastTan(T angle);

    template<typename T>
    GLM_FUNC_DECL T fastAsin(T angle);

    template<typename T>
    GLM_FUNC_DECL T fastAcos(T angle);

    template<typename T>
    GLM_FUNC_DECL T fastAtan(T y, T x);

    template<typename T>
    GLM_FUNC_DECL T fastAtan(T angle);

}//namespace glm

#include "fast_trigonometry.inl"
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00034.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00034.html
new file mode 100644
index 0000000000000000000000000000000000000000..00e437cdffaaaccaef4ac3c1514d94bb7e1ffadd
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00034.html
@@ -0,0 +1,125 @@
functions.hpp File Reference

GLM_GTX_functions  More...

Go to the source code of this file.

Functions

template<typename T>
GLM_FUNC_DECL T gauss (T x, T ExpectedValue, T StandardDeviation)
    1D gauss function.  More...

template<typename T, qualifier Q>
GLM_FUNC_DECL T gauss (vec<2, T, Q> const &Coord, vec<2, T, Q> const &ExpectedValue, vec<2, T, Q> const &StandardDeviation)
    2D gauss function.  More...

Detailed Description

GLM_GTX_functions

See also
    Core features (dependence)
    GLM_GTC_quaternion (dependence)

Definition in file functions.hpp.
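
A usage sketch: gauss evaluates a Gaussian bump centred at ExpectedValue with spread StandardDeviation, handy for weights and falloff curves. The exact scaling is defined in functions.inl, so the absolute values below are illustrative; only the peak-then-decay shape is relied on:

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/functions.hpp>

    float const w0 = glm::gauss(0.0f, 0.0f, 1.0f); // peak weight at the mean
    float const w2 = glm::gauss(2.0f, 0.0f, 1.0f); // strictly smaller than w0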
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00034_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00034_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..2d206e04e1f9180ae9bc956351eb36b088262ba6
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00034_source.html
@@ -0,0 +1,136 @@
functions.hpp

Go to the documentation of this file.

#pragma once

// Dependencies
#include "../detail/setup.hpp"
#include "../detail/qualifier.hpp"
#include "../detail/type_vec2.hpp"

#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
#	ifndef GLM_ENABLE_EXPERIMENTAL
#		pragma message("GLM: GLM_GTX_functions is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
#	else
#		pragma message("GLM: GLM_GTX_functions extension included")
#	endif
#endif

namespace glm
{
    template<typename T>
    GLM_FUNC_DECL T gauss(
        T x,
        T ExpectedValue,
        T StandardDeviation);

    template<typename T, qualifier Q>
    GLM_FUNC_DECL T gauss(
        vec<2, T, Q> const& Coord,
        vec<2, T, Q> const& ExpectedValue,
        vec<2, T, Q> const& StandardDeviation);

}//namespace glm

#include "functions.inl"
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00035_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00035_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..52efb83f6a0630df3c09a137811ca41f1ddc30c0
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00035_source.html
@@ -0,0 +1,1015 @@
fwd.hpp

#pragma once

#include "detail/qualifier.hpp"

namespace glm
{
#if GLM_HAS_EXTENDED_INTEGER_TYPE
    typedef std::int8_t int8;
    typedef std::int16_t int16;
    typedef std::int32_t int32;
    typedef std::int64_t int64;

    typedef std::uint8_t uint8;
    typedef std::uint16_t uint16;
    typedef std::uint32_t uint32;
    typedef std::uint64_t uint64;
#else
    typedef signed char int8;
    typedef signed short int16;
    typedef signed int int32;
    typedef detail::int64 int64;

    typedef unsigned char uint8;
    typedef unsigned short uint16;
    typedef unsigned int uint32;
    typedef detail::uint64 uint64;
#endif

    // Scalar int

    typedef int8 lowp_i8;
    typedef int8 mediump_i8;
    typedef int8 highp_i8;
    typedef int8 i8;

    typedef int8 lowp_int8;
    typedef int8 mediump_int8;
    typedef int8 highp_int8;

    typedef int8 lowp_int8_t;
    typedef int8 mediump_int8_t;
    typedef int8 highp_int8_t;
    typedef int8 int8_t;

    typedef int16 lowp_i16;
    typedef int16 mediump_i16;
    typedef int16 highp_i16;
    typedef int16 i16;

    typedef int16 lowp_int16;
    typedef int16 mediump_int16;
    typedef int16 highp_int16;

    typedef int16 lowp_int16_t;
    typedef int16 mediump_int16_t;
    typedef int16 highp_int16_t;
    typedef int16 int16_t;

    typedef int32 lowp_i32;
    typedef int32 mediump_i32;
    typedef int32 highp_i32;
    typedef int32 i32;

    typedef int32 lowp_int32;
    typedef int32 mediump_int32;
    typedef int32 highp_int32;

    typedef int32 lowp_int32_t;
    typedef int32 mediump_int32_t;
    typedef int32 highp_int32_t;
    typedef int32 int32_t;

    typedef int64 lowp_i64;
    typedef int64 mediump_i64;
    typedef int64 highp_i64;
    typedef int64 i64;

    typedef int64 lowp_int64;
    typedef int64 mediump_int64;
    typedef int64 highp_int64;

    typedef int64 lowp_int64_t;
    typedef int64 mediump_int64_t;
    typedef int64 highp_int64_t;
    typedef int64 int64_t;

    // Scalar uint

    typedef uint8 lowp_u8;
    typedef uint8 mediump_u8;
    typedef uint8 highp_u8;
    typedef uint8 u8;

    typedef uint8 lowp_uint8;
    typedef uint8 mediump_uint8;
    typedef uint8 highp_uint8;

    typedef uint8 lowp_uint8_t;
    typedef uint8 mediump_uint8_t;
    typedef uint8 highp_uint8_t;
    typedef uint8 uint8_t;

    typedef uint16 lowp_u16;
    typedef uint16 mediump_u16;
    typedef uint16 highp_u16;
    typedef uint16 u16;

    typedef uint16 lowp_uint16;
    typedef uint16 mediump_uint16;
    typedef uint16 highp_uint16;

    typedef uint16 lowp_uint16_t;
    typedef uint16 mediump_uint16_t;
    typedef uint16 highp_uint16_t;
    typedef uint16 uint16_t;

    typedef uint32 lowp_u32;
    typedef uint32 mediump_u32;
    typedef uint32 highp_u32;
    typedef uint32 u32;

    typedef uint32 lowp_uint32;
    typedef uint32 mediump_uint32;
    typedef uint32 highp_uint32;

    typedef uint32 lowp_uint32_t;
    typedef uint32 mediump_uint32_t;
    typedef uint32 highp_uint32_t;
    typedef uint32 uint32_t;

    typedef uint64 lowp_u64;
    typedef uint64 mediump_u64;
    typedef uint64 highp_u64;
    typedef uint64 u64;

    typedef uint64 lowp_uint64;
    typedef uint64 mediump_uint64;
    typedef uint64 highp_uint64;

    typedef uint64 lowp_uint64_t;
    typedef uint64 mediump_uint64_t;
    typedef uint64 highp_uint64_t;
    typedef uint64 uint64_t;

    // Scalar float

    typedef float lowp_f32;
    typedef float mediump_f32;
    typedef float highp_f32;
    typedef float f32;

    typedef float lowp_float32;
    typedef float mediump_float32;
    typedef float highp_float32;
    typedef float float32;

    typedef float lowp_float32_t;
    typedef float mediump_float32_t;
    typedef float highp_float32_t;
    typedef float float32_t;

    typedef double lowp_f64;
    typedef double mediump_f64;
    typedef double highp_f64;
    typedef double f64;

    typedef double lowp_float64;
    typedef double mediump_float64;
    typedef double highp_float64;
    typedef double float64;

    typedef double lowp_float64_t;
    typedef double mediump_float64_t;
    typedef double highp_float64_t;
    typedef double float64_t;

    // Vector bool

    typedef vec<1, bool, lowp> lowp_bvec1;
    typedef vec<2, bool, lowp> lowp_bvec2;
    typedef vec<3, bool, lowp> lowp_bvec3;
    typedef vec<4, bool, lowp> lowp_bvec4;

    typedef vec<1, bool, mediump> mediump_bvec1;
    typedef vec<2, bool, mediump> mediump_bvec2;
    typedef vec<3, bool, mediump> mediump_bvec3;
    typedef vec<4, bool, mediump> mediump_bvec4;

    typedef vec<1, bool, highp> highp_bvec1;
    typedef vec<2, bool, highp> highp_bvec2;
    typedef vec<3, bool, highp> highp_bvec3;
    typedef vec<4, bool, highp> highp_bvec4;

    typedef vec<1, bool, defaultp> bvec1;
    typedef vec<2, bool, defaultp> bvec2;
    typedef vec<3, bool, defaultp> bvec3;
    typedef vec<4, bool, defaultp> bvec4;

    // Vector int

    typedef vec<1, i32, lowp> lowp_ivec1;
    typedef vec<2, i32, lowp> lowp_ivec2;
    typedef vec<3, i32, lowp> lowp_ivec3;
    typedef vec<4, i32, lowp> lowp_ivec4;

    typedef vec<1, i32, mediump> mediump_ivec1;
    typedef vec<2, i32, mediump> mediump_ivec2;
    typedef vec<3, i32, mediump> mediump_ivec3;
    typedef vec<4, i32, mediump> mediump_ivec4;

    typedef vec<1, i32, highp> highp_ivec1;
    typedef vec<2, i32, highp> highp_ivec2;
    typedef vec<3, i32, highp> highp_ivec3;
    typedef vec<4, i32, highp> highp_ivec4;

    typedef vec<1, i32, defaultp> ivec1;
    typedef vec<2, i32, defaultp> ivec2;
    typedef vec<3, i32, defaultp> ivec3;
    typedef vec<4, i32, defaultp> ivec4;

    typedef vec<1, i8, lowp> lowp_i8vec1;
    typedef vec<2, i8, lowp> lowp_i8vec2;
    typedef vec<3, i8, lowp> lowp_i8vec3;
    typedef vec<4, i8, lowp> lowp_i8vec4;

    typedef vec<1, i8, mediump> mediump_i8vec1;
    typedef vec<2, i8, mediump> mediump_i8vec2;
    typedef vec<3, i8, mediump> mediump_i8vec3;
    typedef vec<4, i8, mediump> mediump_i8vec4;

    typedef vec<1, i8, highp> highp_i8vec1;
    typedef vec<2, i8, highp> highp_i8vec2;
    typedef vec<3, i8, highp> highp_i8vec3;
    typedef vec<4, i8, highp> highp_i8vec4;

    typedef vec<1, i8, defaultp> i8vec1;
    typedef vec<2, i8, defaultp> i8vec2;
    typedef vec<3, i8, defaultp> i8vec3;
    typedef vec<4, i8, defaultp> i8vec4;

    typedef vec<1, i16, lowp> lowp_i16vec1;
    typedef vec<2, i16, lowp> lowp_i16vec2;
    typedef vec<3, i16, lowp> lowp_i16vec3;
    typedef vec<4, i16, lowp> lowp_i16vec4;

    typedef vec<1, i16, mediump> mediump_i16vec1;
    typedef vec<2, i16, mediump> mediump_i16vec2;
    typedef vec<3, i16, mediump> mediump_i16vec3;
    typedef vec<4, i16, mediump> mediump_i16vec4;

    typedef vec<1, i16, highp> highp_i16vec1;
    typedef vec<2, i16, highp> highp_i16vec2;
    typedef vec<3, i16, highp> highp_i16vec3;
    typedef vec<4, i16, highp> highp_i16vec4;

    typedef vec<1, i16, defaultp> i16vec1;
    typedef vec<2, i16, defaultp> i16vec2;
    typedef vec<3, i16, defaultp> i16vec3;
    typedef vec<4, i16, defaultp> i16vec4;

    typedef vec<1, i32, lowp> lowp_i32vec1;
    typedef vec<2, i32, lowp> lowp_i32vec2;
    typedef vec<3, i32, lowp> lowp_i32vec3;
    typedef vec<4, i32, lowp> lowp_i32vec4;

    typedef vec<1, i32, mediump> mediump_i32vec1;
    typedef vec<2, i32, mediump> mediump_i32vec2;
    typedef vec<3, i32, mediump> mediump_i32vec3;
    typedef vec<4, i32, mediump> mediump_i32vec4;

    typedef vec<1, i32, highp> highp_i32vec1;
    typedef vec<2, i32, highp> highp_i32vec2;
    typedef vec<3, i32, highp> highp_i32vec3;
    typedef vec<4, i32, highp> highp_i32vec4;

    typedef vec<1, i32, defaultp> i32vec1;
    typedef vec<2, i32, defaultp> i32vec2;
    typedef vec<3, i32, defaultp> i32vec3;
    typedef vec<4, i32, defaultp> i32vec4;

    typedef vec<1, i64, lowp> lowp_i64vec1;
    typedef vec<2, i64, lowp> lowp_i64vec2;
    typedef vec<3, i64, lowp> lowp_i64vec3;
    typedef vec<4, i64, lowp> lowp_i64vec4;

    typedef vec<1, i64, mediump> mediump_i64vec1;
    typedef vec<2, i64, mediump> mediump_i64vec2;
    typedef vec<3, i64, mediump> mediump_i64vec3;
    typedef vec<4, i64, mediump> mediump_i64vec4;

    typedef vec<1, i64, highp> highp_i64vec1;
    typedef vec<2, i64, highp> highp_i64vec2;
    typedef vec<3, i64, highp> highp_i64vec3;
    typedef vec<4, i64, highp> highp_i64vec4;

    typedef vec<1, i64, defaultp> i64vec1;
    typedef vec<2, i64, defaultp> i64vec2;
    typedef vec<3, i64, defaultp> i64vec3;
    typedef vec<4, i64, defaultp> i64vec4;

    // Vector uint

    typedef vec<1, u32, lowp> lowp_uvec1;
    typedef vec<2, u32, lowp> lowp_uvec2;
    typedef vec<3, u32, lowp> lowp_uvec3;
    typedef vec<4, u32, lowp> lowp_uvec4;

    typedef vec<1, u32, mediump> mediump_uvec1;
    typedef vec<2, u32, mediump> mediump_uvec2;
    typedef vec<3, u32, mediump> mediump_uvec3;
    typedef vec<4, u32, mediump> mediump_uvec4;

    typedef vec<1, u32, highp> highp_uvec1;
    typedef vec<2, u32, highp> highp_uvec2;
    typedef vec<3, u32, highp> highp_uvec3;
    typedef vec<4, u32, highp> highp_uvec4;

    typedef vec<1, u32, defaultp> uvec1;
    typedef vec<2, u32, defaultp> uvec2;
    typedef vec<3, u32, defaultp> uvec3;
    typedef vec<4, u32, defaultp> uvec4;

    typedef vec<1, u8, lowp> lowp_u8vec1;
    typedef vec<2, u8, lowp> lowp_u8vec2;
    typedef vec<3, u8, lowp> lowp_u8vec3;
    typedef vec<4, u8, lowp> lowp_u8vec4;

    typedef vec<1, u8, mediump> mediump_u8vec1;
    typedef vec<2, u8, mediump> mediump_u8vec2;
    typedef vec<3, u8, mediump> mediump_u8vec3;
    typedef vec<4, u8, mediump> mediump_u8vec4;

    typedef vec<1, u8, highp> highp_u8vec1;
    typedef vec<2, u8, highp> highp_u8vec2;
    typedef vec<3, u8, highp> highp_u8vec3;
    typedef vec<4, u8, highp> highp_u8vec4;

    typedef vec<1, u8, defaultp> u8vec1;
    typedef vec<2, u8, defaultp> u8vec2;
    typedef vec<3, u8, defaultp> u8vec3;
    typedef vec<4, u8, defaultp> u8vec4;

    typedef vec<1, u16, lowp> lowp_u16vec1;
    typedef vec<2, u16, lowp> lowp_u16vec2;
    typedef vec<3, u16, lowp> lowp_u16vec3;
    typedef vec<4, u16, lowp> lowp_u16vec4;

    typedef vec<1, u16, mediump> mediump_u16vec1;
    typedef vec<2, u16, mediump> mediump_u16vec2;
    typedef vec<3, u16, mediump> mediump_u16vec3;
    typedef vec<4, u16, mediump> mediump_u16vec4;

    typedef vec<1, u16, highp> highp_u16vec1;
    typedef vec<2, u16, highp> highp_u16vec2;
    typedef vec<3, u16, highp> highp_u16vec3;
    typedef vec<4, u16, highp> highp_u16vec4;

    typedef vec<1, u16, defaultp> u16vec1;
    typedef vec<2, u16, defaultp> u16vec2;
    typedef vec<3, u16, defaultp> u16vec3;
    typedef vec<4, u16, defaultp> u16vec4;

    typedef vec<1, u32, lowp> lowp_u32vec1;
    typedef vec<2, u32, lowp> lowp_u32vec2;
    typedef vec<3, u32, lowp> lowp_u32vec3;
    typedef vec<4, u32, lowp> lowp_u32vec4;

    typedef vec<1, u32, mediump> mediump_u32vec1;
    typedef vec<2, u32, mediump> mediump_u32vec2;
    typedef vec<3, u32, mediump> mediump_u32vec3;
    typedef vec<4, u32, mediump> mediump_u32vec4;

    typedef vec<1, u32, highp> highp_u32vec1;
    typedef vec<2, u32, highp> highp_u32vec2;
    typedef vec<3, u32, highp> highp_u32vec3;
    typedef vec<4, u32, highp> highp_u32vec4;

    typedef vec<1, u32, defaultp> u32vec1;
    typedef vec<2, u32, defaultp> u32vec2;
    typedef vec<3, u32, defaultp> u32vec3;
    typedef vec<4, u32, defaultp> u32vec4;

    typedef vec<1, u64, lowp> lowp_u64vec1;
    typedef vec<2, u64, lowp> lowp_u64vec2;
    typedef vec<3, u64, lowp> lowp_u64vec3;
    typedef vec<4, u64, lowp> lowp_u64vec4;

    typedef vec<1, u64, mediump> mediump_u64vec1;
    typedef vec<2, u64, mediump> mediump_u64vec2;
    typedef vec<3, u64, mediump> mediump_u64vec3;
    typedef vec<4, u64, mediump> mediump_u64vec4;

    typedef vec<1, u64, highp> highp_u64vec1;
    typedef vec<2, u64, highp> highp_u64vec2;
    typedef vec<3, u64, highp> highp_u64vec3;
    typedef vec<4, u64, highp> highp_u64vec4;

    typedef vec<1, u64, defaultp> u64vec1;
    typedef vec<2, u64, defaultp> u64vec2;
    typedef vec<3, u64, defaultp> u64vec3;
    typedef vec<4, u64, defaultp> u64vec4;

    // Vector float

    typedef vec<1, float, lowp> lowp_vec1;
    typedef vec<2, float, lowp> lowp_vec2;
    typedef vec<3, float, lowp> lowp_vec3;
    typedef vec<4, float, lowp> lowp_vec4;

    typedef vec<1, float, mediump> mediump_vec1;
    typedef vec<2, float, mediump> mediump_vec2;
    typedef vec<3, float, mediump> mediump_vec3;
    typedef vec<4, float, mediump> mediump_vec4;

    typedef vec<1, float, highp> highp_vec1;
    typedef vec<2, float, highp> highp_vec2;
    typedef vec<3, float, highp> highp_vec3;
    typedef vec<4, float, highp> highp_vec4;

    typedef vec<1, float, defaultp> vec1;
    typedef vec<2, float, defaultp> vec2;
    typedef vec<3, float, defaultp> vec3;
    typedef vec<4, float, defaultp> vec4;

    typedef vec<1, float, lowp> lowp_fvec1;
    typedef vec<2, float, lowp> lowp_fvec2;
    typedef vec<3, float, lowp> lowp_fvec3;
    typedef vec<4, float, lowp> lowp_fvec4;

    typedef vec<1, float, mediump> mediump_fvec1;
    typedef vec<2, float, mediump> mediump_fvec2;
    typedef vec<3, float, mediump> mediump_fvec3;
    typedef vec<4, float, mediump> mediump_fvec4;

    typedef vec<1, float, highp> highp_fvec1;
    typedef vec<2, float, highp> highp_fvec2;
    typedef vec<3, float, highp> highp_fvec3;
    typedef vec<4, float, highp> highp_fvec4;

    typedef vec<1, f32, defaultp> fvec1;
    typedef vec<2, f32, defaultp> fvec2;
    typedef vec<3, f32, defaultp> fvec3;
    typedef vec<4, f32, defaultp> fvec4;

    typedef vec<1, f32, lowp> lowp_f32vec1;
    typedef vec<2, f32, lowp> lowp_f32vec2;
    typedef vec<3, f32, lowp> lowp_f32vec3;
    typedef vec<4, f32, lowp> lowp_f32vec4;

    typedef vec<1, f32, mediump> mediump_f32vec1;
    typedef vec<2, f32, mediump> mediump_f32vec2;
    typedef vec<3, f32, mediump> mediump_f32vec3;
    typedef vec<4, f32, mediump> mediump_f32vec4;

    typedef vec<1, f32, highp> highp_f32vec1;
    typedef vec<2, f32, highp> highp_f32vec2;
    typedef vec<3, f32, highp> highp_f32vec3;
    typedef vec<4, f32, highp> highp_f32vec4;

    typedef vec<1, f32, defaultp> f32vec1;
    typedef vec<2, f32, defaultp> f32vec2;
    typedef vec<3, f32, defaultp> f32vec3;
    typedef vec<4, f32, defaultp> f32vec4;

    typedef vec<1, f64, lowp> lowp_dvec1;
    typedef vec<2, f64, lowp> lowp_dvec2;
    typedef vec<3, f64, lowp> lowp_dvec3;
    typedef vec<4, f64, lowp> lowp_dvec4;

    typedef vec<1, f64, mediump> mediump_dvec1;
    typedef vec<2, f64, mediump> mediump_dvec2;
    typedef vec<3, f64, mediump> mediump_dvec3;
    typedef vec<4, f64, mediump> mediump_dvec4;

    typedef vec<1, f64, highp> highp_dvec1;
    typedef vec<2, f64, highp> highp_dvec2;
    typedef vec<3, f64, highp> highp_dvec3;
    typedef vec<4, f64, highp> highp_dvec4;

    typedef vec<1, f64, defaultp> dvec1;
    typedef vec<2, f64, defaultp> dvec2;
    typedef vec<3, f64, defaultp> dvec3;
    typedef vec<4, f64, defaultp> dvec4;

    typedef vec<1, f64, lowp> lowp_f64vec1;
    typedef vec<2, f64, lowp> lowp_f64vec2;
    typedef vec<3, f64, lowp> lowp_f64vec3;
    typedef vec<4, f64, lowp> lowp_f64vec4;

    typedef vec<1, f64, mediump> mediump_f64vec1;
    typedef vec<2, f64, mediump> mediump_f64vec2;
    typedef vec<3, f64, mediump> mediump_f64vec3;
    typedef vec<4, f64, mediump> mediump_f64vec4;

    typedef vec<1, f64, highp> highp_f64vec1;
    typedef vec<2, f64, highp> highp_f64vec2;
    typedef vec<3, f64, highp> highp_f64vec3;
    typedef vec<4, f64, highp> highp_f64vec4;

    typedef vec<1, f64, defaultp> f64vec1;
    typedef vec<2, f64, defaultp> f64vec2;
    typedef vec<3, f64, defaultp> f64vec3;
    typedef vec<4, f64, defaultp> f64vec4;

    // Matrix NxN

    typedef mat<2, 2, f32, lowp> lowp_mat2;
    typedef mat<3, 3, f32, lowp> lowp_mat3;
    typedef mat<4, 4, f32, lowp> lowp_mat4;

    typedef mat<2, 2, f32, mediump> mediump_mat2;
    typedef mat<3, 3, f32, mediump> mediump_mat3;
    typedef mat<4, 4, f32, mediump> mediump_mat4;

    typedef mat<2, 2, f32, highp> highp_mat2;
    typedef mat<3, 3, f32, highp> highp_mat3;
    typedef mat<4, 4, f32, highp> highp_mat4;

    typedef mat<2, 2, f32, defaultp> mat2;
    typedef mat<3, 3, f32, defaultp> mat3;
    typedef mat<4, 4, f32, defaultp> mat4;

    typedef mat<2, 2, f32, lowp> lowp_fmat2;
    typedef mat<3, 3, f32, lowp> lowp_fmat3;
    typedef mat<4, 4, f32, lowp> lowp_fmat4;

    typedef mat<2, 2, f32, mediump> mediump_fmat2;
    typedef mat<3, 3, f32, mediump> mediump_fmat3;
    typedef mat<4, 4, f32, mediump> mediump_fmat4;

    typedef mat<2, 2, f32, highp> highp_fmat2;
    typedef mat<3, 3, f32, highp> highp_fmat3;
    typedef mat<4, 4, f32, highp> highp_fmat4;

    typedef mat<2, 2, f32, defaultp> fmat2;
    typedef mat<3, 3, f32, defaultp> fmat3;
    typedef mat<4, 4, f32, defaultp> fmat4;

    typedef mat<2, 2, f32, lowp> lowp_f32mat2;
    typedef mat<3, 3, f32, lowp> lowp_f32mat3;
    typedef mat<4, 4, f32, lowp> lowp_f32mat4;

    typedef mat<2, 2, f32, mediump> mediump_f32mat2;
    typedef mat<3, 3, f32, mediump> mediump_f32mat3;
    typedef mat<4, 4, f32, mediump> mediump_f32mat4;

    typedef mat<2, 2, f32, highp> highp_f32mat2;
    typedef mat<3, 3, f32, highp> highp_f32mat3;
    typedef mat<4, 4, f32, highp> highp_f32mat4;

    typedef mat<2, 2, f32, defaultp> f32mat2;
    typedef mat<3, 3, f32, defaultp> f32mat3;
    typedef mat<4, 4, f32, defaultp> f32mat4;

    typedef mat<2, 2, f64, lowp> lowp_dmat2;
    typedef mat<3, 3, f64, lowp> lowp_dmat3;
    typedef mat<4, 4, f64, lowp> lowp_dmat4;

    typedef mat<2, 2, f64, mediump> mediump_dmat2;
    typedef mat<3, 3, f64, mediump> mediump_dmat3;
    typedef mat<4, 4, f64, mediump> mediump_dmat4;

    typedef mat<2, 2, f64, highp> highp_dmat2;
    typedef mat<3, 3, f64, highp> highp_dmat3;
    typedef mat<4, 4, f64, highp> highp_dmat4;

    typedef mat<2, 2, f64, defaultp> dmat2;
    typedef mat<3, 3, f64, defaultp> dmat3;
    typedef mat<4, 4, f64, defaultp> dmat4;

    typedef mat<2, 2, f64, lowp> lowp_f64mat2;
    typedef mat<3, 3, f64, lowp> lowp_f64mat3;
    typedef mat<4, 4, f64, lowp> lowp_f64mat4;

    typedef mat<2, 2, f64, mediump> mediump_f64mat2;
    typedef mat<3, 3, f64, mediump> mediump_f64mat3;
    typedef mat<4, 4, f64, mediump> mediump_f64mat4;

    typedef mat<2, 2, f64, highp> highp_f64mat2;
    typedef mat<3, 3, f64, highp> highp_f64mat3;
    typedef mat<4, 4, f64, highp> highp_f64mat4;

    typedef mat<2, 2, f64, defaultp> f64mat2;
    typedef mat<3, 3, f64, defaultp> f64mat3;
    typedef mat<4, 4, f64, defaultp> f64mat4;

    // Matrix MxN

    typedef mat<2, 2, f32, lowp> lowp_mat2x2;
    typedef mat<2, 3, f32, lowp> lowp_mat2x3;
    typedef mat<2, 4, f32, lowp> lowp_mat2x4;
    typedef mat<3, 2, f32, lowp> lowp_mat3x2;
    typedef mat<3, 3, f32, lowp> lowp_mat3x3;
    typedef mat<3, 4, f32, lowp> lowp_mat3x4;
    typedef mat<4, 2, f32, lowp> lowp_mat4x2;
    typedef mat<4, 3, f32, lowp> lowp_mat4x3;
    typedef mat<4, 4, f32, lowp> lowp_mat4x4;

    typedef mat<2, 2, f32, mediump> mediump_mat2x2;
    typedef mat<2, 3, f32, mediump> mediump_mat2x3;
    typedef mat<2, 4, f32, mediump> mediump_mat2x4;
    typedef mat<3, 2, f32, mediump> mediump_mat3x2;
    typedef mat<3, 3, f32, mediump> mediump_mat3x3;
    typedef mat<3, 4, f32, mediump> mediump_mat3x4;
    typedef mat<4, 2, f32, mediump> mediump_mat4x2;
    typedef mat<4, 3, f32, mediump> mediump_mat4x3;
    typedef mat<4, 4, f32, mediump> mediump_mat4x4;

    typedef mat<2, 2, f32, highp> highp_mat2x2;
    typedef mat<2, 3, f32, highp> highp_mat2x3;
    typedef mat<2, 4, f32, highp> highp_mat2x4;
    typedef mat<3, 2, f32, highp> highp_mat3x2;
    typedef mat<3, 3, f32, highp> highp_mat3x3;
    typedef mat<3, 4, f32, highp> highp_mat3x4;
    typedef mat<4, 2, f32, highp> highp_mat4x2;
    typedef mat<4, 3, f32, highp> highp_mat4x3;
    typedef mat<4, 4, f32, highp> highp_mat4x4;

    typedef mat<2, 2, f32, defaultp> mat2x2;
    typedef mat<3, 2, f32, defaultp> mat3x2;
    typedef mat<4, 2, f32, defaultp> mat4x2;
    typedef mat<2, 3, f32, defaultp> mat2x3;
    typedef mat<3, 3, f32, defaultp> mat3x3;
    typedef mat<4, 3, f32, defaultp> mat4x3;
    typedef mat<2, 4, f32, defaultp> mat2x4;
    typedef mat<3, 4, f32, defaultp> mat3x4;
    typedef mat<4, 4, f32, defaultp> mat4x4;

    typedef mat<2, 2, f32, lowp> lowp_fmat2x2;
    typedef mat<2, 3, f32, lowp> lowp_fmat2x3;
    typedef mat<2, 4, f32, lowp> lowp_fmat2x4;
    typedef mat<3, 2, f32, lowp> lowp_fmat3x2;
    typedef mat<3, 3, f32, lowp> lowp_fmat3x3;
    typedef mat<3, 4, f32, lowp> lowp_fmat3x4;
    typedef mat<4, 2, f32, lowp> lowp_fmat4x2;
    typedef mat<4, 3, f32, lowp> lowp_fmat4x3;
    typedef mat<4, 4, f32, lowp> lowp_fmat4x4;

    typedef mat<2, 2, f32, mediump> mediump_fmat2x2;
    typedef mat<2, 3, f32, mediump> mediump_fmat2x3;
    typedef mat<2, 4, f32, mediump> mediump_fmat2x4;
    typedef mat<3, 2, f32, mediump> mediump_fmat3x2;
    typedef mat<3, 3, f32, mediump> mediump_fmat3x3;
    typedef mat<3, 4, f32, mediump> mediump_fmat3x4;
    typedef mat<4, 2, f32, mediump> mediump_fmat4x2;
    typedef mat<4, 3, f32, mediump> mediump_fmat4x3;
    typedef mat<4, 4, f32, mediump> mediump_fmat4x4;

    typedef mat<2, 2, f32, highp> highp_fmat2x2;
    typedef mat<2, 3, f32, highp> highp_fmat2x3;
    typedef mat<2, 4, f32, highp> highp_fmat2x4;
    typedef mat<3, 2, f32, highp> highp_fmat3x2;
    typedef mat<3, 3, f32, highp> highp_fmat3x3;
    typedef mat<3, 4, f32, highp> highp_fmat3x4;
    typedef mat<4, 2, f32, highp> highp_fmat4x2;
    typedef mat<4, 3, f32, highp> highp_fmat4x3;
    typedef mat<4, 4, f32, highp> highp_fmat4x4;

    typedef mat<2, 2, f32, defaultp> fmat2x2;
    typedef mat<3, 2, f32, defaultp> fmat3x2;
    typedef mat<4, 2, f32, defaultp> fmat4x2;
    typedef mat<2, 3, f32, defaultp> fmat2x3;
    typedef mat<3, 3, f32, defaultp> fmat3x3;
    typedef mat<4, 3, f32, defaultp> fmat4x3;
    typedef mat<2, 4, f32, defaultp> fmat2x4;
    typedef mat<3, 4, f32, defaultp> fmat3x4;
    typedef mat<4, 4, f32, defaultp> fmat4x4;

    typedef mat<2, 2, f32, lowp> lowp_f32mat2x2;
    typedef mat<2, 3, f32, lowp> lowp_f32mat2x3;
    typedef mat<2, 4, f32, lowp> lowp_f32mat2x4;
    typedef mat<3, 2, f32, lowp> lowp_f32mat3x2;
    typedef mat<3, 3, f32, lowp> lowp_f32mat3x3;
    typedef mat<3, 4, f32, lowp> lowp_f32mat3x4;
    typedef mat<4, 2, f32, lowp> lowp_f32mat4x2;
    typedef mat<4, 3, f32, lowp> lowp_f32mat4x3;
    typedef mat<4, 4, f32, lowp> lowp_f32mat4x4;

    typedef mat<2, 2, f32, mediump> mediump_f32mat2x2;
    typedef mat<2, 3, f32, mediump> mediump_f32mat2x3;
    typedef mat<2, 4, f32, mediump> mediump_f32mat2x4;
    typedef mat<3, 2, f32, mediump> mediump_f32mat3x2;
    typedef mat<3, 3, f32, mediump> mediump_f32mat3x3;
    typedef mat<3, 4, f32, mediump> mediump_f32mat3x4;
    typedef mat<4, 2, f32, mediump> mediump_f32mat4x2;
    typedef mat<4, 3, f32, mediump> mediump_f32mat4x3;
    typedef mat<4, 4, f32, mediump> mediump_f32mat4x4;

    typedef mat<2, 2, f32, highp> highp_f32mat2x2;
    typedef mat<2, 3, f32, highp> highp_f32mat2x3;
+
692  typedef mat<2, 4, f32, highp> highp_f32mat2x4;
+
693  typedef mat<3, 2, f32, highp> highp_f32mat3x2;
+
694  typedef mat<3, 3, f32, highp> highp_f32mat3x3;
+
695  typedef mat<3, 4, f32, highp> highp_f32mat3x4;
+
696  typedef mat<4, 2, f32, highp> highp_f32mat4x2;
+
697  typedef mat<4, 3, f32, highp> highp_f32mat4x3;
+
698  typedef mat<4, 4, f32, highp> highp_f32mat4x4;
+
699 
+
700  typedef mat<2, 2, f32, defaultp> f32mat2x2;
+
701  typedef mat<3, 2, f32, defaultp> f32mat3x2;
+
702  typedef mat<4, 2, f32, defaultp> f32mat4x2;
+
703  typedef mat<2, 3, f32, defaultp> f32mat2x3;
+
704  typedef mat<3, 3, f32, defaultp> f32mat3x3;
+
705  typedef mat<4, 3, f32, defaultp> f32mat4x3;
+
706  typedef mat<2, 4, f32, defaultp> f32mat2x4;
+
707  typedef mat<3, 4, f32, defaultp> f32mat3x4;
+
708  typedef mat<4, 4, f32, defaultp> f32mat4x4;
+
709 
+
710  typedef mat<2, 2, double, lowp> lowp_dmat2x2;
+
711  typedef mat<2, 3, double, lowp> lowp_dmat2x3;
+
712  typedef mat<2, 4, double, lowp> lowp_dmat2x4;
+
713  typedef mat<3, 2, double, lowp> lowp_dmat3x2;
+
714  typedef mat<3, 3, double, lowp> lowp_dmat3x3;
+
715  typedef mat<3, 4, double, lowp> lowp_dmat3x4;
+
716  typedef mat<4, 2, double, lowp> lowp_dmat4x2;
+
717  typedef mat<4, 3, double, lowp> lowp_dmat4x3;
+
718  typedef mat<4, 4, double, lowp> lowp_dmat4x4;
+
719 
+
720  typedef mat<2, 2, double, mediump> mediump_dmat2x2;
+
721  typedef mat<2, 3, double, mediump> mediump_dmat2x3;
+
722  typedef mat<2, 4, double, mediump> mediump_dmat2x4;
+
723  typedef mat<3, 2, double, mediump> mediump_dmat3x2;
+
724  typedef mat<3, 3, double, mediump> mediump_dmat3x3;
+
725  typedef mat<3, 4, double, mediump> mediump_dmat3x4;
+
726  typedef mat<4, 2, double, mediump> mediump_dmat4x2;
+
727  typedef mat<4, 3, double, mediump> mediump_dmat4x3;
+
728  typedef mat<4, 4, double, mediump> mediump_dmat4x4;
+
729 
+
730  typedef mat<2, 2, double, highp> highp_dmat2x2;
+
731  typedef mat<2, 3, double, highp> highp_dmat2x3;
+
732  typedef mat<2, 4, double, highp> highp_dmat2x4;
+
733  typedef mat<3, 2, double, highp> highp_dmat3x2;
+
734  typedef mat<3, 3, double, highp> highp_dmat3x3;
+
735  typedef mat<3, 4, double, highp> highp_dmat3x4;
+
736  typedef mat<4, 2, double, highp> highp_dmat4x2;
+
737  typedef mat<4, 3, double, highp> highp_dmat4x3;
+
738  typedef mat<4, 4, double, highp> highp_dmat4x4;
+
739 
+
740  typedef mat<2, 2, double, defaultp> dmat2x2;
+
741  typedef mat<3, 2, double, defaultp> dmat3x2;
+
742  typedef mat<4, 2, double, defaultp> dmat4x2;
+
743  typedef mat<2, 3, double, defaultp> dmat2x3;
+
744  typedef mat<3, 3, double, defaultp> dmat3x3;
+
745  typedef mat<4, 3, double, defaultp> dmat4x3;
+
746  typedef mat<2, 4, double, defaultp> dmat2x4;
+
747  typedef mat<3, 4, double, defaultp> dmat3x4;
+
748  typedef mat<4, 4, double, defaultp> dmat4x4;
+
749 
+
750  typedef mat<2, 2, f64, lowp> lowp_f64mat2x2;
+
751  typedef mat<2, 3, f64, lowp> lowp_f64mat2x3;
+
752  typedef mat<2, 4, f64, lowp> lowp_f64mat2x4;
+
753  typedef mat<3, 2, f64, lowp> lowp_f64mat3x2;
+
754  typedef mat<3, 3, f64, lowp> lowp_f64mat3x3;
+
755  typedef mat<3, 4, f64, lowp> lowp_f64mat3x4;
+
756  typedef mat<4, 2, f64, lowp> lowp_f64mat4x2;
+
757  typedef mat<4, 3, f64, lowp> lowp_f64mat4x3;
+
758  typedef mat<4, 4, f64, lowp> lowp_f64mat4x4;
+
759 
+
760  typedef mat<2, 2, f64, mediump> mediump_f64mat2x2;
+
761  typedef mat<2, 3, f64, mediump> mediump_f64mat2x3;
+
762  typedef mat<2, 4, f64, mediump> mediump_f64mat2x4;
+
763  typedef mat<3, 2, f64, mediump> mediump_f64mat3x2;
+
764  typedef mat<3, 3, f64, mediump> mediump_f64mat3x3;
+
765  typedef mat<3, 4, f64, mediump> mediump_f64mat3x4;
+
766  typedef mat<4, 2, f64, mediump> mediump_f64mat4x2;
+
767  typedef mat<4, 3, f64, mediump> mediump_f64mat4x3;
+
768  typedef mat<4, 4, f64, mediump> mediump_f64mat4x4;
+
769 
+
770  typedef mat<2, 2, f64, highp> highp_f64mat2x2;
+
771  typedef mat<2, 3, f64, highp> highp_f64mat2x3;
+
772  typedef mat<2, 4, f64, highp> highp_f64mat2x4;
+
773  typedef mat<3, 2, f64, highp> highp_f64mat3x2;
+
774  typedef mat<3, 3, f64, highp> highp_f64mat3x3;
+
775  typedef mat<3, 4, f64, highp> highp_f64mat3x4;
+
776  typedef mat<4, 2, f64, highp> highp_f64mat4x2;
+
777  typedef mat<4, 3, f64, highp> highp_f64mat4x3;
+
778  typedef mat<4, 4, f64, highp> highp_f64mat4x4;
+
779 
+
780  typedef mat<2, 2, f64, defaultp> f64mat2x2;
+
781  typedef mat<3, 2, f64, defaultp> f64mat3x2;
+
782  typedef mat<4, 2, f64, defaultp> f64mat4x2;
+
783  typedef mat<2, 3, f64, defaultp> f64mat2x3;
+
784  typedef mat<3, 3, f64, defaultp> f64mat3x3;
+
785  typedef mat<4, 3, f64, defaultp> f64mat4x3;
+
786  typedef mat<2, 4, f64, defaultp> f64mat2x4;
+
787  typedef mat<3, 4, f64, defaultp> f64mat3x4;
+
788  typedef mat<4, 4, f64, defaultp> f64mat4x4;
+
789 
+
790  // Quaternion
+
791 
+
792  typedef qua<float, lowp> lowp_quat;
+
793  typedef qua<float, mediump> mediump_quat;
+
794  typedef qua<float, highp> highp_quat;
+
795  typedef qua<float, defaultp> quat;
+
796 
+
797  typedef qua<float, lowp> lowp_fquat;
+
798  typedef qua<float, mediump> mediump_fquat;
+
799  typedef qua<float, highp> highp_fquat;
+
800  typedef qua<float, defaultp> fquat;
+
801 
+
802  typedef qua<f32, lowp> lowp_f32quat;
+
803  typedef qua<f32, mediump> mediump_f32quat;
+
804  typedef qua<f32, highp> highp_f32quat;
+
805  typedef qua<f32, defaultp> f32quat;
+
806 
+
807  typedef qua<double, lowp> lowp_dquat;
+
808  typedef qua<double, mediump> mediump_dquat;
+
809  typedef qua<double, highp> highp_dquat;
+
810  typedef qua<double, defaultp> dquat;
+
811 
+
812  typedef qua<f64, lowp> lowp_f64quat;
+
813  typedef qua<f64, mediump> mediump_f64quat;
+
814  typedef qua<f64, highp> highp_f64quat;
+
815  typedef qua<f64, defaultp> f64quat;
+
816 }//namespace glm
+
817 
+
818 
+
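All of these names are plain aliases of the single mat/qua templates with a precision qualifier baked in. A minimal standalone sketch of what that equivalence means (not part of fwd.hpp; the static_asserts are illustrative):

// Sketch: the precision-qualified typedefs are ordinary aliases of glm::mat / glm::qua.
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>
#include <type_traits>

// glm::mat4 is exactly glm::mat<4, 4, float, glm::defaultp> (f32 is a typedef of float) ...
static_assert(std::is_same<glm::mat4, glm::mat<4, 4, float, glm::defaultp>>::value, "mat4 alias");
// ... glm::dmat3x4 is glm::mat<3, 4, double, glm::defaultp> ...
static_assert(std::is_same<glm::dmat3x4, glm::mat<3, 4, double, glm::defaultp>>::value, "dmat3x4 alias");
// ... and glm::quat is glm::qua<float, glm::defaultp>.
static_assert(std::is_same<glm::quat, glm::qua<float, glm::defaultp>>::value, "quat alias");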
0.9.9 API documentation: geometric.hpp File Reference

geometric.hpp File Reference

Core features.

Go to the source code of this file.

Functions

template<typename T, qualifier Q>
GLM_FUNC_DECL vec<3, T, Q> cross(vec<3, T, Q> const& x, vec<3, T, Q> const& y)
	Returns the cross product of x and y.

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL T distance(vec<L, T, Q> const& p0, vec<L, T, Q> const& p1)
	Returns the distance between p0 and p1, i.e., length(p0 - p1).

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL T dot(vec<L, T, Q> const& x, vec<L, T, Q> const& y)
	Returns the dot product of x and y, i.e., result = x * y.

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, T, Q> faceforward(vec<L, T, Q> const& N, vec<L, T, Q> const& I, vec<L, T, Q> const& Nref)
	If dot(Nref, I) < 0.0, return N; otherwise, return -N.

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL T length(vec<L, T, Q> const& x)
	Returns the length of x, i.e., sqrt(x * x).

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, T, Q> normalize(vec<L, T, Q> const& x)
	Returns a vector in the same direction as x but with a length of 1.

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, T, Q> reflect(vec<L, T, Q> const& I, vec<L, T, Q> const& N)
	For the incident vector I and surface orientation N, returns the reflection direction: result = I - 2.0 * dot(N, I) * N.

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, T, Q> refract(vec<L, T, Q> const& I, vec<L, T, Q> const& N, T eta)
	For the incident vector I, surface normal N, and ratio of indices of refraction eta, returns the refraction vector.

Detailed Description
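These declarations mirror GLSL's geometric built-ins. A minimal, self-contained sketch of how they behave (standalone example; the values in comments are illustrative):

// Sketch: core geometric helpers from geometric.hpp.
#include <glm/glm.hpp>  // the umbrella header pulls in geometric.hpp
#include <cstdio>

int main()
{
    glm::vec3 a(1.0f, 0.0f, 0.0f);
    glm::vec3 b(0.0f, 1.0f, 0.0f);

    float d        = glm::dot(a, b);       // 0.0f: perpendicular vectors
    glm::vec3 c    = glm::cross(a, b);     // (0, 0, 1)
    float len      = glm::length(a);       // 1.0f
    float dist     = glm::distance(a, b);  // sqrt(2)
    glm::vec3 unit = glm::normalize(glm::vec3(3.0f, 4.0f, 0.0f)); // (0.6, 0.8, 0)

    // Reflect an incident direction off a surface with unit normal N.
    glm::vec3 I = glm::normalize(glm::vec3(1.0f, -1.0f, 0.0f));
    glm::vec3 N(0.0f, 1.0f, 0.0f);
    glm::vec3 R = glm::reflect(I, N);      // (0.707, 0.707, 0): bounces upward

    std::printf("dot=%f len=%f dist=%f\n", d, len, dist);
    std::printf("cross=(%f,%f,%f) R=(%f,%f,%f)\n", c.x, c.y, c.z, R.x, R.y, R.z);
    (void)unit;
    return 0;
}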
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00036_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00036_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..2115bb4cb723ca7d0d88e4727c0471ae712da69f
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00036_source.html
@@ -0,0 +1,152 @@

0.9.9 API documentation: geometric.hpp Source File
geometric.hpp
Go to the documentation of this file.
#pragma once

#include "detail/type_vec3.hpp"

namespace glm
{
	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_DECL T length(vec<L, T, Q> const& x);

	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_DECL T distance(vec<L, T, Q> const& p0, vec<L, T, Q> const& p1);

	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_DECL T dot(vec<L, T, Q> const& x, vec<L, T, Q> const& y);

	template<typename T, qualifier Q>
	GLM_FUNC_DECL vec<3, T, Q> cross(vec<3, T, Q> const& x, vec<3, T, Q> const& y);

	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_DECL vec<L, T, Q> normalize(vec<L, T, Q> const& x);

	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_DECL vec<L, T, Q> faceforward(
		vec<L, T, Q> const& N,
		vec<L, T, Q> const& I,
		vec<L, T, Q> const& Nref);

	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_DECL vec<L, T, Q> reflect(
		vec<L, T, Q> const& I,
		vec<L, T, Q> const& N);

	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_DECL vec<L, T, Q> refract(
		vec<L, T, Q> const& I,
		vec<L, T, Q> const& N,
		T eta);
}//namespace glm

#include "detail/func_geometric.inl"
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00037.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00037.html
new file mode 100644
index 0000000000000000000000000000000000000000..b1a7039b3d7a547a165011db8ce96f5d7d9558f6
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00037.html
@@ -0,0 +1,108 @@

0.9.9 API documentation: glm.hpp File Reference
glm.hpp File Reference

Core features.

Go to the source code of this file.

Detailed Description

Core features

Definition in file glm.hpp.
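glm.hpp is the umbrella header for the core library, as its include list below shows. A minimal sketch of what a single include buys you (standalone example):

// Sketch: one include gives all core vector/matrix types and operators.
#include <glm/glm.hpp>

int main()
{
    glm::mat4 m(1.0f);                  // 4x4 identity, from mat4x4.hpp
    glm::vec4 v(1.0f, 2.0f, 3.0f, 1.0f);
    glm::vec4 w = m * v;                // matrix * vector from the core operators
    return w.x > 0.0f ? 0 : 1;
}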
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00037_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00037_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..775648f23b594cbc76f9cba1ae776186cd7ec374
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00037_source.html
@@ -0,0 +1,154 @@

0.9.9 API documentation: glm.hpp Source File
glm.hpp
Go to the documentation of this file.
#include "detail/_fixes.hpp"

#include "detail/setup.hpp"

#pragma once

#include <cmath>
#include <climits>
#include <cfloat>
#include <limits>
#include <cassert>
#include "fwd.hpp"

#include "vec2.hpp"
#include "vec3.hpp"
#include "vec4.hpp"
#include "mat2x2.hpp"
#include "mat2x3.hpp"
#include "mat2x4.hpp"
#include "mat3x2.hpp"
#include "mat3x3.hpp"
#include "mat3x4.hpp"
#include "mat4x2.hpp"
#include "mat4x3.hpp"
#include "mat4x4.hpp"

#include "trigonometric.hpp"
#include "exponential.hpp"
#include "common.hpp"
#include "packing.hpp"
#include "geometric.hpp"
#include "matrix.hpp"
#include "vector_relational.hpp"
#include "integer.hpp"
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00038.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00038.html
new file mode 100644
index 0000000000000000000000000000000000000000..9854848ca92827c9998447d7797963e5ad207155
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00038.html
@@ -0,0 +1,125 @@

0.9.9 API documentation: gradient_paint.hpp File Reference
gradient_paint.hpp File Reference

GLM_GTX_gradient_paint

Go to the source code of this file.

Functions

template<typename T, qualifier Q>
GLM_FUNC_DECL T linearGradient(vec<2, T, Q> const& Point0, vec<2, T, Q> const& Point1, vec<2, T, Q> const& Position)
	Return a color from a linear gradient.

template<typename T, qualifier Q>
GLM_FUNC_DECL T radialGradient(vec<2, T, Q> const& Center, T const& Radius, vec<2, T, Q> const& Focal, vec<2, T, Q> const& Position)
	Return a color from a radial gradient.

Detailed Description

GLM_GTX_gradient_paint

See also
	Core features (dependence)
	GLM_GTX_optimum_pow (dependence)

Definition in file gradient_paint.hpp.
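A sketch of how these might be called; both return a scalar weight of type T at the sampled position, and since this is an experimental GTX extension, GLM_ENABLE_EXPERIMENTAL must be defined first (values are illustrative):

// Sketch: sampling the gradient helpers at a 2D point.
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/gradient_paint.hpp>

float sample_gradients(glm::vec2 const& p)
{
    // Weight along the segment (0,0) -> (1,0) at position p.
    float linear = glm::linearGradient(glm::vec2(0.0f), glm::vec2(1.0f, 0.0f), p);

    // Weight of a radial gradient centered at the origin with radius 1,
    // focal point coincident with the center.
    float radial = glm::radialGradient(glm::vec2(0.0f), 1.0f, glm::vec2(0.0f), p);

    return linear + radial;
}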
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00038_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00038_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..0e82da1461f3a17a6fc707d9beba977d35b02473
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00038_source.html
@@ -0,0 +1,136 @@

0.9.9 API documentation: gradient_paint.hpp Source File
gradient_paint.hpp
Go to the documentation of this file.
#pragma once

// Dependency:
#include "../glm.hpp"
#include "../gtx/optimum_pow.hpp"

#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
#	ifndef GLM_ENABLE_EXPERIMENTAL
#		pragma message("GLM: GLM_GTX_gradient_paint is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
#	else
#		pragma message("GLM: GLM_GTX_gradient_paint extension included")
#	endif
#endif

namespace glm
{
	template<typename T, qualifier Q>
	GLM_FUNC_DECL T radialGradient(
		vec<2, T, Q> const& Center,
		T const& Radius,
		vec<2, T, Q> const& Focal,
		vec<2, T, Q> const& Position);

	template<typename T, qualifier Q>
	GLM_FUNC_DECL T linearGradient(
		vec<2, T, Q> const& Point0,
		vec<2, T, Q> const& Point1,
		vec<2, T, Q> const& Position);
}// namespace glm

#include "gradient_paint.inl"
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00039.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00039.html
new file mode 100644
index 0000000000000000000000000000000000000000..9959600bcb66861c44a9ddbe8f04b4ffea4c6c6c
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00039.html
@@ -0,0 +1,123 @@

0.9.9 API documentation: handed_coordinate_space.hpp File Reference
handed_coordinate_space.hpp File Reference

GLM_GTX_handed_coordinate_space

Go to the source code of this file.

Functions

template<typename T, qualifier Q>
GLM_FUNC_DECL bool leftHanded(vec<3, T, Q> const& tangent, vec<3, T, Q> const& binormal, vec<3, T, Q> const& normal)
	Returns whether a trihedron is left-handed.

template<typename T, qualifier Q>
GLM_FUNC_DECL bool rightHanded(vec<3, T, Q> const& tangent, vec<3, T, Q> const& binormal, vec<3, T, Q> const& normal)
	Returns whether a trihedron is right-handed.

Detailed Description

GLM_GTX_handed_coordinate_space

See also
	Core features (dependence)

Definition in file handed_coordinate_space.hpp.
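A sketch under the same experimental-extension caveat, checking the handedness of a tangent frame (values are illustrative):

// Sketch: classifying a tangent/binormal/normal frame.
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/handed_coordinate_space.hpp>

bool frame_is_right_handed()
{
    glm::vec3 tangent(1.0f, 0.0f, 0.0f);
    glm::vec3 binormal(0.0f, 1.0f, 0.0f);
    glm::vec3 normal(0.0f, 0.0f, 1.0f);  // +Z completes a right-handed frame

    return glm::rightHanded(tangent, binormal, normal);  // true for this frame
}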
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00039_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00039_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..aaf7013fa4f1e17462ac3e4d5d724ed3c268706f
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00039_source.html
@@ -0,0 +1,134 @@

0.9.9 API documentation: handed_coordinate_space.hpp Source File
handed_coordinate_space.hpp
Go to the documentation of this file.
#pragma once

// Dependency:
#include "../glm.hpp"

#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
#	ifndef GLM_ENABLE_EXPERIMENTAL
#		pragma message("GLM: GLM_GTX_handed_coordinate_space is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
#	else
#		pragma message("GLM: GLM_GTX_handed_coordinate_space extension included")
#	endif
#endif

namespace glm
{
	template<typename T, qualifier Q>
	GLM_FUNC_DECL bool rightHanded(
		vec<3, T, Q> const& tangent,
		vec<3, T, Q> const& binormal,
		vec<3, T, Q> const& normal);

	template<typename T, qualifier Q>
	GLM_FUNC_DECL bool leftHanded(
		vec<3, T, Q> const& tangent,
		vec<3, T, Q> const& binormal,
		vec<3, T, Q> const& normal);
}// namespace glm

#include "handed_coordinate_space.inl"
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00040.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00040.html
new file mode 100644
index 0000000000000000000000000000000000000000..ba2c95e1cf21add15fec92986e6ef244b13f4be0
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00040.html
@@ -0,0 +1,109 @@

0.9.9 API documentation: hash.hpp File Reference
hash.hpp File Reference

GLM_GTX_hash

Go to the source code of this file.

Detailed Description

GLM_GTX_hash

See also
	Core features (dependence)

Definition in file hash.hpp.
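This header only adds std::hash specializations for the GLM types (see the source listing below), which is exactly what standard unordered containers need. A minimal sketch:

// Sketch: using glm::ivec3 as an unordered_map key via GLM_GTX_hash.
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/hash.hpp>
#include <unordered_map>

int main()
{
    // std::hash<glm::vec<3, int, defaultp>> comes from hash.hpp;
    // operator== for vectors comes from the GLM core.
    std::unordered_map<glm::ivec3, float> voxel_density;
    voxel_density[glm::ivec3(1, 2, 3)] = 0.5f;
    return voxel_density.count(glm::ivec3(1, 2, 3)) == 1 ? 0 : 1;
}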
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00040_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00040_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..14bcfe6ad009f2db74de37f7789c7772c8777ac2
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00040_source.html
@@ -0,0 +1,232 @@

0.9.9 API documentation: hash.hpp Source File
hash.hpp
Go to the documentation of this file.
#pragma once

#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
#	ifndef GLM_ENABLE_EXPERIMENTAL
#		pragma message("GLM: GLM_GTX_hash is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
#	else
#		pragma message("GLM: GLM_GTX_hash extension included")
#	endif
#endif

#include <functional>

#include "../vec2.hpp"
#include "../vec3.hpp"
#include "../vec4.hpp"
#include "../gtc/vec1.hpp"

#include "../gtc/quaternion.hpp"
#include "../gtx/dual_quaternion.hpp"

#include "../mat2x2.hpp"
#include "../mat2x3.hpp"
#include "../mat2x4.hpp"

#include "../mat3x2.hpp"
#include "../mat3x3.hpp"
#include "../mat3x4.hpp"

#include "../mat4x2.hpp"
#include "../mat4x3.hpp"
#include "../mat4x4.hpp"

#if !GLM_HAS_CXX11_STL
#	error "GLM_GTX_hash requires C++11 standard library support"
#endif

namespace std
{
	template<typename T, glm::qualifier Q>
	struct hash<glm::vec<1, T,Q> >
	{
		GLM_FUNC_DECL size_t operator()(glm::vec<1, T, Q> const& v) const;
	};

	template<typename T, glm::qualifier Q>
	struct hash<glm::vec<2, T,Q> >
	{
		GLM_FUNC_DECL size_t operator()(glm::vec<2, T, Q> const& v) const;
	};

	template<typename T, glm::qualifier Q>
	struct hash<glm::vec<3, T,Q> >
	{
		GLM_FUNC_DECL size_t operator()(glm::vec<3, T, Q> const& v) const;
	};

	template<typename T, glm::qualifier Q>
	struct hash<glm::vec<4, T,Q> >
	{
		GLM_FUNC_DECL size_t operator()(glm::vec<4, T, Q> const& v) const;
	};

	template<typename T, glm::qualifier Q>
	struct hash<glm::qua<T,Q>>
	{
		GLM_FUNC_DECL size_t operator()(glm::qua<T, Q> const& q) const;
	};

	template<typename T, glm::qualifier Q>
	struct hash<glm::tdualquat<T,Q> >
	{
		GLM_FUNC_DECL size_t operator()(glm::tdualquat<T,Q> const& q) const;
	};

	template<typename T, glm::qualifier Q>
	struct hash<glm::mat<2, 2, T,Q> >
	{
		GLM_FUNC_DECL size_t operator()(glm::mat<2, 2, T,Q> const& m) const;
	};

	template<typename T, glm::qualifier Q>
	struct hash<glm::mat<2, 3, T,Q> >
	{
		GLM_FUNC_DECL size_t operator()(glm::mat<2, 3, T,Q> const& m) const;
	};

	template<typename T, glm::qualifier Q>
	struct hash<glm::mat<2, 4, T,Q> >
	{
		GLM_FUNC_DECL size_t operator()(glm::mat<2, 4, T,Q> const& m) const;
	};

	template<typename T, glm::qualifier Q>
	struct hash<glm::mat<3, 2, T,Q> >
	{
		GLM_FUNC_DECL size_t operator()(glm::mat<3, 2, T,Q> const& m) const;
	};

	template<typename T, glm::qualifier Q>
	struct hash<glm::mat<3, 3, T,Q> >
	{
		GLM_FUNC_DECL size_t operator()(glm::mat<3, 3, T,Q> const& m) const;
	};

	template<typename T, glm::qualifier Q>
	struct hash<glm::mat<3, 4, T,Q> >
	{
		GLM_FUNC_DECL size_t operator()(glm::mat<3, 4, T,Q> const& m) const;
	};

	template<typename T, glm::qualifier Q>
	struct hash<glm::mat<4, 2, T,Q> >
	{
		GLM_FUNC_DECL size_t operator()(glm::mat<4, 2, T,Q> const& m) const;
	};

	template<typename T, glm::qualifier Q>
	struct hash<glm::mat<4, 3, T,Q> >
	{
		GLM_FUNC_DECL size_t operator()(glm::mat<4, 3, T,Q> const& m) const;
	};

	template<typename T, glm::qualifier Q>
	struct hash<glm::mat<4, 4, T,Q> >
	{
		GLM_FUNC_DECL size_t operator()(glm::mat<4, 4, T,Q> const& m) const;
	};
} // namespace std

#include "hash.inl"
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00041.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00041.html
new file mode 100644
index 0000000000000000000000000000000000000000..2996ba259a3b3096471084a07cd2c4c07bb20ad6
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00041.html
@@ -0,0 +1,129 @@

0.9.9 API documentation: integer.hpp File Reference
gtc/integer.hpp File Reference

GLM_GTC_integer

Go to the source code of this file.

Functions

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, int, Q> iround(vec<L, T, Q> const& x)
	Returns a value equal to the nearest integer to x.

template<typename genIUType>
GLM_FUNC_DECL genIUType log2(genIUType x)
	Returns the log2 of x for integer values.

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, uint, Q> uround(vec<L, T, Q> const& x)
	Returns a value equal to the nearest integer to x.

Detailed Description

GLM_GTC_integer

See also
	Core features (dependence)
	GLM_GTC_integer (dependence)

Definition in file gtc/integer.hpp.
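A minimal sketch of these helpers (values are illustrative; exact rounding of .5 follows GLSL round() semantics, so the example avoids ties):

// Sketch: integer-valued rounding and log2 from GLM_GTC_integer.
#include <glm/glm.hpp>
#include <glm/gtc/integer.hpp>

int main()
{
    glm::vec3 v(1.2f, 2.6f, 3.8f);
    glm::ivec3 i = glm::iround(v);  // nearest signed integers: (1, 3, 4)
    glm::uvec3 u = glm::uround(v);  // nearest unsigned integers; inputs must be non-negative
    int l = glm::log2(8);           // integer log2: 3
    return i.x + int(u.x) + l;
}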
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00041_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00041_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..ac897205de20b155c61b21d0650212b8fd6ed91c
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00041_source.html
@@ -0,0 +1,133 @@

0.9.9 API documentation: gtc/integer.hpp Source File
gtc/integer.hpp
Go to the documentation of this file.
#pragma once

// Dependencies
#include "../detail/setup.hpp"
#include "../detail/qualifier.hpp"
#include "../common.hpp"
#include "../integer.hpp"
#include "../exponential.hpp"
#include <limits>

#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
#	pragma message("GLM: GLM_GTC_integer extension included")
#endif

namespace glm
{
	template<typename genIUType>
	GLM_FUNC_DECL genIUType log2(genIUType x);

	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_DECL vec<L, int, Q> iround(vec<L, T, Q> const& x);

	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_DECL vec<L, uint, Q> uround(vec<L, T, Q> const& x);
} //namespace glm

#include "integer.inl"
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00042.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00042.html
new file mode 100644
index 0000000000000000000000000000000000000000..8779f988b907ce23cc71adc6fd63488708e3c45d
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00042.html
@@ -0,0 +1,150 @@

0.9.9 API documentation: integer.hpp File Reference
gtx/integer.hpp File Reference

GLM_GTX_integer

Go to the source code of this file.

Typedefs

typedef signed int sint
	32-bit signed integer.

Functions

template<typename genType>
GLM_FUNC_DECL genType factorial(genType const& x)
	Return the factorial value of a number (12! max, integer only). From the GLM_GTX_integer extension.

GLM_FUNC_DECL unsigned int floor_log2(unsigned int x)
	Returns the floor of log2 of x.

GLM_FUNC_DECL int mod(int x, int y)
	Modulus.

GLM_FUNC_DECL uint mod(uint x, uint y)
	Modulus.

GLM_FUNC_DECL uint nlz(uint x)
	Returns the number of leading zeros.

GLM_FUNC_DECL int pow(int x, uint y)
	Returns x raised to the y power.

GLM_FUNC_DECL uint pow(uint x, uint y)
	Returns x raised to the y power.

GLM_FUNC_DECL int sqrt(int x)
	Returns the positive square root of x.

GLM_FUNC_DECL uint sqrt(uint x)
	Returns the positive square root of x.

Detailed Description

GLM_GTX_integer

See also
	Core features (dependence)

Definition in file gtx/integer.hpp.
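A sketch of the integer-math helpers above (experimental extension, so GLM_ENABLE_EXPERIMENTAL is required; values are illustrative):

// Sketch: integer pow/sqrt/floor_log2/factorial from GLM_GTX_integer.
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/integer.hpp>

int main()
{
    glm::uint a = glm::pow(2u, 10u);          // 1024
    glm::uint b = glm::sqrt(1024u);           // 32
    unsigned int c = glm::floor_log2(1000u);  // 9, since 2^9 = 512 <= 1000 < 1024
    int f = glm::factorial(5);                // 120
    return int(a + b + c) + f;
}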
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00042_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00042_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..9093e881b5ba2fb0c55da976270e4e19020e6927
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00042_source.html
@@ -0,0 +1,149 @@

0.9.9 API documentation: gtx/integer.hpp Source File
gtx/integer.hpp
Go to the documentation of this file.
#pragma once

// Dependency:
#include "../glm.hpp"
#include "../gtc/integer.hpp"

#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
#	ifndef GLM_ENABLE_EXPERIMENTAL
#		pragma message("GLM: GLM_GTX_integer is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
#	else
#		pragma message("GLM: GLM_GTX_integer extension included")
#	endif
#endif

namespace glm
{
	GLM_FUNC_DECL int pow(int x, uint y);

	GLM_FUNC_DECL int sqrt(int x);

	GLM_FUNC_DECL unsigned int floor_log2(unsigned int x);

	GLM_FUNC_DECL int mod(int x, int y);

	template<typename genType>
	GLM_FUNC_DECL genType factorial(genType const& x);

	typedef signed int sint;

	GLM_FUNC_DECL uint pow(uint x, uint y);

	GLM_FUNC_DECL uint sqrt(uint x);

	GLM_FUNC_DECL uint mod(uint x, uint y);

	GLM_FUNC_DECL uint nlz(uint x);
}//namespace glm

#include "integer.inl"
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00043.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00043.html
new file mode 100644
index 0000000000000000000000000000000000000000..02da2db62f2d612290d728822f856dff71cfeb9a
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00043.html
@@ -0,0 +1,167 @@

0.9.9 API documentation: integer.hpp File Reference
integer.hpp File Reference

Core features.

Go to the source code of this file.

Functions

template<typename genType>
GLM_FUNC_DECL int bitCount(genType v)
	Returns the number of bits set to 1 in the binary representation of value.

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, int, Q> bitCount(vec<L, T, Q> const& v)
	Returns the number of bits set to 1 in the binary representation of value.

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, T, Q> bitfieldExtract(vec<L, T, Q> const& Value, int Offset, int Bits)
	Extracts bits [offset, offset + bits - 1] from value, returning them in the least significant bits of the result.

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, T, Q> bitfieldInsert(vec<L, T, Q> const& Base, vec<L, T, Q> const& Insert, int Offset, int Bits)
	Returns the result of inserting the Bits least-significant bits of Insert into Base.

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, T, Q> bitfieldReverse(vec<L, T, Q> const& v)
	Returns the reversal of the bits of value.

template<typename genIUType>
GLM_FUNC_DECL int findLSB(genIUType x)
	Returns the bit number of the least significant bit set to 1 in the binary representation of value.

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, int, Q> findLSB(vec<L, T, Q> const& v)
	Returns the bit number of the least significant bit set to 1 in the binary representation of value.

template<typename genIUType>
GLM_FUNC_DECL int findMSB(genIUType x)
	Returns the bit number of the most significant bit in the binary representation of value.

template<length_t L, typename T, qualifier Q>
GLM_FUNC_DECL vec<L, int, Q> findMSB(vec<L, T, Q> const& v)
	Returns the bit number of the most significant bit in the binary representation of value.

template<length_t L, qualifier Q>
GLM_FUNC_DECL void imulExtended(vec<L, int, Q> const& x, vec<L, int, Q> const& y, vec<L, int, Q>& msb, vec<L, int, Q>& lsb)
	Multiplies 32-bit integers x and y, producing a 64-bit result.

template<length_t L, qualifier Q>
GLM_FUNC_DECL vec<L, uint, Q> uaddCarry(vec<L, uint, Q> const& x, vec<L, uint, Q> const& y, vec<L, uint, Q>& carry)
	Adds 32-bit unsigned integers x and y, returning the sum modulo pow(2, 32).

template<length_t L, qualifier Q>
GLM_FUNC_DECL void umulExtended(vec<L, uint, Q> const& x, vec<L, uint, Q> const& y, vec<L, uint, Q>& msb, vec<L, uint, Q>& lsb)
	Multiplies 32-bit unsigned integers x and y, producing a 64-bit result.

template<length_t L, qualifier Q>
GLM_FUNC_DECL vec<L, uint, Q> usubBorrow(vec<L, uint, Q> const& x, vec<L, uint, Q> const& y, vec<L, uint, Q>& borrow)
	Subtracts the 32-bit unsigned integer y from x, returning the difference if non-negative, or pow(2, 32) plus the difference otherwise.

Detailed Description
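These are the GLSL integer built-ins in C++. A minimal sketch on a two-component vector (values in comments are illustrative):

// Sketch: core bit-manipulation functions from integer.hpp.
#include <glm/glm.hpp>

int main()
{
    glm::uvec2 v(0xBu, 0u);  // 0b1011 and 0

    glm::ivec2 bits = glm::bitCount(v);  // (3, 0)
    glm::ivec2 msb  = glm::findMSB(v);   // (3, -1): -1 when no bit is set
    glm::ivec2 lsb  = glm::findLSB(v);   // (0, -1)

    // Extract 2 bits starting at offset 1: 0b1011 -> 0b01.
    glm::uvec2 field = glm::bitfieldExtract(v, 1, 2);

    return bits.x + msb.x + lsb.x + int(field.x);
}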
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00043_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00043_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..675e0f0c82f2afcffc7bc225dc2ba176e87a5446
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00043_source.html
@@ -0,0 +1,185 @@

0.9.9 API documentation: integer.hpp Source File
integer.hpp
Go to the documentation of this file.
#pragma once

#include "detail/qualifier.hpp"
#include "common.hpp"
#include "vector_relational.hpp"

namespace glm
{
	template<length_t L, qualifier Q>
	GLM_FUNC_DECL vec<L, uint, Q> uaddCarry(
		vec<L, uint, Q> const& x,
		vec<L, uint, Q> const& y,
		vec<L, uint, Q> & carry);

	template<length_t L, qualifier Q>
	GLM_FUNC_DECL vec<L, uint, Q> usubBorrow(
		vec<L, uint, Q> const& x,
		vec<L, uint, Q> const& y,
		vec<L, uint, Q> & borrow);

	template<length_t L, qualifier Q>
	GLM_FUNC_DECL void umulExtended(
		vec<L, uint, Q> const& x,
		vec<L, uint, Q> const& y,
		vec<L, uint, Q> & msb,
		vec<L, uint, Q> & lsb);

	template<length_t L, qualifier Q>
	GLM_FUNC_DECL void imulExtended(
		vec<L, int, Q> const& x,
		vec<L, int, Q> const& y,
		vec<L, int, Q> & msb,
		vec<L, int, Q> & lsb);

	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_DECL vec<L, T, Q> bitfieldExtract(
		vec<L, T, Q> const& Value,
		int Offset,
		int Bits);

	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_DECL vec<L, T, Q> bitfieldInsert(
		vec<L, T, Q> const& Base,
		vec<L, T, Q> const& Insert,
		int Offset,
		int Bits);

	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_DECL vec<L, T, Q> bitfieldReverse(vec<L, T, Q> const& v);

	template<typename genType>
	GLM_FUNC_DECL int bitCount(genType v);

	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_DECL vec<L, int, Q> bitCount(vec<L, T, Q> const& v);

	template<typename genIUType>
	GLM_FUNC_DECL int findLSB(genIUType x);

	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_DECL vec<L, int, Q> findLSB(vec<L, T, Q> const& v);

	template<typename genIUType>
	GLM_FUNC_DECL int findMSB(genIUType x);

	template<length_t L, typename T, qualifier Q>
	GLM_FUNC_DECL vec<L, int, Q> findMSB(vec<L, T, Q> const& v);
}//namespace glm

#include "detail/func_integer.inl"
+
Core features
+
GLM_FUNC_DECL vec< L, int, Q > findMSB(vec< L, T, Q > const &v)
Returns the bit number of the most significant bit in the binary representation of value...
+
GLM_FUNC_DECL void umulExtended(vec< L, uint, Q > const &x, vec< L, uint, Q > const &y, vec< L, uint, Q > &msb, vec< L, uint, Q > &lsb)
Multiplies 32-bit integers x and y, producing a 64-bit result.
+
GLM_FUNC_DECL void imulExtended(vec< L, int, Q > const &x, vec< L, int, Q > const &y, vec< L, int, Q > &msb, vec< L, int, Q > &lsb)
Multiplies 32-bit integers x and y, producing a 64-bit result.
+
GLM_FUNC_DECL vec< L, int, Q > bitCount(vec< L, T, Q > const &v)
Returns the number of bits set to 1 in the binary representation of value.
+
GLM_FUNC_DECL vec< L, uint, Q > uaddCarry(vec< L, uint, Q > const &x, vec< L, uint, Q > const &y, vec< L, uint, Q > &carry)
Adds 32-bit unsigned integer x and y, returning the sum modulo pow(2, 32).
+
GLM_FUNC_DECL vec< L, T, Q > bitfieldExtract(vec< L, T, Q > const &Value, int Offset, int Bits)
Extracts bits [offset, offset + bits - 1] from value, returning them in the least significant bits of...
+
GLM_FUNC_DECL vec< L, T, Q > bitfieldInsert(vec< L, T, Q > const &Base, vec< L, T, Q > const &Insert, int Offset, int Bits)
Returns the insertion the bits least-significant bits of insert into base.
+
Core features
+
GLM_FUNC_DECL vec< L, T, Q > bitfieldReverse(vec< L, T, Q > const &v)
Returns the reversal of the bits of value.
+
GLM_FUNC_DECL vec< L, uint, Q > usubBorrow(vec< L, uint, Q > const &x, vec< L, uint, Q > const &y, vec< L, uint, Q > &borrow)
Subtracts the 32-bit unsigned integer y from x, returning the difference if non-negative, or pow(2, 32) plus the difference otherwise.
+
GLM_FUNC_DECL vec< L, int, Q > findLSB(vec< L, T, Q > const &v)
Returns the bit number of the least significant bit set to 1 in the binary representation of value...
+
+
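As a quick orientation, here is a minimal usage sketch of a few of the functions declared above. The inputs are illustrative; the comments state the values these calls should produce for them.

    #include <glm/glm.hpp>   // pulls in the core integer functions declared above
    #include <iostream>

    int main()
    {
        glm::uint x = 0xB0u;                     // binary 1011'0000
        std::cout << glm::bitCount(x) << '\n';   // 3 bits are set
        std::cout << glm::findLSB(x)  << '\n';   // lowest set bit: 4
        std::cout << glm::findMSB(x)  << '\n';   // highest set bit: 7

        // Vector form: extract the high nibble, bits [4, 7], of each component.
        glm::uvec2 v(0xF0u, 0xABu);
        glm::uvec2 hi = glm::bitfieldExtract(v, 4, 4);   // (0xF, 0xA)
        return 0;
    }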
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00044.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00044.html
new file mode 100644
index 0000000000000000000000000000000000000000..86892f43a0edd90421afd3e492920c8311a881d1
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00044.html
@@ -0,0 +1,141 @@
intersect.hpp File Reference
+
+
+ +

GLM_GTX_intersect More...

+ +

Go to the source code of this file.


+Functions

template<typename genType >
GLM_FUNC_DECL bool intersectLineSphere (genType const &point0, genType const &point1, genType const &sphereCenter, typename genType::value_type sphereRadius, genType &intersectionPosition1, genType &intersectionNormal1, genType &intersectionPosition2=genType(), genType &intersectionNormal2=genType())
 Compute the intersection of a line and a sphere. More...
 
template<typename genType >
GLM_FUNC_DECL bool intersectLineTriangle (genType const &orig, genType const &dir, genType const &vert0, genType const &vert1, genType const &vert2, genType &position)
 Compute the intersection of a line and a triangle. More...
 
template<typename genType >
GLM_FUNC_DECL bool intersectRayPlane (genType const &orig, genType const &dir, genType const &planeOrig, genType const &planeNormal, typename genType::value_type &intersectionDistance)
 Compute the intersection of a ray and a plane. More...
 
template<typename genType >
GLM_FUNC_DECL bool intersectRaySphere (genType const &rayStarting, genType const &rayNormalizedDirection, genType const &sphereCenter, typename genType::value_type const sphereRadiusSquared, typename genType::value_type &intersectionDistance)
 Compute the intersection distance of a ray and a sphere. More...
 
template<typename genType >
GLM_FUNC_DECL bool intersectRaySphere (genType const &rayStarting, genType const &rayNormalizedDirection, genType const &sphereCenter, const typename genType::value_type sphereRadius, genType &intersectionPosition, genType &intersectionNormal)
 Compute the intersection of a ray and a sphere. More...
 
template<typename T , qualifier Q>
GLM_FUNC_DECL bool intersectRayTriangle (vec< 3, T, Q > const &orig, vec< 3, T, Q > const &dir, vec< 3, T, Q > const &v0, vec< 3, T, Q > const &v1, vec< 3, T, Q > const &v2, vec< 2, T, Q > &baryPosition, T &distance)
 Compute the intersection of a ray and a triangle. More...
 
+

Detailed Description

+

GLM_GTX_intersect

+
See also
Core features (dependence)
+
+GLM_GTX_closest_point (dependence)
+ +

Definition in file intersect.hpp.

+
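To make the table above concrete, a minimal sketch of intersectRayPlane. GLM_GTX_intersect is experimental, so GLM_ENABLE_EXPERIMENTAL must be defined before including it; the scene values here are illustrative.

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/intersect.hpp>
    #include <iostream>

    int main()
    {
        glm::vec3 orig(0.0f, 0.0f, 5.0f);
        glm::vec3 dir(0.0f, 0.0f, -1.0f);           // assumed normalized
        glm::vec3 planeOrig(0.0f);                  // plane through the origin
        glm::vec3 planeNormal(0.0f, 0.0f, 1.0f);

        float t = 0.0f;
        if (glm::intersectRayPlane(orig, dir, planeOrig, planeNormal, t))
            std::cout << "hit at distance " << t << '\n';   // expects t == 5
        return 0;
    }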
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00044_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00044_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..f11348274faa882f630932affe22989c3485ebd6
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00044_source.html
@@ -0,0 +1,168 @@
intersect.hpp
+
+
+Go to the documentation of this file.
1 
+
14 #pragma once
+
15 
+
16 // Dependency:
+
17 #include <cfloat>
+
18 #include <limits>
+
19 #include "../glm.hpp"
+
20 #include "../geometric.hpp"
+
21 #include "../gtx/closest_point.hpp"
+
22 #include "../gtx/vector_query.hpp"
+
23 
+
24 #if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+
25 # ifndef GLM_ENABLE_EXPERIMENTAL
+
26 # pragma message("GLM: GLM_GTX_closest_point is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+
27 # else
+
28 # pragma message("GLM: GLM_GTX_closest_point extension included")
+
29 # endif
+
30 #endif
+
31 
+
32 namespace glm
+
33 {
+
36 
+
40  template<typename genType>
+
41  GLM_FUNC_DECL bool intersectRayPlane(
+
42  genType const& orig, genType const& dir,
+
43  genType const& planeOrig, genType const& planeNormal,
+
44  typename genType::value_type & intersectionDistance);
+
45 
+
49  template<typename T, qualifier Q>
+
50  GLM_FUNC_DECL bool intersectRayTriangle(
+
51  vec<3, T, Q> const& orig, vec<3, T, Q> const& dir,
+
52  vec<3, T, Q> const& v0, vec<3, T, Q> const& v1, vec<3, T, Q> const& v2,
+
53  vec<2, T, Q>& baryPosition, T& distance);
+
54 
+
57  template<typename genType>
+
58  GLM_FUNC_DECL bool intersectLineTriangle(
+
59  genType const& orig, genType const& dir,
+
60  genType const& vert0, genType const& vert1, genType const& vert2,
+
61  genType & position);
+
62 
+
66  template<typename genType>
+
67  GLM_FUNC_DECL bool intersectRaySphere(
+
68  genType const& rayStarting, genType const& rayNormalizedDirection,
+
69  genType const& sphereCenter, typename genType::value_type const sphereRadiusSquared,
+
70  typename genType::value_type & intersectionDistance);
+
71 
+
74  template<typename genType>
+
75  GLM_FUNC_DECL bool intersectRaySphere(
+
76  genType const& rayStarting, genType const& rayNormalizedDirection,
+
77  genType const& sphereCenter, const typename genType::value_type sphereRadius,
+
78  genType & intersectionPosition, genType & intersectionNormal);
+
79 
+
82  template<typename genType>
+
83  GLM_FUNC_DECL bool intersectLineSphere(
+
84  genType const& point0, genType const& point1,
+
85  genType const& sphereCenter, typename genType::value_type sphereRadius,
+
86  genType & intersectionPosition1, genType & intersectionNormal1,
+
87  genType & intersectionPosition2 = genType(), genType & intersectionNormal2 = genType());
+
88 
+
90 }//namespace glm
+
91 
+
92 #include "intersect.inl"
+
GLM_FUNC_DECL bool intersectRayTriangle(vec< 3, T, Q > const &orig, vec< 3, T, Q > const &dir, vec< 3, T, Q > const &v0, vec< 3, T, Q > const &v1, vec< 3, T, Q > const &v2, vec< 2, T, Q > &baryPosition, T &distance)
Compute the intersection of a ray and a triangle.
+
GLM_FUNC_DECL bool intersectRaySphere(genType const &rayStarting, genType const &rayNormalizedDirection, genType const &sphereCenter, const typename genType::value_type sphereRadius, genType &intersectionPosition, genType &intersectionNormal)
Compute the intersection of a ray and a sphere.
+
GLM_FUNC_DECL bool intersectRayPlane(genType const &orig, genType const &dir, genType const &planeOrig, genType const &planeNormal, typename genType::value_type &intersectionDistance)
Compute the intersection of a ray and a plane.
+
GLM_FUNC_DECL bool intersectLineTriangle(genType const &orig, genType const &dir, genType const &vert0, genType const &vert1, genType const &vert2, genType &position)
Compute the intersection of a line and a triangle.
+
GLM_FUNC_DECL bool intersectLineSphere(genType const &point0, genType const &point1, genType const &sphereCenter, typename genType::value_type sphereRadius, genType &intersectionPosition1, genType &intersectionNormal1, genType &intersectionPosition2=genType(), genType &intersectionNormal2=genType())
Compute the intersection of a line and a sphere.
+
GLM_FUNC_DECL T distance(vec< L, T, Q > const &p0, vec< L, T, Q > const &p1)
Returns the distance between p0 and p1, i.e., length(p0 - p1).
+
+
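The tooltips above spell out the full signatures; as a sketch, intersectRayTriangle reports a hit as a 2D barycentric position plus a ray distance. The triangle and ray below are illustrative.

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/intersect.hpp>
    #include <iostream>

    int main()
    {
        // Unit triangle in the z = 0 plane, ray shot straight down at (0.25, 0.25).
        glm::vec3 v0(0.0f), v1(1.0f, 0.0f, 0.0f), v2(0.0f, 1.0f, 0.0f);
        glm::vec3 orig(0.25f, 0.25f, 1.0f), dir(0.0f, 0.0f, -1.0f);

        glm::vec2 bary;
        float t = 0.0f;
        if (glm::intersectRayTriangle(orig, dir, v0, v1, v2, bary, t))
            std::cout << "bary = (" << bary.x << ", " << bary.y << "), t = " << t << '\n';
        // expects bary == (0.25, 0.25) and t == 1
        return 0;
    }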
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00045.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00045.html
new file mode 100644
index 0000000000000000000000000000000000000000..a0bd7057f5f3114a63c7625aadb70853b0fcaec0
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00045.html
@@ -0,0 +1,114 @@
io.hpp File Reference
+
+
+ +

GLM_GTX_io More...

+ +

Go to the source code of this file.

+

Detailed Description

+

GLM_GTX_io

+
Author
Jan P Springer (regnirpsj@gmail.com)
+
See also
Core features (dependence)
+
+GLM_GTC_matrix_access (dependence)
+
+GLM_GTC_quaternion (dependence)
+ +

Definition in file io.hpp.

+
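In short, GLM_GTX_io adds operator<< overloads for GLM's vector, matrix and quaternion types. A minimal sketch; the exact output formatting depends on the facet defaults, so the comment is only indicative.

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/io.hpp>
    #include <iostream>

    int main()
    {
        glm::vec3 v(1.0f, 2.5f, 3.0f);
        std::cout << v << '\n';   // prints something like [    1.000,    2.500,    3.000]
        return 0;
    }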
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00045_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00045_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..93b422871f012fedb3d448fa67ebd0757400e544
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00045_source.html
@@ -0,0 +1,280 @@
io.hpp
+
+
+Go to the documentation of this file.
1 
+
20 #pragma once
+
21 
+
22 // Dependency:
+
23 #include "../glm.hpp"
+
24 #include "../gtx/quaternion.hpp"
+
25 
+
26 #if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+
27 # ifndef GLM_ENABLE_EXPERIMENTAL
+
28 # pragma message("GLM: GLM_GTX_io is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+
29 # else
+
30 # pragma message("GLM: GLM_GTX_io extension included")
+
31 # endif
+
32 #endif
+
33 
+
34 #include <iosfwd> // std::basic_ostream<> (fwd)
+
35 #include <locale> // std::locale, std::locale::facet, std::locale::id
+
36 #include <utility> // std::pair<>
+
37 
+
38 namespace glm
+
39 {
+
42 
+
43  namespace io
+
44  {
+
45  enum order_type { column_major, row_major};
+
46 
+
47  template<typename CTy>
+
48  class format_punct : public std::locale::facet
+
49  {
+
50  typedef CTy char_type;
+
51 
+
52  public:
+
53 
+
54  static std::locale::id id;
+
55 
+
56  bool formatted;
+
57  unsigned precision;
+
58  unsigned width;
+
59  char_type separator;
+
60  char_type delim_left;
+
61  char_type delim_right;
+
62  char_type space;
+
63  char_type newline;
+
64  order_type order;
+
65 
+
66  GLM_FUNC_DECL explicit format_punct(size_t a = 0);
+
67  GLM_FUNC_DECL explicit format_punct(format_punct const&);
+
68  };
+
69 
+
70  template<typename CTy, typename CTr = std::char_traits<CTy> >
+
71  class basic_state_saver {
+
72 
+
73  public:
+
74 
+
75  GLM_FUNC_DECL explicit basic_state_saver(std::basic_ios<CTy,CTr>&);
+
76  GLM_FUNC_DECL ~basic_state_saver();
+
77 
+
78  private:
+
79 
+
80  typedef ::std::basic_ios<CTy,CTr> state_type;
+
81  typedef typename state_type::char_type char_type;
+
82  typedef ::std::ios_base::fmtflags flags_type;
+
83  typedef ::std::streamsize streamsize_type;
+
84  typedef ::std::locale const locale_type;
+
85 
+
86  state_type& state_;
+
87  flags_type flags_;
+
88  streamsize_type precision_;
+
89  streamsize_type width_;
+
90  char_type fill_;
+
91  locale_type locale_;
+
92 
+
93  GLM_FUNC_DECL basic_state_saver& operator=(basic_state_saver const&);
+
94  };
+
95 
+
96  typedef basic_state_saver<char> state_saver;
+
97  typedef basic_state_saver<wchar_t> wstate_saver;
+
98 
+
99  template<typename CTy, typename CTr = std::char_traits<CTy> >
+
100  class basic_format_saver
+
101  {
+
102  public:
+
103 
+
104  GLM_FUNC_DECL explicit basic_format_saver(std::basic_ios<CTy,CTr>&);
+
105  GLM_FUNC_DECL ~basic_format_saver();
+
106 
+
107  private:
+
108 
+
109  basic_state_saver<CTy> const bss_;
+
110 
+
111  GLM_FUNC_DECL basic_format_saver& operator=(basic_format_saver const&);
+
112  };
+
113 
+
114  typedef basic_format_saver<char> format_saver;
+
115  typedef basic_format_saver<wchar_t> wformat_saver;
+
116 
+
117  struct precision
+
118  {
+
119  unsigned value;
+
120 
+
121  GLM_FUNC_DECL explicit precision(unsigned);
+
122  };
+
123 
+
124  struct width
+
125  {
+
126  unsigned value;
+
127 
+
128  GLM_FUNC_DECL explicit width(unsigned);
+
129  };
+
130 
+
131  template<typename CTy>
+
132  struct delimeter
+
133  {
+
134  CTy value[3];
+
135 
+
136  GLM_FUNC_DECL explicit delimeter(CTy /* left */, CTy /* right */, CTy /* separator */ = ',');
+
137  };
+
138 
+
139  struct order
+
140  {
+
141  order_type value;
+
142 
+
143  GLM_FUNC_DECL explicit order(order_type);
+
144  };
+
145 
+
146  // functions, inlined (inline)
+
147 
+
148  template<typename FTy, typename CTy, typename CTr>
+
149  FTy const& get_facet(std::basic_ios<CTy,CTr>&);
+
150  template<typename FTy, typename CTy, typename CTr>
+
151  std::basic_ios<CTy,CTr>& formatted(std::basic_ios<CTy,CTr>&);
+
152  template<typename FTy, typename CTy, typename CTr>
+
153  std::basic_ios<CTy,CTr>& unformattet(std::basic_ios<CTy,CTr>&);
+
154 
+
155  template<typename CTy, typename CTr>
+
156  std::basic_ostream<CTy, CTr>& operator<<(std::basic_ostream<CTy, CTr>&, precision const&);
+
157  template<typename CTy, typename CTr>
+
158  std::basic_ostream<CTy, CTr>& operator<<(std::basic_ostream<CTy, CTr>&, width const&);
+
159  template<typename CTy, typename CTr>
+
160  std::basic_ostream<CTy, CTr>& operator<<(std::basic_ostream<CTy, CTr>&, delimeter<CTy> const&);
+
161  template<typename CTy, typename CTr>
+
162  std::basic_ostream<CTy, CTr>& operator<<(std::basic_ostream<CTy, CTr>&, order const&);
+
163  }//namespace io
+
164 
+
165  template<typename CTy, typename CTr, typename T, qualifier Q>
+
166  GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, qua<T, Q> const&);
+
167  template<typename CTy, typename CTr, typename T, qualifier Q>
+
168  GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, vec<1, T, Q> const&);
+
169  template<typename CTy, typename CTr, typename T, qualifier Q>
+
170  GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, vec<2, T, Q> const&);
+
171  template<typename CTy, typename CTr, typename T, qualifier Q>
+
172  GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, vec<3, T, Q> const&);
+
173  template<typename CTy, typename CTr, typename T, qualifier Q>
+
174  GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, vec<4, T, Q> const&);
+
175  template<typename CTy, typename CTr, typename T, qualifier Q>
+
176  GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, mat<2, 2, T, Q> const&);
+
177  template<typename CTy, typename CTr, typename T, qualifier Q>
+
178  GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, mat<2, 3, T, Q> const&);
+
179  template<typename CTy, typename CTr, typename T, qualifier Q>
+
180  GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, mat<2, 4, T, Q> const&);
+
181  template<typename CTy, typename CTr, typename T, qualifier Q>
+
182  GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, mat<3, 2, T, Q> const&);
+
183  template<typename CTy, typename CTr, typename T, qualifier Q>
+
184  GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, mat<3, 3, T, Q> const&);
+
185  template<typename CTy, typename CTr, typename T, qualifier Q>
+
186  GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, mat<3, 4, T, Q> const&);
+
187  template<typename CTy, typename CTr, typename T, qualifier Q>
+
188  GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, mat<4, 2, T, Q> const&);
+
189  template<typename CTy, typename CTr, typename T, qualifier Q>
+
190  GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, mat<4, 3, T, Q> const&);
+
191  template<typename CTy, typename CTr, typename T, qualifier Q>
+
192  GLM_FUNC_DECL std::basic_ostream<CTy,CTr>& operator<<(std::basic_ostream<CTy,CTr>&, mat<4, 4, T, Q> const&);
+
193 
+
194  template<typename CTy, typename CTr, typename T, qualifier Q>
+
195  GLM_FUNC_DECL std::basic_ostream<CTy,CTr> & operator<<(std::basic_ostream<CTy,CTr> &,
+
196  std::pair<mat<4, 4, T, Q> const, mat<4, 4, T, Q> const> const&);
+
197 
+
199 }//namespace glm
+
200 
+
201 #include "io.inl"
+
+
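The io::precision, io::width, io::delimeter and io::order helpers declared above act as stream manipulators backed by the format_punct facet. A hedged sketch; the resulting formatting varies with the installed facet.

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/io.hpp>
    #include <iostream>

    int main()
    {
        glm::vec2 v(3.14159f, 2.71828f);
        // Each manipulator imbues the stream's locale with an updated format_punct.
        std::cout << glm::io::precision(2) << glm::io::width(8) << v << '\n';
        return 0;
    }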
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00046.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00046.html
new file mode 100644
index 0000000000000000000000000000000000000000..1f92ed357d70573df8b2e210c13f8ff05923170a
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00046.html
@@ -0,0 +1,123 @@
log_base.hpp File Reference
+
+
+ +

GLM_GTX_log_base More...

+ +

Go to the source code of this file.


+Functions

template<typename genType >
GLM_FUNC_DECL genType log (genType const &x, genType const &base)
 Logarithm for any base. More...
 
template<length_t L, typename T , qualifier Q>
GLM_FUNC_DECL vec< L, T, Q > sign (vec< L, T, Q > const &x, vec< L, T, Q > const &base)
 Logarithm for any base. More...
 
+

Detailed Description

+

GLM_GTX_log_base

+
See also
Core features (dependence)
+ +

Definition in file log_base.hpp.

+
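GLM_GTX_log_base computes a logarithm in an arbitrary base, i.e. log(x) / log(base). A minimal sketch; the extension is experimental, so the macro is required.

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/log_base.hpp>
    #include <iostream>

    int main()
    {
        std::cout << glm::log(8.0f, 2.0f) << '\n';   // log base 2 of 8: expects 3
        return 0;
    }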
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00046_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00046_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..4e8dc6dbe6997f6c9bcddf67cb3a8bdfc242923d
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00046_source.html
@@ -0,0 +1,132 @@
log_base.hpp
+
+
+Go to the documentation of this file.
1 
+
13 #pragma once
+
14 
+
15 // Dependency:
+
16 #include "../glm.hpp"
+
17 
+
18 #if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
+
19 # ifndef GLM_ENABLE_EXPERIMENTAL
+
20 # pragma message("GLM: GLM_GTX_log_base is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.")
+
21 # else
+
22 # pragma message("GLM: GLM_GTX_log_base extension included")
+
23 # endif
+
24 #endif
+
25 
+
26 namespace glm
+
27 {
+
30 
+
33  template<typename genType>
+
34  GLM_FUNC_DECL genType log(
+
35  genType const& x,
+
36  genType const& base);
+
37 
+
40  template<length_t L, typename T, qualifier Q>
+
41  GLM_FUNC_DECL vec<L, T, Q> sign(
+
42  vec<L, T, Q> const& x,
+
43  vec<L, T, Q> const& base);
+
44 
+
46 }//namespace glm
+
47 
+
48 #include "log_base.inl"
+
GLM_FUNC_DECL vec< L, T, Q > sign(vec< L, T, Q > const &x, vec< L, T, Q > const &base)
Logarithm for any base.
+
GLM_FUNC_DECL genType log(genType const &x, genType const &base)
Logarithm for any base.
+
+
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00047_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00047_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..13ae4919fa773656cf29e7703c38207fb8cb3d32
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00047_source.html
@@ -0,0 +1,1764 @@
man.doxy
+
+
+
1 # Doxyfile 1.8.10
+
2 
+
3 # This file describes the settings to be used by the documentation system
+
4 # doxygen (www.doxygen.org) for a project.
+
5 #
+
6 # All text after a double hash (##) is considered a comment and is placed in
+
7 # front of the TAG it is preceding.
+
8 #
+
9 # All text after a single hash (#) is considered a comment and will be ignored.
+
10 # The format is:
+
11 # TAG = value [value, ...]
+
12 # For lists, items can also be appended using:
+
13 # TAG += value [value, ...]
+
14 # Values that contain spaces should be placed between quotes (\" \").
+
15 
+
16 #---------------------------------------------------------------------------
+
17 # Project related configuration options
+
18 #---------------------------------------------------------------------------
+
19 
+
20 # This tag specifies the encoding used for all characters in the config file
+
21 # that follow. The default is UTF-8 which is also the encoding used for all text
+
22 # before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
+
23 # built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
+
24 # for the list of possible encodings.
+
25 # The default value is: UTF-8.
+
26 
+
27 DOXYFILE_ENCODING = UTF-8
+
28 
+
29 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+
30 # double-quotes, unless you are using Doxywizard) that should identify the
+
31 # project for which the documentation is generated. This name is used in the
+
32 # title of most generated pages and in a few other places.
+
33 # The default value is: My Project.
+
34 
+
35 PROJECT_NAME = "0.9.9 API documentation"
+
36 
+
37 # The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+
38 # could be handy for archiving the generated documentation or if some version
+
39 # control system is used.
+
40 
+
41 PROJECT_NUMBER =
+
42 
+
43 # Using the PROJECT_BRIEF tag one can provide an optional one line description
+
44 # for a project that appears at the top of each page and should give viewer a
+
45 # quick idea about the purpose of the project. Keep the description short.
+
46 
+
47 PROJECT_BRIEF =
+
48 
+
49 # With the PROJECT_LOGO tag one can specify a logo or an icon that is included
+
50 # in the documentation. The maximum height of the logo should not exceed 55
+
51 # pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
+
52 # the logo to the output directory.
+
53 
+
54 PROJECT_LOGO = theme/logo-mini.png
+
55 
+
56 # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+
57 # into which the generated documentation will be written. If a relative path is
+
58 # entered, it will be relative to the location where doxygen was started. If
+
59 # left blank the current directory will be used.
+
60 
+
61 OUTPUT_DIRECTORY = .
+
62 
+
63 # If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
+
64 # directories (in 2 levels) under the output directory of each output format and
+
65 # will distribute the generated files over these directories. Enabling this
+
66 # option can be useful when feeding doxygen a huge amount of source files, where
+
67 # putting all generated files in the same directory would otherwise causes
+
68 # performance problems for the file system.
+
69 # The default value is: NO.
+
70 
+
71 CREATE_SUBDIRS = NO
+
72 
+
73 # If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
+
74 # characters to appear in the names of generated files. If set to NO, non-ASCII
+
75 # characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
+
76 # U+3044.
+
77 # The default value is: NO.
+
78 
+
79 ALLOW_UNICODE_NAMES = NO
+
80 
+
81 # The OUTPUT_LANGUAGE tag is used to specify the language in which all
+
82 # documentation generated by doxygen is written. Doxygen will use this
+
83 # information to generate all constant output in the proper language.
+
84 # Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
+
85 # Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
+
86 # Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
+
87 # Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
+
88 # Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
+
89 # Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
+
90 # Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
+
91 # Ukrainian and Vietnamese.
+
92 # The default value is: English.
+
93 
+
94 OUTPUT_LANGUAGE = English
+
95 
+
96 # If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
+
97 # descriptions after the members that are listed in the file and class
+
98 # documentation (similar to Javadoc). Set to NO to disable this.
+
99 # The default value is: YES.
+
100 
+
101 BRIEF_MEMBER_DESC = YES
+
102 
+
103 # If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
+
104 # description of a member or function before the detailed description
+
105 #
+
106 # Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+
107 # brief descriptions will be completely suppressed.
+
108 # The default value is: YES.
+
109 
+
110 REPEAT_BRIEF = YES
+
111 
+
112 # This tag implements a quasi-intelligent brief description abbreviator that is
+
113 # used to form the text in various listings. Each string in this list, if found
+
114 # as the leading text of the brief description, will be stripped from the text
+
115 # and the result, after processing the whole list, is used as the annotated
+
116 # text. Otherwise, the brief description is used as-is. If left blank, the
+
117 # following values are used ($name is automatically replaced with the name of
+
118 # the entity):The $name class, The $name widget, The $name file, is, provides,
+
119 # specifies, contains, represents, a, an and the.
+
120 
+
121 ABBREVIATE_BRIEF = "The $name class " \
+
122  "The $name widget " \
+
123  "The $name file " \
+
124  is \
+
125  provides \
+
126  specifies \
+
127  contains \
+
128  represents \
+
129  a \
+
130  an \
+
131  the
+
132 
+
133 # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+
134 # doxygen will generate a detailed section even if there is only a brief
+
135 # description.
+
136 # The default value is: NO.
+
137 
+
138 ALWAYS_DETAILED_SEC = NO
+
139 
+
140 # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+
141 # inherited members of a class in the documentation of that class as if those
+
142 # members were ordinary class members. Constructors, destructors and assignment
+
143 # operators of the base classes will not be shown.
+
144 # The default value is: NO.
+
145 
+
146 INLINE_INHERITED_MEMB = NO
+
147 
+
148 # If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
+
149 # before files name in the file list and in the header files. If set to NO the
+
150 # shortest path that makes the file name unique will be used
+
151 # The default value is: YES.
+
152 
+
153 FULL_PATH_NAMES = NO
+
154 
+
155 # The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+
156 # Stripping is only done if one of the specified strings matches the left-hand
+
157 # part of the path. The tag can be used to show relative paths in the file list.
+
158 # If left blank the directory from which doxygen is run is used as the path to
+
159 # strip.
+
160 #
+
161 # Note that you can specify absolute paths here, but also relative paths, which
+
162 # will be relative from the directory where doxygen is started.
+
163 # This tag requires that the tag FULL_PATH_NAMES is set to YES.
+
164 
+
165 STRIP_FROM_PATH = "C:/Documents and Settings/Groove/ "
+
166 
+
167 # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+
168 # path mentioned in the documentation of a class, which tells the reader which
+
169 # header file to include in order to use a class. If left blank only the name of
+
170 # the header file containing the class definition is used. Otherwise one should
+
171 # specify the list of include paths that are normally passed to the compiler
+
172 # using the -I flag.
+
173 
+
174 STRIP_FROM_INC_PATH =
+
175 
+
176 # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+
177 # less readable) file names. This can be useful is your file systems doesn't
+
178 # support long names like on DOS, Mac, or CD-ROM.
+
179 # The default value is: NO.
+
180 
+
181 SHORT_NAMES = YES
+
182 
+
183 # If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+
184 # first line (until the first dot) of a Javadoc-style comment as the brief
+
185 # description. If set to NO, the Javadoc-style will behave just like regular Qt-
+
186 # style comments (thus requiring an explicit @brief command for a brief
+
187 # description.)
+
188 # The default value is: NO.
+
189 
+
190 JAVADOC_AUTOBRIEF = YES
+
191 
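With JAVADOC_AUTOBRIEF enabled as above, a Javadoc-style block needs no explicit @brief command. A sketch of the effect on a hypothetical declaration (the function name is illustrative):

    #include <glm/glm.hpp>

    /**
     * Returns the component-wise sum of a and b. With JAVADOC_AUTOBRIEF = YES this
     * first sentence, up to the first dot, becomes the brief description
     * automatically.
     *
     * Everything after that first sentence goes to the detailed description.
     */
    glm::vec3 add(glm::vec3 const& a, glm::vec3 const& b) { return a + b; }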
+
192 # If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+
193 # line (until the first dot) of a Qt-style comment as the brief description. If
+
194 # set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+
195 # requiring an explicit \brief command for a brief description.)
+
196 # The default value is: NO.
+
197 
+
198 QT_AUTOBRIEF = NO
+
199 
+
200 # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+
201 # multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+
202 # a brief description. This used to be the default behavior. The new default is
+
203 # to treat a multi-line C++ comment block as a detailed description. Set this
+
204 # tag to YES if you prefer the old behavior instead.
+
205 #
+
206 # Note that setting this tag to YES also means that rational rose comments are
+
207 # not recognized any more.
+
208 # The default value is: NO.
+
209 
+
210 MULTILINE_CPP_IS_BRIEF = NO
+
211 
+
212 # If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+
213 # documentation from any documented member that it re-implements.
+
214 # The default value is: YES.
+
215 
+
216 INHERIT_DOCS = YES
+
217 
+
218 # If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
+
219 # page for each member. If set to NO, the documentation of a member will be part
+
220 # of the file/class/namespace that contains it.
+
221 # The default value is: NO.
+
222 
+
223 SEPARATE_MEMBER_PAGES = NO
+
224 
+
225 # The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+
226 # uses this value to replace tabs by spaces in code fragments.
+
227 # Minimum value: 1, maximum value: 16, default value: 4.
+
228 
+
229 TAB_SIZE = 8
+
230 
+
231 # This tag can be used to specify a number of aliases that act as commands in
+
232 # the documentation. An alias has the form:
+
233 # name=value
+
234 # For example adding
+
235 # "sideeffect=@par Side Effects:\n"
+
236 # will allow you to put the command \sideeffect (or @sideeffect) in the
+
237 # documentation, which will result in a user-defined paragraph with heading
+
238 # "Side Effects:". You can put \n's in the value part of an alias to insert
+
239 # newlines.
+
240 
+
241 ALIASES =
+
242 
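If the sideeffect alias from the comment above were defined, documentation comments could then use it as below; the function is hypothetical, and \sideeffect expands to the "Side Effects:" paragraph only when the alias is present.

    /// Rebuilds the internal lookup table.
    /// \sideeffect Invalidates any previously returned iterators.
    void rebuild();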
+
243 # This tag can be used to specify a number of word-keyword mappings (TCL only).
+
244 # A mapping has the form "name=value". For example adding "class=itcl::class"
+
245 # will allow you to use the command class in the itcl::class meaning.
+
246 
+
247 TCL_SUBST =
+
248 
+
249 # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+
250 # only. Doxygen will then generate output that is more tailored for C. For
+
251 # instance, some of the names that are used will be different. The list of all
+
252 # members will be omitted, etc.
+
253 # The default value is: NO.
+
254 
+
255 OPTIMIZE_OUTPUT_FOR_C = NO
+
256 
+
257 # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+
258 # Python sources only. Doxygen will then generate output that is more tailored
+
259 # for that language. For instance, namespaces will be presented as packages,
+
260 # qualified scopes will look different, etc.
+
261 # The default value is: NO.
+
262 
+
263 OPTIMIZE_OUTPUT_JAVA = NO
+
264 
+
265 # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+
266 # sources. Doxygen will then generate output that is tailored for Fortran.
+
267 # The default value is: NO.
+
268 
+
269 OPTIMIZE_FOR_FORTRAN = NO
+
270 
+
271 # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+
272 # sources. Doxygen will then generate output that is tailored for VHDL.
+
273 # The default value is: NO.
+
274 
+
275 OPTIMIZE_OUTPUT_VHDL = NO
+
276 
+
277 # Doxygen selects the parser to use depending on the extension of the files it
+
278 # parses. With this tag you can assign which parser to use for a given
+
279 # extension. Doxygen has a built-in mapping, but you can override or extend it
+
280 # using this tag. The format is ext=language, where ext is a file extension, and
+
281 # language is one of the parsers supported by doxygen: IDL, Java, Javascript,
+
282 # C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
+
283 # FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
+
284 # Fortran. In the latter case the parser tries to guess whether the code is fixed
+
285 # or free formatted code, this is the default for Fortran type files), VHDL. For
+
286 # instance to make doxygen treat .inc files as Fortran files (default is PHP),
+
287 # and .f files as C (default is Fortran), use: inc=Fortran f=C.
+
288 #
+
289 # Note: For files without extension you can use no_extension as a placeholder.
+
290 #
+
291 # Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+
292 # the files are not read by doxygen.
+
293 
+
294 EXTENSION_MAPPING =
+
295 
+
296 # If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+
297 # according to the Markdown format, which allows for more readable
+
298 # documentation. See http://daringfireball.net/projects/markdown/ for details.
+
299 # The output of markdown processing is further processed by doxygen, so you can
+
300 # mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+
301 # case of backward compatibilities issues.
+
302 # The default value is: YES.
+
303 
+
304 MARKDOWN_SUPPORT = YES
+
305 
+
306 # When enabled doxygen tries to link words that correspond to documented
+
307 # classes, or namespaces to their corresponding documentation. Such a link can
+
308 # be prevented in individual cases by putting a % sign in front of the word or
+
309 # globally by setting AUTOLINK_SUPPORT to NO.
+
310 # The default value is: YES.
+
311 
+
312 AUTOLINK_SUPPORT = YES
+
313 
+
314 # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+
315 # to include (a tag file for) the STL sources as input, then you should set this
+
316 # tag to YES in order to let doxygen match functions declarations and
+
317 # definitions whose arguments contain STL classes (e.g. func(std::string);
+
318 # versus func(std::string) {}). This also makes the inheritance and collaboration
+
319 # diagrams that involve STL classes more complete and accurate.
+
320 # The default value is: NO.
+
321 
+
322 BUILTIN_STL_SUPPORT = NO
+
323 
+
324 # If you use Microsoft's C++/CLI language, you should set this option to YES to
+
325 # enable parsing support.
+
326 # The default value is: NO.
+
327 
+
328 CPP_CLI_SUPPORT = NO
+
329 
+
330 # Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+
331 # http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+
332 # will parse them like normal C++ but will assume all classes use public instead
+
333 # of private inheritance when no explicit protection keyword is present.
+
334 # The default value is: NO.
+
335 
+
336 SIP_SUPPORT = NO
+
337 
+
338 # For Microsoft's IDL there are propget and propput attributes to indicate
+
339 # getter and setter methods for a property. Setting this option to YES will make
+
340 # doxygen to replace the get and set methods by a property in the documentation.
+
341 # This will only work if the methods are indeed getting or setting a simple
+
342 # type. If this is not the case, or you want to show the methods anyway, you
+
343 # should set this option to NO.
+
344 # The default value is: YES.
+
345 
+
346 IDL_PROPERTY_SUPPORT = YES
+
347 
+
348 # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+
349 # tag is set to YES then doxygen will reuse the documentation of the first
+
350 # member in the group (if any) for the other members of the group. By default
+
351 # all members of a group must be documented explicitly.
+
352 # The default value is: NO.
+
353 
+
354 DISTRIBUTE_GROUP_DOC = NO
+
355 
+
356 # If one adds a struct or class to a group and this option is enabled, then also
+
357 # any nested class or struct is added to the same group. By default this option
+
358 # is disabled and one has to add nested compounds explicitly via \ingroup.
+
359 # The default value is: NO.
+
360 
+
361 GROUP_NESTED_COMPOUNDS = NO
+
362 
+
363 # Set the SUBGROUPING tag to YES to allow class member groups of the same type
+
364 # (for instance a group of public functions) to be put as a subgroup of that
+
365 # type (e.g. under the Public Functions section). Set it to NO to prevent
+
366 # subgrouping. Alternatively, this can be done per class using the
+
367 # \nosubgrouping command.
+
368 # The default value is: YES.
+
369 
+
370 SUBGROUPING = NO
+
371 
+
372 # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+
373 # are shown inside the group in which they are included (e.g. using \ingroup)
+
374 # instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+
375 # and RTF).
+
376 #
+
377 # Note that this feature does not work in combination with
+
378 # SEPARATE_MEMBER_PAGES.
+
379 # The default value is: NO.
+
380 
+
381 INLINE_GROUPED_CLASSES = NO
+
382 
+
383 # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+
384 # with only public data fields or simple typedef fields will be shown inline in
+
385 # the documentation of the scope in which they are defined (i.e. file,
+
386 # namespace, or group documentation), provided this scope is documented. If set
+
387 # to NO, structs, classes, and unions are shown on a separate page (for HTML and
+
388 # Man pages) or section (for LaTeX and RTF).
+
389 # The default value is: NO.
+
390 
+
391 INLINE_SIMPLE_STRUCTS = NO
+
392 
+
393 # When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+
394 # enum is documented as struct, union, or enum with the name of the typedef. So
+
395 # typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+
396 # with name TypeT. When disabled the typedef will appear as a member of a file,
+
397 # namespace, or class. And the struct will be named TypeS. This can typically be
+
398 # useful for C code in case the coding convention dictates that all compound
+
399 # types are typedef'ed and only the typedef is referenced, never the tag name.
+
400 # The default value is: NO.
+
401 
+
402 TYPEDEF_HIDES_STRUCT = NO
+
403 
+
404 # The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+
405 # cache is used to resolve symbols given their name and scope. Since this can be
+
406 # an expensive process and often the same symbol appears multiple times in the
+
407 # code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+
408 # doxygen will become slower. If the cache is too large, memory is wasted. The
+
409 # cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+
410 # is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+
411 # symbols. At the end of a run doxygen will report the cache usage and suggest
+
412 # the optimal cache size from a speed point of view.
+
413 # Minimum value: 0, maximum value: 9, default value: 0.
+
414 
+
415 LOOKUP_CACHE_SIZE = 0
+
416 
+
417 #---------------------------------------------------------------------------
+
418 # Build related configuration options
+
419 #---------------------------------------------------------------------------
+
420 
+
421 # If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
+
422 # documentation are documented, even if no documentation was available. Private
+
423 # class members and static file members will be hidden unless the
+
424 # EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+
425 # Note: This will also disable the warnings about undocumented members that are
+
426 # normally produced when WARNINGS is set to YES.
+
427 # The default value is: NO.
+
428 
+
429 EXTRACT_ALL = NO
+
430 
+
431 # If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
+
432 # be included in the documentation.
+
433 # The default value is: NO.
+
434 
+
435 EXTRACT_PRIVATE = NO
+
436 
+
437 # If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
+
438 # scope will be included in the documentation.
+
439 # The default value is: NO.
+
440 
+
441 EXTRACT_PACKAGE = NO
+
442 
+
443 # If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
+
444 # included in the documentation.
+
445 # The default value is: NO.
+
446 
+
447 EXTRACT_STATIC = YES
+
448 
+
449 # If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
+
450 # locally in source files will be included in the documentation. If set to NO,
+
451 # only classes defined in header files are included. Does not have any effect
+
452 # for Java sources.
+
453 # The default value is: YES.
+
454 
+
455 EXTRACT_LOCAL_CLASSES = NO
+
456 
+
457 # This flag is only useful for Objective-C code. If set to YES, local methods,
+
458 # which are defined in the implementation section but not in the interface are
+
459 # included in the documentation. If set to NO, only methods in the interface are
+
460 # included.
+
461 # The default value is: NO.
+
462 
+
463 EXTRACT_LOCAL_METHODS = NO
+
464 
+
465 # If this flag is set to YES, the members of anonymous namespaces will be
+
466 # extracted and appear in the documentation as a namespace called
+
467 # 'anonymous_namespace{file}', where file will be replaced with the base name of
+
468 # the file that contains the anonymous namespace. By default anonymous namespace
+
469 # are hidden.
+
470 # The default value is: NO.
+
471 
+
472 EXTRACT_ANON_NSPACES = NO
+
473 
+
474 # If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+
475 # undocumented members inside documented classes or files. If set to NO these
+
476 # members will be included in the various overviews, but no documentation
+
477 # section is generated. This option has no effect if EXTRACT_ALL is enabled.
+
478 # The default value is: NO.
+
479 
+
480 HIDE_UNDOC_MEMBERS = YES
+
481 
+
482 # If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+
483 # undocumented classes that are normally visible in the class hierarchy. If set
+
484 # to NO, these classes will be included in the various overviews. This option
+
485 # has no effect if EXTRACT_ALL is enabled.
+
486 # The default value is: NO.
+
487 
+
488 HIDE_UNDOC_CLASSES = YES
+
489 
+
490 # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+
491 # (class|struct|union) declarations. If set to NO, these declarations will be
+
492 # included in the documentation.
+
493 # The default value is: NO.
+
494 
+
495 HIDE_FRIEND_COMPOUNDS = YES
+
496 
+
497 # If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+
498 # documentation blocks found inside the body of a function. If set to NO, these
+
499 # blocks will be appended to the function's detailed documentation block.
+
500 # The default value is: NO.
+
501 
+
502 HIDE_IN_BODY_DOCS = YES
+
503 
+
504 # The INTERNAL_DOCS tag determines if documentation that is typed after a
+
505 # \internal command is included. If the tag is set to NO then the documentation
+
506 # will be excluded. Set it to YES to include the internal documentation.
+
507 # The default value is: NO.
+
508 
+
509 INTERNAL_DOCS = NO
+
510 
+
511 # If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+
512 # names in lower-case letters. If set to YES, upper-case letters are also
+
513 # allowed. This is useful if you have classes or files whose names only differ
+
514 # in case and if your file system supports case sensitive file names. Windows
+
515 # and Mac users are advised to set this option to NO.
+
516 # The default value is: system dependent.
+
517 
+
518 CASE_SENSE_NAMES = YES
+
519 
+
520 # If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+
521 # their full class and namespace scopes in the documentation. If set to YES, the
+
522 # scope will be hidden.
+
523 # The default value is: NO.
+
524 
+
525 HIDE_SCOPE_NAMES = YES
+
526 
+
527 # If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
+
528 # append additional text to a page's title, such as Class Reference. If set to
+
529 # YES the compound reference will be hidden.
+
530 # The default value is: NO.
+
531 
+
532 HIDE_COMPOUND_REFERENCE= NO
+
533 
+
534 # If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+
535 # the files that are included by a file in the documentation of that file.
+
536 # The default value is: YES.
+
537 
+
538 SHOW_INCLUDE_FILES = NO
+
539 
+
540 # If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
+
541 # grouped member an include statement to the documentation, telling the reader
+
542 # which file to include in order to use the member.
+
543 # The default value is: NO.
+
544 
+
545 SHOW_GROUPED_MEMB_INC = NO
+
546 
+
547 # If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+
548 # files with double quotes in the documentation rather than with sharp brackets.
+
549 # The default value is: NO.
+
550 
+
551 FORCE_LOCAL_INCLUDES = NO
+
552 
+
553 # If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+
554 # documentation for inline members.
+
555 # The default value is: YES.
+
556 
+
557 INLINE_INFO = NO
+
558 
+
559 # If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+
560 # (detailed) documentation of file and class members alphabetically by member
+
561 # name. If set to NO, the members will appear in declaration order.
+
562 # The default value is: YES.
+
563 
+
564 SORT_MEMBER_DOCS = YES
+
565 
+
566 # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+
567 # descriptions of file, namespace and class members alphabetically by member
+
568 # name. If set to NO, the members will appear in declaration order. Note that
+
569 # this will also influence the order of the classes in the class list.
+
570 # The default value is: NO.
+
571 
+
572 SORT_BRIEF_DOCS = YES
+
573 
+
574 # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+
575 # (brief and detailed) documentation of class members so that constructors and
+
576 # destructors are listed first. If set to NO the constructors will appear in the
+
577 # respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+
578 # Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+
579 # member documentation.
+
580 # Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+
581 # detailed member documentation.
+
582 # The default value is: NO.
+
583 
+
584 SORT_MEMBERS_CTORS_1ST = NO
+
585 
+
586 # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+
587 # of group names into alphabetical order. If set to NO the group names will
+
588 # appear in their defined order.
+
589 # The default value is: NO.
+
590 
+
591 SORT_GROUP_NAMES = NO
+
592 
+
593 # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+
594 # fully-qualified names, including namespaces. If set to NO, the class list will
+
595 # be sorted only by class name, not including the namespace part.
+
596 # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+
597 # Note: This option applies only to the class list, not to the alphabetical
+
598 # list.
+
599 # The default value is: NO.
+
600 
+
601 SORT_BY_SCOPE_NAME = YES
+
602 
+
603 # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+
604 # type resolution of all parameters of a function it will reject a match between
+
605 # the prototype and the implementation of a member function even if there is
+
606 # only one candidate or it is obvious which candidate to choose by doing a
+
607 # simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+
608 # accept a match between prototype and implementation in such cases.
+
609 # The default value is: NO.
+
610 
+
611 STRICT_PROTO_MATCHING = NO
+
612 
+
613 # The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
+
614 # list. This list is created by putting \todo commands in the documentation.
+
615 # The default value is: YES.
+
616 
+
617 GENERATE_TODOLIST = YES
+
618 
+
619 # The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
+
620 # list. This list is created by putting \test commands in the documentation.
+
621 # The default value is: YES.
+
622 
+
623 GENERATE_TESTLIST = YES
+
624 
+
625 # The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
+
626 # list. This list is created by putting \bug commands in the documentation.
+
627 # The default value is: YES.
+
628 
+
629 GENERATE_BUGLIST = YES
+
630 
+
631 # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
+
632 # the deprecated list. This list is created by putting \deprecated commands in
+
633 # the documentation.
+
634 # The default value is: YES.
+
635 
+
636 GENERATE_DEPRECATEDLIST= YES
+
637 
+
638 # The ENABLED_SECTIONS tag can be used to enable conditional documentation
+
639 # sections, marked by \if <section_label> ... \endif and \cond <section_label>
+
640 # ... \endcond blocks.
+
641 
+
642 ENABLED_SECTIONS =
+
643 
+
644 # The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+
645 # initial value of a variable or macro / define can have for it to appear in the
+
646 # documentation. If the initializer consists of more lines than specified here
+
647 # it will be hidden. Use a value of 0 to hide initializers completely. The
+
648 # appearance of the value of individual variables and macros / defines can be
+
649 # controlled using \showinitializer or \hideinitializer command in the
+
650 # documentation regardless of this setting.
+
651 # Minimum value: 0, maximum value: 10000, default value: 30.
+
652 
+
653 MAX_INITIALIZER_LINES = 30
+
654 
+
655 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+
656 # the bottom of the documentation of classes and structs. If set to YES, the
+
657 # list will mention the files that were used to generate the documentation.
+
658 # The default value is: YES.
+
659 
+
660 SHOW_USED_FILES = NO
+
661 
+
662 # Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+
663 # will remove the Files entry from the Quick Index and from the Folder Tree View
+
664 # (if specified).
+
665 # The default value is: YES.
+
666 
+
667 SHOW_FILES = YES
+
668 
+
669 # Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+
670 # page. This will remove the Namespaces entry from the Quick Index and from the
+
671 # Folder Tree View (if specified).
+
672 # The default value is: YES.
+
673 
+
674 SHOW_NAMESPACES = YES
+
675 
+
676 # The FILE_VERSION_FILTER tag can be used to specify a program or script that
+
677 # doxygen should invoke to get the current version for each file (typically from
+
678 # the version control system). Doxygen will invoke the program by executing (via
+
679 # popen()) the command command input-file, where command is the value of the
+
680 # FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
+
681 # by doxygen. Whatever the program writes to standard output is used as the file
+
682 # version. For an example see the documentation.
+
683 
+
684 FILE_VERSION_FILTER =
+
685 
+
686 # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+
687 # by doxygen. The layout file controls the global structure of the generated
+
688 # output files in an output format independent way. To create the layout file
+
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
+LAYOUT_FILE =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. See also \cite for info how to create references.
+
+CITE_BIB_FILES =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
+WARNINGS = YES
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
+
+WARN_IF_DOC_ERROR = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO, doxygen will only warn about wrong or incomplete
+# parameter documentation, but not about the absence of documentation.
+# The default value is: NO.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
+# Note: If this tag is empty the current directory is searched.
+
+INPUT = ../glm \
+ .
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: http://www.gnu.org/software/libiconv) for the list of
+# possible encodings.
+# The default value is: UTF-8.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# read by doxygen.
+#
+# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp,
+# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
+# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
+# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd,
+# *.vhdl, *.ucf, *.qsf, *.as and *.js.
+
+FILE_PATTERNS = *.hpp \
+ *.doxy
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
+
+RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+# The default value is: NO.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS = *
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER = YES
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# function all documented functions referencing it will be listed.
+# The default value is: NO.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
+REFERENCES_RELATION = YES
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see http://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS = YES
+
+# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the
+# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
+# cost of reduced performance. This can be particularly helpful with template
+# rich C++ code for which doxygen's built-in parser lacks the necessary type
+# information.
+# Note: The availability of this option depends on whether or not doxygen was
+# compiled with the --with-libclang option.
+# The default value is: NO.
+
+CLANG_ASSISTED_PARSING = NO
+
+# If clang assisted parsing is enabled you can provide the compiler with command
+# line options that you would normally use when invoking the compiler. Note that
+# the include paths will already be set by doxygen for the files and directories
+# specified with INPUT and INCLUDE_PATH.
+# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
+
+CLANG_OPTIONS =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX = NO
+
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
+# The default value is: YES.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML the header file that includes any scripts and style sheets
+# that doxygen needs, which is dependent on the configuration options used (e.g.
+# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# cascading style sheets that are included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list). For an example see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_STYLESHEET =
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the style sheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to YES can help to show when doxygen was last run and thus if the
+# documentation is up to date.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP = NO
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries 1 will produce a full collapsed tree by default. 0 is a special value
+# representing an infinite number of entries and will result in a full expanded
+# tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler (hhc.exe). If non-empty,
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION =
+
+# The GENERATE_CHI flag controls if a separate .chi index file is generated
+# (YES) or that it should be included in the master .chm file (NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI = NO
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_INDEX_ENCODING =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated
+# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
+# enables the Previous and Next buttons.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_NAMESPACE = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_SECT_FILTER_ATTRS =
+
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated, together with the HTML files, they form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files needs
+# to be copied into the plugins directory of eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW = NO
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH = 250
+
+# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want to formulas look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH = http://www.mathjax.org/mathjax
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SEARCHENGINE = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using Javascript. There
+# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
+# setting. When disabled, doxygen will generate a PHP script for searching and
+# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
+# and searching needs to be provided by external tools. See the section
+# "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SERVER_BASED_SEARCH = NO
+
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of
+# to a relative location where the documentation can be found. The format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
+# The default value is: YES.
+
+GENERATE_LATEX = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when enabling USE_PDFLATEX this option is only used for generating
+# bitmaps for formulas in the HTML output, but not in the Makefile that is
+# written to the output directory.
+# The default file is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PAPER_TYPE = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. The package can be specified just
+# by its name or with the correct syntax as to be used with the LaTeX
+# \usepackage command. To get the times font for instance you can specify :
+# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
+# To use the option intlimits with the amsmath package you can specify:
+# EXTRA_PACKAGES=[intlimits]{amsmath}
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
+# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
+# string, for the replacement values of the other commands the user is referred
+# to HTML_HEADER.
1674 # This tag \ No newline at end of file diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00048.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00048.html new file mode 100644 index 0000000000000000000000000000000000000000..f1e6b7087338c38b2ec38445c4a429c252e84b5a --- /dev/null +++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00048.html @@ -0,0 +1,108 @@ + + + + + + +0.9.9 API documentation: mat2x2.hpp File Reference + + + + + + + + + + +
+
+ + + + + + + +
+
0.9.9 API documentation +
+
+
+ + + + + + +
+
+ + +
+ +
+ + +
+
+
+
mat2x2.hpp File Reference
+
+
+ +

Core features +More...

+ +

Go to the source code of this file.

+

Detailed Description

+

Core features

+ +

Definition in file mat2x2.hpp.

+
+ + + + diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00048_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00048_source.html new file mode 100644 index 0000000000000000000000000000000000000000..29c2a5219f90c0563b86eac701c00aabeaf4f388 --- /dev/null +++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00048_source.html @@ -0,0 +1,110 @@ + + + + + + +0.9.9 API documentation: mat2x2.hpp Source File + + + + + + + + + + +
+
+ + + + + + + +
+
0.9.9 API documentation +
+
+
+ + + + + + +
+
+ + +
+ +
+ + +
+
+
+
mat2x2.hpp
+
+ + + + + diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00049.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00049.html new file mode 100644 index 0000000000000000000000000000000000000000..02371af4af5696e939c802ad49e498a669beb909 --- /dev/null +++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00049.html @@ -0,0 +1,108 @@ + + + + + + +0.9.9 API documentation: mat2x3.hpp File Reference + + + + + + + + + + +
+
+ + + + + + + +
+
0.9.9 API documentation +
+
+
+ + + + + + +
+
+ + +
+ +
+ + +
+
+
+
mat2x3.hpp File Reference
+
+
+ +

Core features +More...

+ +

Go to the source code of this file.

+

Detailed Description

+

Core features

+ +

Definition in file mat2x3.hpp.

+
+ + + + diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00049_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00049_source.html new file mode 100644 index 0000000000000000000000000000000000000000..9b32dcebba3d5a696127f17ce0f4a800baba5106 --- /dev/null +++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00049_source.html @@ -0,0 +1,110 @@ + + + + + + +0.9.9 API documentation: mat2x3.hpp Source File + + + + + + + + + + +
+
+ + + + + + + +
+
0.9.9 API documentation +
+
+
+ + + + + + +
+
+ + +
+ +
+ + +
+
+
+
mat2x3.hpp
+
+ + + + + diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00050.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00050.html new file mode 100644 index 0000000000000000000000000000000000000000..edc8e0d3a6820808b57f7590655ba781fa67cdbc --- /dev/null +++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00050.html @@ -0,0 +1,108 @@ + + + + + + +0.9.9 API documentation: mat2x4.hpp File Reference + + + + + + + + + + +
+
+ + + + + + + +
+
0.9.9 API documentation +
+
+
+ + + + + + +
+
+ + +
+ +
+ + +
+
+
+
mat2x4.hpp File Reference
+
+
+ +

Core features +More...

+ +

Go to the source code of this file.

+

Detailed Description

+

Core features

+ +

Definition in file mat2x4.hpp.

+
+ + + + diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00050_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00050_source.html new file mode 100644 index 0000000000000000000000000000000000000000..ef9de3a5d66ef529ce5f4daa9a62f57f9e97227c --- /dev/null +++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00050_source.html @@ -0,0 +1,110 @@ + + + + + + +0.9.9 API documentation: mat2x4.hpp Source File + + + + + + + + + + +
+
+ + + + + + + +
+
0.9.9 API documentation +
+
+
+ + + + + + +
+
+ + +
+ +
+ + +
+
+
+
mat2x4.hpp
+
+ + + + + diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00051.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00051.html new file mode 100644 index 0000000000000000000000000000000000000000..fe42f0de1f1659177f4804d6248dcd695ad4ea0a --- /dev/null +++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00051.html @@ -0,0 +1,108 @@ + + + + + + +0.9.9 API documentation: mat3x2.hpp File Reference + + + + + + + + + + +
+
+ + + + + + + +
+
0.9.9 API documentation +
+
+
+ + + + + + +
+
+ + +
+ +
+ + +
+
+
+
mat3x2.hpp File Reference
+
+
+ +

Core features +More...

+ +

Go to the source code of this file.

+

Detailed Description

+

Core features

+ +

Definition in file mat3x2.hpp.

+
+ + + + diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00051_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00051_source.html new file mode 100644 index 0000000000000000000000000000000000000000..7ff21cc9ebb6cc262f092f81c1a4f01ec86c8fff --- /dev/null +++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00051_source.html @@ -0,0 +1,110 @@ + + + + + + +0.9.9 API documentation: mat3x2.hpp Source File + + + + + + + + + + +
+
+ + + + + + + +
+
0.9.9 API documentation +
+
+
+ + + + + + +
+
+ + +
+ +
+ + +
+
+
+
mat3x2.hpp
+
+ + + + + diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00052.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00052.html new file mode 100644 index 0000000000000000000000000000000000000000..e54365b581cfe7a9d4e237320ba1df37fa1e739a --- /dev/null +++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00052.html @@ -0,0 +1,108 @@ + + + + + + +0.9.9 API documentation: mat3x3.hpp File Reference + + + + + + + + + + +
+
+ + + + + + + +
+
0.9.9 API documentation +
+
+
+ + + + + + +
+
+ + +
+ +
+ + +
+
+
+
mat3x3.hpp File Reference
+
+
+ +

Core features +More...

+ +

Go to the source code of this file.

+

Detailed Description

+

Core features

+ +

Definition in file mat3x3.hpp.

+
+ + + + diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00052_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00052_source.html new file mode 100644 index 0000000000000000000000000000000000000000..d05398ccfe4738e5d0ce996818e471efc1559f51 --- /dev/null +++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00052_source.html @@ -0,0 +1,109 @@ + + + + + + +0.9.9 API documentation: mat3x3.hpp Source File + + + + + + + + + + +
+
+ + + + + + + +
+
0.9.9 API documentation +
+
+
+ + + + + + +
+
+ + +
+ +
+ + +
+
+
+
mat3x3.hpp
+
+ + + + + diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00053.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00053.html new file mode 100644 index 0000000000000000000000000000000000000000..595a4ebdba3a8d0b67aa6e4416a735297eda9736 --- /dev/null +++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00053.html @@ -0,0 +1,108 @@ + + + + + + +0.9.9 API documentation: mat3x4.hpp File Reference + + + + + + + + + + +
+
+ + + + + + + +
+
0.9.9 API documentation +
+
+
+ + + + + + +
+
+ + +
+ +
+ + +
+
+
+
mat3x4.hpp File Reference
+
+
+ +

Core features +More...

+ +

Go to the source code of this file.

+

Detailed Description

+

Core features

+ +

Definition in file mat3x4.hpp.

+
+ + + + diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00053_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00053_source.html new file mode 100644 index 0000000000000000000000000000000000000000..ceaa6234bf801daaa98e4688ad7babbaca1cface --- /dev/null +++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00053_source.html @@ -0,0 +1,109 @@ + + + + + + +0.9.9 API documentation: mat3x4.hpp Source File + + + + + + + + + + +
+
+ + + + + + + +
+
0.9.9 API documentation +
+
+
+ + + + + + +
+
+ + +
+ +
+ + +
+
+
+
mat3x4.hpp
+
+ + + + + diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00054.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00054.html new file mode 100644 index 0000000000000000000000000000000000000000..8a0645635c9b65c849d782a7ff0bb40e6a80a71e --- /dev/null +++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00054.html @@ -0,0 +1,108 @@ + + + + + + +0.9.9 API documentation: mat4x2.hpp File Reference + + + + + + + + + + +
+
+ + + + + + + +
+
0.9.9 API documentation +
+
+
+ + + + + + +
+
+ + +
+ +
+ + +
+
+
+
mat4x2.hpp File Reference
+
+
+ +

Core features +More...

+ +

Go to the source code of this file.

+

Detailed Description

+

Core features

+ +

Definition in file mat4x2.hpp.

+
+ + + + diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00054_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00054_source.html new file mode 100644 index 0000000000000000000000000000000000000000..5e8fe2dc4de660acafd7df1fd5156677de3b4d66 --- /dev/null +++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00054_source.html @@ -0,0 +1,109 @@ + + + + + + +0.9.9 API documentation: mat4x2.hpp Source File + + + + + + + + + + +
+
+ + + + + + + +
+
0.9.9 API documentation +
+
+
+ + + + + + +
+
+ + +
+ +
+ + +
+
+
+
mat4x2.hpp
+
+
+Go to the documentation of this file.
1 
+
4 #pragma once
+ + + +
8 #include "./ext/matrix_float4x2_precision.hpp"
+
9 
+ +
Core features
+
Core features
+
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00055.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00055.html
new file mode 100644
index 0000000000000000000000000000000000000000..3905618f13bcc917f1813434045629ce81a3c056
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00055.html
@@ -0,0 +1,108 @@
+0.9.9 API documentation: mat4x3.hpp File Reference
+Core features More...
+Go to the source code of this file.
+Detailed Description
+Core features
+Definition in file mat4x3.hpp.
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00055_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00055_source.html
new file mode 100644
index 0000000000000000000000000000000000000000..85f4e8fea5ef94f6cd6dd09bba8e99b5f43ed717
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00055_source.html
@@ -0,0 +1,109 @@
+0.9.9 API documentation: mat4x3.hpp Source File
+mat4x3.hpp
diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00056.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00056.html
new file mode 100644
index 0000000000000000000000000000000000000000..b4a33834298c0607e9f4d146389a4165ec54d016
--- /dev/null
+++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00056.html
@@ -0,0 +1,108 @@
+0.9.9 API documentation: mat4x4.hpp File Reference
+Core features More...
+Go to the source code of this file.
+Detailed Description
+Core features
+Definition in file mat4x4.hpp.
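mat4x4.hpp is the last of the core matrix headers documented with content here. A companion sketch under the same assumptions (GLM installed, not part of this diff), using the square glm::mat4 and the column-major indexing GLM shares with GLSL, where m[c] is column c:

    // Minimal sketch: the square 4x4 type documented by mat4x4.hpp.
    // Assumes an installed GLM; not part of the repository diff.
    #include <glm/mat4x4.hpp>
    #include <glm/vec4.hpp>

    int main() {
        glm::mat4 m(1.0f);                          // identity
        m[3] = glm::vec4(1.0f, 2.0f, 3.0f, 1.0f);   // column 3: the translation column
        glm::vec4 p = m * glm::vec4(0.0f, 0.0f, 0.0f, 1.0f);
        return p == glm::vec4(1.0f, 2.0f, 3.0f, 1.0f) ? 0 : 1;
    }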
[a00056_source.html through a00171.html in submodules/diff-gaussian-rasterization/third_party/glm/doc/api/: each added as a new empty file — new file mode 100644, index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391, the empty blob]
100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00171_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00171_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00172.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00172.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00172_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00172_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00173.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00173.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00173_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00173_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00174.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00174.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00174_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00174_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00175.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00175.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00175_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00175_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00176.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00176.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00176_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00176_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00177.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00177.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00177_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00177_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00178.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00178.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00178_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00178_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00179.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00179.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00179_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00179_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00180.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00180.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00180_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00180_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00181.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00181.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00181_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00181_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00182.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00182.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00182_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00182_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00183.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00183.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00183_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00183_source.html 
new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00184.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00184.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00184_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00184_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00185.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00185.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00185_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00185_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00186.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00186.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00186_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00186_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00187.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00187.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00187_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00187_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00188.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00188.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00188_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00188_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00189.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00189.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00189_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00189_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00190.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00190.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00190_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00190_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00191.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00191.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00191_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00191_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00192.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00192.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00192_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00192_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00193.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00193.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00193_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00193_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00194.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00194.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00194_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00194_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00195.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00195.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00195_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00195_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00196.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00196.html new file mode 
100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00196_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00196_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00197.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00197.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00197_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00197_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00198.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00198.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00198_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00198_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00199.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00199.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00199_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00199_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00200.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00200.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00200_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00200_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00201.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00201.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00201_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00201_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00202.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00202.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00202_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00202_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00203.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00203.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00203_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00203_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00204.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00204.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00204_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00204_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00205.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00205.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00205_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00205_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00206.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00206.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00206_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00206_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00207.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00207.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00207_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00207_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00208.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00208.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00208_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00208_source.html 
new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00209.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00209.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00209_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00209_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00210.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00210.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00210_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00210_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00211.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00211.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00211_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00211_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00212.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00212.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00212_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00212_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00213.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00213.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00213_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00213_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00214.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00214.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00214_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00214_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00215.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00215.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00215_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00215_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00216.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00216.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00216_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00216_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00217.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00217.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00217_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00217_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00218.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00218.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00218_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00218_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00219.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00219.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00219_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00219_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00220.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00220.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00220_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00220_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00221.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00221.html new file mode 
100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00221_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00221_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00222.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00222.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00222_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00222_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00223.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00223.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00223_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00223_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00224.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00224.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00224_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00224_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00225.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00225.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00225_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00225_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00226.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00226.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00226_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00226_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00227.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00227.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00227_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00227_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00228.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00228.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00228_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00228_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00229.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00229.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00229_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00229_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00230.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00230.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00230_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00230_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00231.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00231.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00231_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00231_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00232.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00232.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00232_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00232_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00233.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00233.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00233_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00233_source.html 
new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00234.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00234.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00234_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00234_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00235.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00235.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00235_source.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00235_source.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00241.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00241.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00242.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00242.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00243.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00243.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00244.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00244.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00245.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00245.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00246.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00246.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00247.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00247.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00248.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00248.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00249.html 
b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00249.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00250.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00250.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00251.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00251.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00252.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00252.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00253.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00253.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00254.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00254.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00255.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00255.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00256.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00256.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00257.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00257.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00258.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00258.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00259.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00259.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00260.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00260.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00261.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00261.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00262.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00262.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00263.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00263.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00264.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00264.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00265.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00265.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00266.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00266.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00267.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00267.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00268.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00268.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00269.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00269.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00270.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00270.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00271.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00271.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00272.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00272.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00273.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00273.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00274.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00274.html new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00275.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00275.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00276.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00276.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00277.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00277.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00278.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00278.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00279.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00279.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00280.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00280.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00281.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00281.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00282.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00282.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00283.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00283.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00284.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00284.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00285.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00285.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00286.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00286.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00287.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00287.html new 
file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00288.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00288.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00289.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00289.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00290.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00290.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00291.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00291.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00292.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00292.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00293.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00293.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00294.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00294.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00295.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00295.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00296.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00296.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00297.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00297.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00298.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00298.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00299.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00299.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00300.html 
b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00300.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00301.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00301.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00302.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00302.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00303.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00303.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00304.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00304.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00305.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00305.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00306.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00306.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00307.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00307.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00308.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00308.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00309.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00309.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00310.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00310.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00311.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00311.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00312.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00312.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00313.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00313.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00314.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00314.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00315.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00315.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00316.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00316.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00317.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00317.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00318.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00318.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00319.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00319.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00320.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00320.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00321.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00321.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00322.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00322.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00323.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00323.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00324.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00324.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00325.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00325.html new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00326.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00326.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00327.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00327.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00328.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00328.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00329.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00329.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00330.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00330.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00331.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00331.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00332.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00332.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00333.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00333.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00334.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00334.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00335.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00335.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00336.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00336.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00337.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00337.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00338.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00338.html new 
file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00339.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00339.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00340.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00340.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00341.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00341.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00342.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00342.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00343.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00343.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00344.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00344.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00345.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00345.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00346.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00346.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00347.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00347.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00348.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00348.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00349.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00349.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00350.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00350.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00351.html 
b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00351.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00352.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00352.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00353.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00353.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00354.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00354.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00355.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00355.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00356.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00356.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00357.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00357.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00358.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00358.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00359.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00359.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00360.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00360.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00361.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00361.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00362.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00362.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00363.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00363.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00364.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00364.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00365.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00365.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00366.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00366.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00367.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00367.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00368.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00368.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00369.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00369.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00370.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00370.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00371.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00371.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00372.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00372.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00373.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00373.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00374.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/a00374.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/arrowdown.png b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/arrowdown.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/arrowright.png b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/arrowright.png new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/bc_s.png b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/bc_s.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/bdwn.png b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/bdwn.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/closed.png b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/closed.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dir_033f5edb0915b828d2c46ed4804e5503.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dir_033f5edb0915b828d2c46ed4804e5503.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dir_3a581ba30d25676e4b797b1f96d53b45.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dir_3a581ba30d25676e4b797b1f96d53b45.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dir_44e5e654415abd9ca6fdeaddaff8565e.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dir_44e5e654415abd9ca6fdeaddaff8565e.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dir_4c6bd29c73fa4e5a2509e1c15f846751.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dir_4c6bd29c73fa4e5a2509e1c15f846751.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dir_5189610d3ba09ec39b766fb99b34cd93.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dir_5189610d3ba09ec39b766fb99b34cd93.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dir_6b66465792d005310484819a0eb0b0d3.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dir_6b66465792d005310484819a0eb0b0d3.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dir_9e5fe034a00e89334fd5186c3e7db156.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dir_9e5fe034a00e89334fd5186c3e7db156.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dir_a8bee7be44182a33f3820393ae0b105d.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dir_a8bee7be44182a33f3820393ae0b105d.html new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dir_cef2d71d502cb69a9252bca2297d9549.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dir_cef2d71d502cb69a9252bca2297d9549.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dir_d9496f0844b48bc7e53b5af8c99b9ab2.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dir_d9496f0844b48bc7e53b5af8c99b9ab2.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dir_f35778ec600a1b9bbc4524e62e226aa2.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dir_f35778ec600a1b9bbc4524e62e226aa2.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/doc.png b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/doc.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/doxygen.css b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/doxygen.css new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/doxygen.png b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/doxygen.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dynsections.js b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/dynsections.js new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/files.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/files.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/folderclosed.png b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/folderclosed.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/folderopen.png b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/folderopen.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/index.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/index.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/jquery.js b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/jquery.js new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/logo-mini.png b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/logo-mini.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/modules.html b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/modules.html new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/nav_f.png b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/nav_f.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/nav_g.png b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/nav_g.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/nav_h.png b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/nav_h.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/open.png b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/open.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/splitbar.png b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/splitbar.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/sync_off.png b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/sync_off.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/sync_on.png b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/sync_on.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/tab_a.png b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/tab_a.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/tab_b.png b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/tab_b.png new file mode 100644 index 0000000000000000000000000000000000000000..e14114dc75fef8984382122e778c4a0948dfcd6d Binary files /dev/null and b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/tab_b.png differ diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/tab_h.png b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/tab_h.png new file mode 100644 index 0000000000000000000000000000000000000000..eddb3f2d6ece97516cf389f7fe69ea063b04e0a3 Binary files /dev/null and 
b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/tab_h.png differ diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/tab_s.png b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/tab_s.png new file mode 100644 index 0000000000000000000000000000000000000000..8d36eef701f28f3037288ac442aa5c51ea79ed0d Binary files /dev/null and b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/tab_s.png differ diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/tabs.css b/submodules/diff-gaussian-rasterization/third_party/glm/doc/api/tabs.css new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/submodules/diff-gaussian-rasterization/third_party/glm/doc/man.doxy b/submodules/diff-gaussian-rasterization/third_party/glm/doc/man.doxy new file mode 100644 index 0000000000000000000000000000000000000000..8eab2f641776daa6068ee575d8f5473244fd49ec --- /dev/null +++ b/submodules/diff-gaussian-rasterization/third_party/glm/doc/man.doxy @@ -0,0 +1,2415 @@ +# Doxyfile 1.8.10 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all text +# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv +# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv +# for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = "0.9.9 API documentation" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. + +PROJECT_LOGO = theme/logo-mini.png + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. 
If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = . + +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise cause +# performance problems for the file system. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief +# description of a member or function before the detailed description. +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity): The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = "The $name class " \ + "The $name widget " \ + "The $name file " \ + is \ + provides \ + specifies \ + contains \ + represents \ + a \ + an \ + the + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO.
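+# For illustration (hypothetical member, not from this project): with
+# ALWAYS_DETAILED_SEC = YES, a member documented only as
+#   //!< Horizontal offset.
+# would still receive a detailed section repeating that one-line brief;
+# this file keeps the default NO.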
+ +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path +# before file names in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used. +# The default value is: YES. + +FULL_PATH_NAMES = NO + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = "C:/Documents and Settings/Groove/ " + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful if your file system doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = YES + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = YES + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that Rational Rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES.
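+# For example (hypothetical names): if Base::length() carries a comment
+# block and Derived::length() re-implements it without one, then
+# Derived::length() reuses the documentation of Base::length() while this
+# tag is YES.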
+ +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 8 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". You can put \n's in the value part of an alias to insert +# newlines. + +ALIASES = + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding "class=itcl::class" +# will allow you to use the command class in the itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, Javascript, +# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: +# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: +# Fortran. In the latter case the parser tries to guess whether the code is fixed +# or free formatted code, this is the default for Fortran type files), VHDL. For +# instance to make doxygen treat .inc files as Fortran files (default is PHP), +# and .f files as C (default is Fortran), use: inc=Fortran f=C. +# +# Note: For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen.
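+# For instance, a project that mixes CUDA into a C++ tree could map the
+# CUDA extensions onto the C++ parser (illustrative only; this file leaves
+# the mapping empty):
+# EXTENSION_MAPPING = cu=C++ cuh=C++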
+ +EXTENSION_MAPPING = + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibility issues. +# The default value is: YES. + +MARKDOWN_SUPPORT = YES + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should set this +# tag to YES in order to let doxygen match function declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); +# versus func(std::string) {}). This also makes the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. +# The default value is: NO. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. +# The default value is: NO. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# If one adds a struct or class to a group and this option is enabled, then also +# any nested class or struct is added to the same group. By default this option +# is disabled and one has to add nested compounds explicitly via \ingroup. +# The default value is: NO. + +GROUP_NESTED_COMPOUNDS = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES.
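+# For reference, the member groups this tag acts on are declared in the
+# sources roughly like this (hypothetical snippet):
+#   /// @name Accessors
+#   /// @{
+#   float x() const;
+#   float y() const;
+#   /// @}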
+ +SUBGROUPING = NO + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. + +EXTRACT_ALL = NO + +# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be +# included in the documentation. +# The default value is: NO. 
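+# For example (hypothetical declaration), with EXTRACT_STATIC = YES a
+# file-scope helper such as
+#   static float clamp01(float v);
+# shows up in the documentation of its file despite having internal linkage.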
+ +EXTRACT_STATIC = YES + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO, +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = NO + +# This flag is only useful for Objective-C code. If set to YES, local methods, +# which are defined in the implementation section but not in the interface, are +# included in the documentation. If set to NO, only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespaces +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = YES + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO, these classes will be included in the various overviews. This option +# has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = YES + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# (class|struct|union) declarations. If set to NO, these declarations will be +# included in the documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = YES + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. If set to NO, these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = YES + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file +# names in lower-case letters. If set to YES, upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. +# The default value is: system dependent. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES, the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = YES + +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. +# The default value is: NO.
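+# For example (illustrative type name): at NO the page for a class vec3 is
+# titled "vec3 Class Reference"; at YES it would be titled simply "vec3".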
+ +HIDE_COMPOUND_REFERENCE= NO + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = NO + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = NO + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. + +SORT_BRIEF_DOCS = YES + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. + +SORT_BY_SCOPE_NAME = YES + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo +# list. This list is created by putting \todo commands in the documentation. 
+# The default value is: YES.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
+# list. This list is created by putting \test commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if ... \endif and \cond
+# ... \endcond blocks.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES, the
+# list will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES = NO
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output is used as
+# the file version. For an example see the documentation.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
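+# For example, one could first dump the default layout and then point this tag
+# at the (edited) file; the file name is just the default mentioned above:
+#
+#   doxygen -l DoxygenLayout.xml
+#   LAYOUT_FILE = DoxygenLayout.xml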
+ +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. See also \cite for info how to create references. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. + +WARN_IF_UNDOCUMENTED = YES + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO, doxygen will only warn about wrong or incomplete +# parameter documentation, but not about the absence of documentation. +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING +# Note: If this tag is empty the current directory is searched. 
+ +INPUT = ../glm \ + . + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: http://www.gnu.org/software/libiconv) for the list of +# possible encodings. +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# read by doxygen. +# +# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, +# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, +# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, +# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, +# *.vhdl, *.ucf, *.qsf, *.as and *.js. + +FILE_PATTERNS = *.hpp \ + *.doxy + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories use the pattern */test/* + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank all +# files are included. + +EXAMPLE_PATTERNS = * + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude commands +# irrespective of the value of the RECURSIVE tag. +# The default value is: NO. 
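+# For illustration (paths are hypothetical): with EXAMPLE_PATH = examples and
+# EXAMPLE_RECURSIVE = YES, a \include demo.cpp command could also pick up
+# examples/snippets/demo.cpp instead of only files directly in examples/.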
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER = YES
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# function all documented functions referencing it will be listed.
+# The default value is: NO.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
+REFERENCES_RELATION = YES
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see http://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS = YES
+
+# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the
+# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
+# cost of reduced performance. This can be particularly helpful with template
+# rich C++ code for which doxygen's built-in parser lacks the necessary type
+# information.
+# Note: The availability of this option depends on whether or not doxygen was
+# compiled with the --with-libclang option.
+# The default value is: NO.
+
+CLANG_ASSISTED_PARSING = NO
+
+# If clang assisted parsing is enabled you can provide the compiler with command
+# line options that you would normally use when invoking the compiler. Note that
+# the include paths will already be set by doxygen for the files and directories
+# specified with INPUT and INCLUDE_PATH.
+# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
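+# A sketch of what this might look like (the flags are illustrative and not
+# part of this project's build):
+#
+#   CLANG_OPTIONS = -std=c++11 -I../include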
+ +CLANG_OPTIONS = + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. + +ALPHABETICAL_INDEX = NO + +# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in +# which the alphabetical index list will be split. +# Minimum value: 1, maximum value: 20, default value: 5. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all classes will +# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag +# can be used to specify a prefix (or a list of prefixes) that should be ignored +# while generating the index headers. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output +# The default value is: YES. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. +# +# To get valid HTML the header file that includes any scripts and style sheets +# that doxygen needs, which is dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW). It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. 
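+# For example (file names are illustrative): after generating the defaults with
+#   doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# one could edit new_footer.html and set HTML_FOOTER = new_footer.html.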
+# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. +# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# cascading style sheets that are included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). For an example see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the style sheet and background images according to +# this color. Hue is specified as an angle on a colorwheel, see +# http://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. +# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. 
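+# Worked example: a value of 120 applies a gamma of 120/100 = 1.2 and darkens
+# the output, whereas the default of 80 applies a gamma of 0.8.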
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to YES can help to show when doxygen was last run and thus if the
+# documentation is up to date.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP = NO
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries to 1 will produce a fully collapsed tree by default. 0 is a special
+# value representing an infinite number of entries and will result in a fully
+# expanded tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+ +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on +# Windows. +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler (hhc.exe). If non-empty, +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls if a separate .chi index file is generated +# (YES) or that it should be included in the master .chm file (NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it +# enables the Previous and Next buttons. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. 
For more information please see Qt Help Project / Namespace +# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- +# folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location of Qt's +# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the +# generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. 
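+# For example, a tree-view-only navigation would combine:
+#
+#   DISABLE_INDEX = YES
+#   GENERATE_TREEVIEW = YES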
+
+DISABLE_INDEX = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW = NO
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH = 250
+
+# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
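+# For example, to render formulas with a locally installed copy of MathJax
+# placed next to the HTML output directory (matching the MATHJAX_RELPATH
+# example below):
+#
+#   USE_MATHJAX = YES
+#   MATHJAX_RELPATH = ../mathjax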
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH = http://www.mathjax.org/mathjax
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow; in that
+# case enabling SERVER_BASED_SEARCH may provide a better solution. It is
+# possible to search using the keyboard; to jump to the search box use
+# <access key> + S (what the <access key> is depends on the OS and browser, but
+# it is typically <CTRL>, <ALT>/