Dataset columns:

  column              type     range / values
  file_path           string   length 20 – 207
  content             string   length 5 – 3.85M
  size                int64    5 – 3.85M
  lang                string   9 classes
  avg_line_length     float64  1.33 – 100
  max_line_length     int64    4 – 993
  alphanum_fraction   float64  0.26 – 0.93
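The last five columns are per-file statistics derived from content. As a rough illustration of what they measure, here is a minimal sketch of how such statistics could be recomputed for a single file; the exact conventions used to build the dataset (bytes vs. characters, newline handling, how lang is labelled) are not documented here, so the helper name describe_file, the extension-based lang guess, and the example path are assumptions, not the dataset's actual build code.

```python
# Minimal sketch (assumptions noted inline): recompute the derived columns
# for one file, in the same shape as the records previewed below.
from pathlib import Path


def describe_file(path: str) -> dict:
    text = Path(path).read_text(encoding="utf-8", errors="replace")
    lines = text.splitlines() or [""]
    line_lengths = [len(line) for line in lines]
    return {
        "file_path": path,
        "content": text,
        "size": len(text.encode("utf-8")),               # assumed: size counted in bytes
        "lang": Path(path).suffix.lstrip(".") or "text",  # assumed: label derived from extension
        "avg_line_length": sum(line_lengths) / len(line_lengths),
        "max_line_length": max(line_lengths),
        "alphanum_fraction": sum(c.isalnum() for c in text) / max(len(text), 1),
    }


if __name__ == "__main__":
    record = describe_file("warp/examples/core/example_raycast.py")  # hypothetical local path
    print({k: v for k, v in record.items() if k != "content"})
```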
NVIDIA/warp/warp/examples/core/example_mesh_intersect.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. ############################################################################# # Example Mesh Intersection # # Show how to use built-in BVH query to test if two triangle meshes intersect. # ############################################################################## import os import numpy as np from pxr import Usd, UsdGeom import warp as wp import warp.examples import warp.render @wp.func def cw_min(a: wp.vec3, b: wp.vec3): return wp.vec3(wp.min(a[0], b[0]), wp.min(a[1], b[1]), wp.min(a[2], b[2])) @wp.func def cw_max(a: wp.vec3, b: wp.vec3): return wp.vec3(wp.max(a[0], b[0]), wp.max(a[1], b[1]), wp.max(a[2], b[2])) @wp.kernel def intersect( mesh_0: wp.uint64, mesh_1: wp.uint64, num_faces: int, xforms: wp.array(dtype=wp.transform), result: wp.array(dtype=int), ): tid = wp.tid() # mesh_0 is assumed to be the query mesh, we launch one thread # for each face in mesh_0 and test it against the opposing mesh's BVH face = tid % num_faces batch = tid // num_faces # transforms from mesh_0 -> mesh_1 space xform = xforms[batch] # load query triangles points and transform to mesh_1's space v0 = wp.transform_point(xform, wp.mesh_eval_position(mesh_0, face, 1.0, 0.0)) v1 = wp.transform_point(xform, wp.mesh_eval_position(mesh_0, face, 0.0, 1.0)) v2 = wp.transform_point(xform, wp.mesh_eval_position(mesh_0, face, 0.0, 0.0)) # compute bounds of the query triangle lower = cw_min(cw_min(v0, v1), v2) upper = cw_max(cw_max(v0, v1), v2) query = wp.mesh_query_aabb(mesh_1, lower, upper) for f in query: u0 = wp.mesh_eval_position(mesh_1, f, 1.0, 0.0) u1 = wp.mesh_eval_position(mesh_1, f, 0.0, 1.0) u2 = wp.mesh_eval_position(mesh_1, f, 0.0, 0.0) # test for triangle intersection i = wp.intersect_tri_tri(v0, v1, v2, u0, u1, u2) if i > 0: result[batch] = 1 return # use if you want to count all intersections # wp.atomic_add(result, batch, i) class Example: def __init__(self, stage_path="example_mesh_intersect.usd"): rng = np.random.default_rng(42) self.query_count = 1024 self.has_queried = False self.path_0 = os.path.join(warp.examples.get_asset_directory(), "cube.usd") self.path_1 = os.path.join(warp.examples.get_asset_directory(), "sphere.usd") self.mesh_0 = self.load_mesh(self.path_0, "/root/cube") self.mesh_1 = self.load_mesh(self.path_1, "/root/sphere") self.query_num_faces = int(len(self.mesh_0.indices) / 3) self.query_num_points = len(self.mesh_0.points) # generate random relative transforms self.xforms = [] for _ in range(self.query_count): # random offset p = wp.vec3(rng.random(3) * 0.5 - 0.5) * 5.0 # random orientation axis = wp.normalize(wp.vec3(rng.random(3) * 0.5 - 0.5)) angle = float(np.random.rand(1)[0]) q = wp.quat_from_axis_angle(wp.normalize(axis), angle) self.xforms.append(wp.transform(p, q)) self.array_result = wp.zeros(self.query_count, dtype=int) self.array_xforms = wp.array(self.xforms, dtype=wp.transform) # renderer if stage_path: self.renderer = wp.render.UsdRenderer(stage_path) else: self.renderer = None def step(self): with wp.ScopedTimer("step"): wp.launch( kernel=intersect, dim=self.query_num_faces * self.query_count, inputs=[self.mesh_0.id, self.mesh_1.id, self.query_num_faces, 
self.array_xforms, self.array_result], ) def render(self): if self.renderer is None: return # bring results back to host result = self.array_result.numpy() with wp.ScopedTimer("render", active=True): self.renderer.begin_frame(0.0) for i in range(self.query_count): spacing = 8.0 offset = i * spacing xform = self.xforms[i] self.renderer.render_ref( f"mesh_{i}_0", self.path_0, pos=wp.vec3(xform.p[0] + offset, xform.p[1], xform.p[2]), rot=xform.q, scale=wp.vec3(1.0, 1.0, 1.0), ) self.renderer.render_ref( f"mesh_{i}_1", self.path_1, pos=wp.vec3(offset, 0.0, 0.0), rot=wp.quat_identity(), scale=wp.vec3(1.0, 1.0, 1.0), ) # if pair intersects then draw a small box above the pair if result[i] > 0: self.renderer.render_box( f"result_{i}", pos=wp.vec3(xform.p[0] + offset, xform.p[1] + 5.0, xform.p[2]), rot=wp.quat_identity(), extents=(0.1, 0.1, 0.1), ) self.renderer.end_frame() # create collision meshes def load_mesh(self, path, prim): usd_stage = Usd.Stage.Open(path) usd_geom = UsdGeom.Mesh(usd_stage.GetPrimAtPath(prim)) mesh = wp.Mesh( points=wp.array(usd_geom.GetPointsAttr().Get(), dtype=wp.vec3), indices=wp.array(usd_geom.GetFaceVertexIndicesAttr().Get(), dtype=int), ) return mesh if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--device", type=str, default=None, help="Override the default Warp device.") parser.add_argument( "--stage_path", type=lambda x: None if x == "None" else str(x), default="example_mesh_intersect.usd", help="Path to the output USD file.", ) args = parser.parse_known_args()[0] with wp.ScopedDevice(args.device): example = Example(stage_path=args.stage_path) example.step() example.render() if example.renderer: example.renderer.save()
size: 6,637 | lang: Python | avg_line_length: 31.539216 | max_line_length: 116 | alphanum_fraction: 0.558686
NVIDIA/warp/warp/examples/core/example_fluid.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. ########################################################################### # Example Fluid # # Shows how to implement a simple 2D Stable Fluids solver using # multidimensional arrays and launches. # ########################################################################### import math import warp as wp import warp.render grid_width = wp.constant(256) grid_height = wp.constant(128) @wp.func def lookup_float(f: wp.array2d(dtype=float), x: int, y: int): x = wp.clamp(x, 0, grid_width - 1) y = wp.clamp(y, 0, grid_height - 1) return f[x, y] @wp.func def sample_float(f: wp.array2d(dtype=float), x: float, y: float): lx = int(wp.floor(x)) ly = int(wp.floor(y)) tx = x - float(lx) ty = y - float(ly) s0 = wp.lerp(lookup_float(f, lx, ly), lookup_float(f, lx + 1, ly), tx) s1 = wp.lerp(lookup_float(f, lx, ly + 1), lookup_float(f, lx + 1, ly + 1), tx) s = wp.lerp(s0, s1, ty) return s @wp.func def lookup_vel(f: wp.array2d(dtype=wp.vec2), x: int, y: int): if x < 0 or x >= grid_width: return wp.vec2() if y < 0 or y >= grid_height: return wp.vec2() return f[x, y] @wp.func def sample_vel(f: wp.array2d(dtype=wp.vec2), x: float, y: float): lx = int(wp.floor(x)) ly = int(wp.floor(y)) tx = x - float(lx) ty = y - float(ly) s0 = wp.lerp(lookup_vel(f, lx, ly), lookup_vel(f, lx + 1, ly), tx) s1 = wp.lerp(lookup_vel(f, lx, ly + 1), lookup_vel(f, lx + 1, ly + 1), tx) s = wp.lerp(s0, s1, ty) return s @wp.kernel def advect( u0: wp.array2d(dtype=wp.vec2), u1: wp.array2d(dtype=wp.vec2), rho0: wp.array2d(dtype=float), rho1: wp.array2d(dtype=float), dt: float, ): i, j = wp.tid() u = u0[i, j] # trace backward p = wp.vec2(float(i), float(j)) p = p - u * dt # advect u1[i, j] = sample_vel(u0, p[0], p[1]) rho1[i, j] = sample_float(rho0, p[0], p[1]) @wp.kernel def divergence(u: wp.array2d(dtype=wp.vec2), div: wp.array2d(dtype=float)): i, j = wp.tid() if i == grid_width - 1: return if j == grid_height - 1: return dx = (u[i + 1, j][0] - u[i, j][0]) * 0.5 dy = (u[i, j + 1][1] - u[i, j][1]) * 0.5 div[i, j] = dx + dy @wp.kernel def pressure_solve(p0: wp.array2d(dtype=float), p1: wp.array2d(dtype=float), div: wp.array2d(dtype=float)): i, j = wp.tid() s1 = lookup_float(p0, i - 1, j) s2 = lookup_float(p0, i + 1, j) s3 = lookup_float(p0, i, j - 1) s4 = lookup_float(p0, i, j + 1) # Jacobi update err = s1 + s2 + s3 + s4 - div[i, j] p1[i, j] = err * 0.25 @wp.kernel def pressure_apply(p: wp.array2d(dtype=float), u: wp.array2d(dtype=wp.vec2)): i, j = wp.tid() if i == 0 or i == grid_width - 1: return if j == 0 or j == grid_height - 1: return # pressure gradient f_p = wp.vec2(p[i + 1, j] - p[i - 1, j], p[i, j + 1] - p[i, j - 1]) * 0.5 u[i, j] = u[i, j] - f_p @wp.kernel def integrate(u: wp.array2d(dtype=wp.vec2), rho: wp.array2d(dtype=float), dt: float): i, j = wp.tid() # gravity f_g = wp.vec2(-90.8, 0.0) * rho[i, j] # integrate u[i, j] = u[i, j] + dt * f_g # fade rho[i, j] = rho[i, j] * (1.0 - 0.1 * dt) @wp.kernel def init(rho: wp.array2d(dtype=float), u: wp.array2d(dtype=wp.vec2), radius: int, dir: wp.vec2): i, j = wp.tid() d = wp.length(wp.vec2(float(i - grid_width / 2), float(j - grid_height / 2))) if d < radius: rho[i, j] = 1.0 
u[i, j] = dir class Example: def __init__(self): fps = 60 self.frame_dt = 1.0 / fps self.sim_substeps = 2 self.iterations = 100 # Number of pressure iterations self.sim_dt = self.frame_dt / self.sim_substeps self.sim_time = 0.0 shape = (grid_width, grid_height) self.u0 = wp.zeros(shape, dtype=wp.vec2) self.u1 = wp.zeros(shape, dtype=wp.vec2) self.rho0 = wp.zeros(shape, dtype=float) self.rho1 = wp.zeros(shape, dtype=float) self.p0 = wp.zeros(shape, dtype=float) self.p1 = wp.zeros(shape, dtype=float) self.div = wp.zeros(shape, dtype=float) # capture pressure solve as a CUDA graph self.use_cuda_graph = wp.get_device().is_cuda if self.use_cuda_graph: with wp.ScopedCapture() as capture: self.pressure_iterations() self.graph = capture.graph def step(self): with wp.ScopedTimer("step"): for _ in range(self.sim_substeps): shape = (grid_width, grid_height) dt = self.sim_dt speed = 400.0 angle = math.sin(self.sim_time * 4.0) * 1.5 vel = wp.vec2(math.cos(angle) * speed, math.sin(angle) * speed) # update emitters wp.launch(init, dim=shape, inputs=[self.rho0, self.u0, 5, vel]) # force integrate wp.launch(integrate, dim=shape, inputs=[self.u0, self.rho0, dt]) wp.launch(divergence, dim=shape, inputs=[self.u0, self.div]) # pressure solve self.p0.zero_() self.p1.zero_() if self.use_cuda_graph: wp.capture_launch(self.graph) else: self.pressure_iterations() # velocity update wp.launch(pressure_apply, dim=shape, inputs=[self.p0, self.u0]) # semi-Lagrangian advection wp.launch(advect, dim=shape, inputs=[self.u0, self.u1, self.rho0, self.rho1, dt]) # swap buffers (self.u0, self.u1) = (self.u1, self.u0) (self.rho0, self.rho1) = (self.rho1, self.rho0) self.sim_time += dt def pressure_iterations(self): for _ in range(self.iterations): wp.launch(pressure_solve, dim=self.p0.shape, inputs=[self.p0, self.p1, self.div]) # swap pressure fields (self.p0, self.p1) = (self.p1, self.p0) def step_and_render_frame(self, frame_num=None, img=None): self.step() with wp.ScopedTimer("render"): if img: img.set_array(self.rho0.numpy()) return (img,) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--device", type=str, default=None, help="Override the default Warp device.") parser.add_argument("--num_frames", type=int, default=100000, help="Total number of frames.") parser.add_argument( "--headless", action="store_true", help="Run in headless mode, suppressing the opening of any graphical windows.", ) args = parser.parse_known_args()[0] with wp.ScopedDevice(args.device): example = Example() if args.headless: for _ in range(args.num_frames): example.step() else: import matplotlib import matplotlib.animation as anim import matplotlib.pyplot as plt fig = plt.figure() img = plt.imshow( example.rho0.numpy(), origin="lower", animated=True, interpolation="antialiased", ) img.set_norm(matplotlib.colors.Normalize(0.0, 1.0)) seq = anim.FuncAnimation( fig, example.step_and_render_frame, fargs=(img,), frames=args.num_frames, blit=True, interval=8, repeat=False, ) plt.show()
size: 8,156 | lang: Python | avg_line_length: 26.934931 | max_line_length: 107 | alphanum_fraction: 0.539235
NVIDIA/warp/warp/examples/core/example_graph_capture.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. ########################################################################### # Example Graph Capture # # Shows how to implement CUDA graph capture using wp.ScopedCapture(). # ########################################################################### import numpy as np import warp as wp @wp.kernel def fbm( kernel_seed: int, frequency: float, amplitude: float, x: wp.array(dtype=float), y: wp.array(dtype=float), z: wp.array2d(dtype=float), ): i, j = wp.tid() state = wp.rand_init(kernel_seed) p = frequency * wp.vec2(x[j], y[i]) n = amplitude * wp.noise(state, p) z[i, j] += n @wp.kernel def slide(x: wp.array(dtype=float), shift: float): tid = wp.tid() x[tid] += shift class Example: def __init__(self): self.width = 128 self.height = 128 min_x, max_x = 0.0, 2.0 min_y, max_y = 0.0, 2.0 # create a grid of pixels x = np.linspace(min_x, max_x, self.width) y = np.linspace(min_y, max_y, self.height) self.x = wp.array(x, dtype=float) self.y = wp.array(y, dtype=float) self.pixel_values = wp.zeros((self.width, self.height), dtype=float) self.seed = 42 self.shift = 2e-2 self.frequency = 1.0 self.amplitude = 1.0 # use graph capture if launching from a CUDA-capable device self.use_cuda_graph = wp.get_device().is_cuda if self.use_cuda_graph: # record launches with wp.ScopedCapture() as capture: self.fbm() self.graph = capture.graph def fbm(self): for _ in range(16): wp.launch( kernel=fbm, dim=(self.height, self.width), inputs=[self.seed, self.frequency, self.amplitude, self.x, self.y], outputs=[self.pixel_values], ) self.frequency *= 2.0 self.amplitude *= 0.5 def step(self): self.pixel_values.zero_() self.frequency = 1.0 self.amplitude = 1.0 with wp.ScopedTimer("step", active=True): wp.launch(kernel=slide, dim=self.width, inputs=[self.x, self.shift]) if self.use_cuda_graph: wp.capture_launch(self.graph) else: # cpu path self.fbm() def step_and_render(self, frame_num=None, img=None): self.step() with wp.ScopedTimer("render"): if img: pixels = self.pixel_values.numpy() pixels = (pixels + 1.0) / 2.0 img.set_array(pixels) return (img,) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--device", type=str, default=None, help="Override the default Warp device.") parser.add_argument("--num_frames", type=int, default=1000, help="Total number of frames.") parser.add_argument( "--headless", action="store_true", help="Run in headless mode, suppressing the opening of any graphical windows.", ) args = parser.parse_known_args()[0] with wp.ScopedDevice(args.device): example = Example() if not args.headless: import matplotlib.colors import matplotlib.pyplot as plt from matplotlib.animation import FuncAnimation # Create the animation fig = plt.figure() img = plt.imshow(example.pixel_values.numpy(), "gray", origin="lower", animated=True) img.set_norm(matplotlib.colors.Normalize(0.0, 1.0)) ani = FuncAnimation(fig, example.step_and_render, fargs=(img,), frames=1000, interval=30) # Display the animation plt.show() else: for _ in range(args.num_frames): example.step()
size: 4,328 | lang: Python | avg_line_length: 29.272727 | max_line_length: 101 | alphanum_fraction: 0.567699
NVIDIA/warp/warp/examples/core/example_raycast.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. ############################################################################# # Example Ray Cast # # Shows how to use the built-in wp.Mesh data structure and wp.mesh_query_ray() # function to implement a basic ray-tracer. # ############################################################################## import os import numpy as np from pxr import Usd, UsdGeom import warp as wp import warp.examples @wp.kernel def draw(mesh: wp.uint64, cam_pos: wp.vec3, width: int, height: int, pixels: wp.array(dtype=wp.vec3)): tid = wp.tid() x = tid % width y = tid // width sx = 2.0 * float(x) / float(height) - 1.0 sy = 2.0 * float(y) / float(height) - 1.0 # compute view ray ro = cam_pos rd = wp.normalize(wp.vec3(sx, sy, -1.0)) color = wp.vec3(0.0, 0.0, 0.0) query = wp.mesh_query_ray(mesh, ro, rd, 1.0e6) if query.result: color = query.normal * 0.5 + wp.vec3(0.5, 0.5, 0.5) pixels[tid] = color class Example: def __init__(self, height=1024, width=1024): self.height = height self.width = width self.cam_pos = (0.0, 1.0, 2.0) asset_stage = Usd.Stage.Open(os.path.join(warp.examples.get_asset_directory(), "bunny.usd")) mesh_geom = UsdGeom.Mesh(asset_stage.GetPrimAtPath("/root/bunny")) points = np.array(mesh_geom.GetPointsAttr().Get()) indices = np.array(mesh_geom.GetFaceVertexIndicesAttr().Get()) self.pixels = wp.zeros(self.width * self.height, dtype=wp.vec3) # create wp mesh self.mesh = wp.Mesh( points=wp.array(points, dtype=wp.vec3), velocities=None, indices=wp.array(indices, dtype=int) ) def render(self): with wp.ScopedTimer("render"): wp.launch( kernel=draw, dim=self.width * self.height, inputs=[self.mesh.id, self.cam_pos, self.width, self.height, self.pixels], ) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--device", type=str, default=None, help="Override the default Warp device.") parser.add_argument("--width", type=int, default=1024, help="Output image width in pixels.") parser.add_argument("--height", type=int, default=1024, help="Output image height in pixels.") parser.add_argument( "--headless", action="store_true", help="Run in headless mode, suppressing the opening of any graphical windows.", ) args = parser.parse_known_args()[0] with wp.ScopedDevice(args.device): example = Example(height=args.height, width=args.width) example.render() if not args.headless: import matplotlib.pyplot as plt plt.imshow( example.pixels.numpy().reshape((example.height, example.width, 3)), origin="lower", interpolation="antialiased", ) plt.show()
size: 3,434 | lang: Python | avg_line_length: 32.028846 | max_line_length: 105 | alphanum_fraction: 0.605416
NVIDIA/warp/warp/examples/core/example_mesh.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. ########################################################################### # Example Mesh # # Shows how to implement a PBD particle simulation with collision against # a deforming triangle mesh. The mesh collision uses wp.mesh_query_point_sign_normal() # to compute the closest point, and wp.Mesh.refit() to update the mesh # object after deformation. # ########################################################################### import os import numpy as np from pxr import Usd, UsdGeom import warp as wp import warp.examples import warp.render @wp.kernel def deform(positions: wp.array(dtype=wp.vec3), t: float): tid = wp.tid() x = positions[tid] offset = -wp.sin(x[0]) * 0.02 scale = wp.sin(t) x = x + wp.vec3(0.0, offset * scale, 0.0) positions[tid] = x @wp.kernel def simulate( positions: wp.array(dtype=wp.vec3), velocities: wp.array(dtype=wp.vec3), mesh: wp.uint64, margin: float, dt: float, ): tid = wp.tid() x = positions[tid] v = velocities[tid] v = v + wp.vec3(0.0, 0.0 - 9.8, 0.0) * dt - v * 0.1 * dt xpred = x + v * dt max_dist = 1.5 query = wp.mesh_query_point_sign_normal(mesh, xpred, max_dist) if query.result: p = wp.mesh_eval_position(mesh, query.face, query.u, query.v) delta = xpred - p dist = wp.length(delta) * query.sign err = dist - margin # mesh collision if err < 0.0: n = wp.normalize(delta) * query.sign xpred = xpred - n * err # pbd update v = (xpred - x) * (1.0 / dt) x = xpred positions[tid] = x velocities[tid] = v class Example: def __init__(self, stage_path="example_mesh.usd"): rng = np.random.default_rng(42) self.num_particles = 1000 self.sim_dt = 1.0 / 60.0 self.sim_time = 0.0 self.sim_timers = {} self.sim_margin = 0.1 usd_stage = Usd.Stage.Open(os.path.join(warp.examples.get_asset_directory(), "bunny.usd")) usd_geom = UsdGeom.Mesh(usd_stage.GetPrimAtPath("/root/bunny")) usd_scale = 10.0 # create collision mesh self.mesh = wp.Mesh( points=wp.array(usd_geom.GetPointsAttr().Get() * usd_scale, dtype=wp.vec3), indices=wp.array(usd_geom.GetFaceVertexIndicesAttr().Get(), dtype=int), ) # random particles init_pos = (rng.random((self.num_particles, 3)) - np.array([0.5, -1.5, 0.5])) * 10.0 init_vel = rng.random((self.num_particles, 3)) * 0.0 self.positions = wp.from_numpy(init_pos, dtype=wp.vec3) self.velocities = wp.from_numpy(init_vel, dtype=wp.vec3) # renderer self.renderer = None if stage_path: self.renderer = wp.render.UsdRenderer(stage_path) def step(self): with wp.ScopedTimer("step", dict=self.sim_timers): wp.launch(kernel=deform, dim=len(self.mesh.points), inputs=[self.mesh.points, self.sim_time]) # refit the mesh BVH to account for the deformation self.mesh.refit() wp.launch( kernel=simulate, dim=self.num_particles, inputs=[self.positions, self.velocities, self.mesh.id, self.sim_margin, self.sim_dt], ) self.sim_time += self.sim_dt def render(self): if self.renderer is None: return with wp.ScopedTimer("render"): self.renderer.begin_frame(self.sim_time) self.renderer.render_mesh( name="mesh", points=self.mesh.points.numpy(), indices=self.mesh.indices.numpy(), colors=((0.35, 0.55, 0.9),) * len(self.mesh.points), ) self.renderer.render_points( name="points", 
points=self.positions.numpy(), radius=self.sim_margin, colors=(0.8, 0.3, 0.2) ) self.renderer.end_frame() if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--device", type=str, default=None, help="Override the default Warp device.") parser.add_argument( "--stage_path", type=lambda x: None if x == "None" else str(x), default="example_mesh.usd", help="Path to the output USD file.", ) parser.add_argument("--num_frames", type=int, default=500, help="Total number of frames.") args = parser.parse_known_args()[0] with wp.ScopedDevice(args.device): example = Example(stage_path=args.stage_path) for _ in range(args.num_frames): example.step() example.render() if example.renderer: example.renderer.save()
size: 5,205 | lang: Python | avg_line_length: 29.092485 | max_line_length: 108 | alphanum_fraction: 0.58463
NVIDIA/warp/warp/examples/core/example_marching_cubes.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. ########################################################################### # Example Marching Cubes # # Shows how use the built-in marching cubes functionality to extract # the iso-surface from a density field. # # Note: requires a CUDA-capable device ########################################################################### import warp as wp import warp.render @wp.func def sdf_create_box(pos: wp.vec3, size: wp.vec3): """Creates a SDF box primitive.""" # https://iquilezles.org/articles/distfunctions q = wp.vec3( wp.abs(pos[0]) - size[0], wp.abs(pos[1]) - size[1], wp.abs(pos[2]) - size[2], ) qp = wp.vec3(wp.max(q[0], 0.0), wp.max(q[1], 0.0), wp.max(q[2], 0.0)) return wp.length(qp) + wp.min(wp.max(q[0], wp.max(q[1], q[2])), 0.0) @wp.func def sdf_create_torus(pos: wp.vec3, major_radius: float, minor_radius: float): """Creates a SDF torus primitive.""" # https://iquilezles.org/articles/distfunctions q = wp.vec2(wp.length(wp.vec2(pos[0], pos[2])) - major_radius, pos[1]) return wp.length(q) - minor_radius @wp.func def sdf_translate(pos: wp.vec3, offset: wp.vec3): """Translates a SDF position vector with an offset.""" return pos - offset @wp.func def sdf_rotate(pos: wp.vec3, angles: wp.vec3): """Rotates a SDF position vector using Euler angles.""" rot = wp.quat_rpy( wp.radians(angles[0]), wp.radians(angles[1]), wp.radians(angles[2]), ) return wp.quat_rotate_inv(rot, pos) @wp.func def sdf_smooth_min(a: float, b: float, radius: float): """Creates a SDF torus primitive.""" # https://iquilezles.org/articles/smin h = wp.max(radius - wp.abs(a - b), 0.0) / radius return wp.min(a, b) - h * h * h * radius * (1.0 / 6.0) @wp.kernel(enable_backward=False) def make_field( torus_altitude: float, torus_major_radius: float, torus_minor_radius: float, smooth_min_radius: float, dim: int, time: float, out_data: wp.array3d(dtype=float), ): """Kernel to generate a SDF volume based on primitives.""" i, j, k = wp.tid() # Retrieve the position of the current cell in a normalized [-1, 1] range # for each dimension. 
pos = wp.vec3( 2.0 * ((float(i) + 0.5) / float(dim)) - 1.0, 2.0 * ((float(j) + 0.5) / float(dim)) - 1.0, 2.0 * ((float(k) + 0.5) / float(dim)) - 1.0, ) box = sdf_create_box( sdf_translate(pos, wp.vec3(0.0, -0.7, 0.0)), wp.vec3(0.9, 0.3, 0.9), ) torus = sdf_create_torus( sdf_rotate( sdf_translate(pos, wp.vec3(0.0, torus_altitude, 0.0)), wp.vec3(wp.sin(time) * 90.0, wp.cos(time) * 45.0, 0.0), ), torus_major_radius, torus_minor_radius, ) out_data[i, j, k] = sdf_smooth_min(box, torus, smooth_min_radius) class Example: def __init__(self, stage_path="example_marching_cubes.usd", verbose=False): self.verbose = verbose self.dim = 64 self.max_verts = int(1e6) self.max_tris = int(1e6) self.torus_altitude = -0.5 self.torus_major_radius = 0.5 self.torus_minor_radius = 0.1 self.smooth_min_radius = 0.5 self.fps = 60 self.frame = 0 self.field = wp.zeros((self.dim, self.dim, self.dim), dtype=float) self.mc = wp.MarchingCubes(self.dim, self.dim, self.dim, self.max_verts, self.max_tris) self.renderer = None if stage_path: self.renderer = wp.render.UsdRenderer(stage_path) def step(self): with wp.ScopedTimer("step"): with wp.ScopedTimer("Update Field", active=self.verbose): wp.launch( make_field, dim=self.field.shape, inputs=( self.torus_altitude, self.torus_major_radius, self.torus_minor_radius, self.smooth_min_radius, self.dim, self.frame / self.fps, ), outputs=(self.field,), ) with wp.ScopedTimer("Surface Extraction", active=self.verbose): self.mc.surface(self.field, 0.0) def render(self): if self.renderer is None: return with wp.ScopedTimer("Render"): self.renderer.begin_frame(self.frame / self.fps) self.renderer.render_mesh( "surface", self.mc.verts.numpy(), self.mc.indices.numpy(), colors=((0.35, 0.55, 0.9),) * len(self.mc.verts), update_topology=True, ) self.renderer.end_frame() if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--device", type=str, default=None, help="Override the default Warp device.") parser.add_argument( "--stage_path", type=lambda x: None if x == "None" else str(x), default="example_marching_cubes.usd", help="Path to the output USD file.", ) parser.add_argument("--num_frames", type=int, default=240, help="Total number of frames.") parser.add_argument("--verbose", action="store_true", help="Print out additional status messages during execution.") args = parser.parse_known_args()[0] with wp.ScopedDevice(args.device): example = Example(stage_path=args.stage_path, verbose=args.verbose) for _ in range(args.num_frames): example.step() example.render() example.frame += 1 if example.renderer is not None: example.renderer.save()
size: 6,206 | lang: Python | avg_line_length: 32.192513 | max_line_length: 120 | alphanum_fraction: 0.569449
NVIDIA/warp/warp/examples/core/example_torch.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. ########################################################################### # Example Torch # # Optimizes the Rosenbrock function using the PyTorch Adam optimizer # The Rosenbrock function is a non-convex function, and is often used # to test optimization algorithms. The function is defined as: # f(x, y) = (a - x)^2 + b * (y - x^2)^2 # where a = 1 and b = 100. The minimum value of the function is 0 at (1, 1). # # The example demonstrates how to set up a torch.autograd.Function to # incorporate Warp kernel launches within a PyTorch graph. ########################################################################### import numpy as np import torch import warp as wp pvec2 = wp.types.vector(length=2, dtype=wp.float32) # Define the Rosenbrock function @wp.func def rosenbrock(x: float, y: float): return (1.0 - x) ** 2.0 + 100.0 * (y - x**2.0) ** 2.0 @wp.kernel def eval_rosenbrock( xs: wp.array(dtype=pvec2), # outputs z: wp.array(dtype=float), ): i = wp.tid() x = xs[i] z[i] = rosenbrock(x[0], x[1]) class Rosenbrock(torch.autograd.Function): @staticmethod def forward(ctx, xy, num_particles): ctx.xy = wp.from_torch(xy, dtype=pvec2, requires_grad=True) ctx.num_particles = num_particles # allocate output ctx.z = wp.zeros(num_particles, requires_grad=True) wp.launch(kernel=eval_rosenbrock, dim=ctx.num_particles, inputs=[ctx.xy], outputs=[ctx.z]) return wp.to_torch(ctx.z) @staticmethod def backward(ctx, adj_z): # map incoming Torch grads to our output variables ctx.z.grad = wp.from_torch(adj_z) wp.launch( kernel=eval_rosenbrock, dim=ctx.num_particles, inputs=[ctx.xy], outputs=[ctx.z], adj_inputs=[ctx.xy.grad], adj_outputs=[ctx.z.grad], adjoint=True, ) # return adjoint w.r.t. 
inputs return (wp.to_torch(ctx.xy.grad), None) class Example: def __init__(self, headless=False, train_iters=10): self.num_particles = 1500 self.train_iters = train_iters self.frame = 0 self.learning_rate = 5e-2 self.torch_device = wp.device_to_torch(wp.get_device()) rng = np.random.default_rng(42) self.xy = torch.tensor( rng.normal(size=(self.num_particles, 2)), dtype=torch.float32, requires_grad=True, device=self.torch_device ) self.xp_np = self.xy.numpy(force=True) self.opt = torch.optim.Adam([self.xy], lr=self.learning_rate) if headless: self.scatter_plot = None self.mean_marker = None else: self.scatter_plot = self.create_plot() self.mean_pos = np.empty((2,)) def create_plot(self): import matplotlib.pyplot as plt min_x, max_x = -2.0, 2.0 min_y, max_y = -2.0, 2.0 # Create a grid of points x = np.linspace(min_x, max_x, 100) y = np.linspace(min_y, max_y, 100) X, Y = np.meshgrid(x, y) xy = np.column_stack((X.flatten(), Y.flatten())) N = len(xy) xy = wp.array(xy, dtype=pvec2) Z = wp.empty(N, dtype=wp.float32) wp.launch(eval_rosenbrock, dim=N, inputs=[xy], outputs=[Z]) Z = Z.numpy().reshape(X.shape) # Plot the function as a heatmap self.fig = plt.figure(figsize=(6, 6)) ax = plt.gca() plt.imshow( Z, extent=[min_x, max_x, min_y, max_y], origin="lower", interpolation="bicubic", cmap="coolwarm", ) plt.contour( X, Y, Z, extent=[min_x, max_x, min_y, max_y], levels=150, colors="k", alpha=0.5, linewidths=0.5, ) # Plot optimum plt.plot(1, 1, "*", color="r", markersize=10) plt.title("Rosenbrock function") plt.xlabel("x") plt.ylabel("y") (self.mean_marker,) = ax.plot([], [], "o", color="w", markersize=5) # Create a scatter plot (initially empty) return ax.scatter([], [], c="k", s=2) def forward(self): self.z = Rosenbrock.apply(self.xy, self.num_particles) def step(self): self.opt.zero_grad() self.forward() self.z.backward(torch.ones_like(self.z)) self.opt.step() # Update the scatter plot self.xy_np = self.xy.numpy(force=True) # Compute mean self.mean_pos = np.mean(self.xy_np, axis=0) print(f"\rFrame {self.frame:5d} particle mean: {self.mean_pos[0]:.8f}, {self.mean_pos[1]:.8f} ", end="") self.frame += 1 def render(self): if self.scatter_plot is None: return self.scatter_plot.set_offsets(np.c_[self.xy_np[:, 0], self.xy_np[:, 1]]) self.mean_marker.set_data(self.mean_pos[0], self.mean_pos[1]) # Function to update the scatter plot def step_and_render(self, frame): for _ in range(self.train_iters): self.step() self.render() if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--device", type=str, default=None, help="Override the default Warp device.") parser.add_argument("--num_frames", type=int, default=10000, help="Total number of frames.") parser.add_argument("--train_iters", type=int, default=10, help="Total number of training iterations per frame.") parser.add_argument( "--headless", action="store_true", help="Run in headless mode, suppressing the opening of any graphical windows.", ) args = parser.parse_known_args()[0] with wp.ScopedDevice(args.device): example = Example(headless=args.headless, train_iters=args.train_iters) if not args.headless: import matplotlib.pyplot as plt from matplotlib.animation import FuncAnimation # Create the animation ani = FuncAnimation(example.fig, example.step_and_render, frames=args.num_frames, interval=100) # Display the animation plt.show() else: for _ in range(args.num_frames): example.step()
size: 6,777 | lang: Python | avg_line_length: 29.809091 | max_line_length: 119 | alphanum_fraction: 0.57887
NVIDIA/warp/warp/examples/core/example_nvdb.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. ########################################################################### # Example NanoVDB # # Shows how to implement a particle simulation with collision against # a NanoVDB signed-distance field. In this example the NanoVDB field # is created offline in Houdini. The particle kernel uses the Warp # wp.volume_sample_f() method to compute the SDF and normal at a point. # ########################################################################### import os import numpy as np import warp as wp import warp.examples import warp.render @wp.func def volume_grad(volume: wp.uint64, p: wp.vec3): eps = 1.0 q = wp.volume_world_to_index(volume, p) # compute gradient of the SDF using finite differences dx = wp.volume_sample_f(volume, q + wp.vec3(eps, 0.0, 0.0), wp.Volume.LINEAR) - wp.volume_sample_f( volume, q - wp.vec3(eps, 0.0, 0.0), wp.Volume.LINEAR ) dy = wp.volume_sample_f(volume, q + wp.vec3(0.0, eps, 0.0), wp.Volume.LINEAR) - wp.volume_sample_f( volume, q - wp.vec3(0.0, eps, 0.0), wp.Volume.LINEAR ) dz = wp.volume_sample_f(volume, q + wp.vec3(0.0, 0.0, eps), wp.Volume.LINEAR) - wp.volume_sample_f( volume, q - wp.vec3(0.0, 0.0, eps), wp.Volume.LINEAR ) return wp.normalize(wp.vec3(dx, dy, dz)) @wp.kernel def simulate( positions: wp.array(dtype=wp.vec3), velocities: wp.array(dtype=wp.vec3), volume: wp.uint64, margin: float, dt: float, ): tid = wp.tid() x = positions[tid] v = velocities[tid] v = v + wp.vec3(0.0, -9.8, 0.0) * dt - v * 0.1 * dt xpred = x + v * dt xpred_local = wp.volume_world_to_index(volume, xpred) # d = wp.volume_sample_f(volume, xpred_local, wp.Volume.LINEAR) n = wp.vec3() d = wp.volume_sample_grad_f(volume, xpred_local, wp.Volume.LINEAR, n) if d < margin: # n = volume_grad(volume, xpred) n = wp.normalize(n) err = d - margin # mesh collision xpred = xpred - n * err # ground collision if xpred[1] < 0.0: xpred = wp.vec3(xpred[0], 0.0, xpred[2]) # pbd update v = (xpred - x) * (1.0 / dt) x = xpred positions[tid] = x velocities[tid] = v class Example: def __init__(self, stage_path="example_nvdb.usd"): rng = np.random.default_rng(42) self.num_particles = 10000 fps = 60 frame_dt = 1.0 / fps self.sim_substeps = 3 self.sim_dt = frame_dt / self.sim_substeps self.sim_time = 0.0 self.sim_timers = {} self.sim_margin = 0.15 init_pos = 10.0 * (rng.random((self.num_particles, 3)) * 2.0 - 1.0) + np.array((0.0, 30.0, 0.0)) init_vel = rng.random((self.num_particles, 3)) self.positions = wp.from_numpy(init_pos.astype(np.float32), dtype=wp.vec3) self.velocities = wp.from_numpy(init_vel.astype(np.float32), dtype=wp.vec3) # load collision volume with open(os.path.join(warp.examples.get_asset_directory(), "rocks.nvdb"), "rb") as file: # create Volume object self.volume = wp.Volume.load_from_nvdb(file) # renderer self.renderer = None if stage_path: self.renderer = wp.render.UsdRenderer(stage_path) self.renderer.render_ground(size=100.0) def step(self): with wp.ScopedTimer("step", dict=self.sim_timers): for _ in range(self.sim_substeps): wp.launch( kernel=simulate, dim=self.num_particles, inputs=[self.positions, self.velocities, self.volume.id, self.sim_margin, self.sim_dt], ) self.sim_time 
+= self.sim_dt def render(self): if self.renderer is None: return with wp.ScopedTimer("render"): self.renderer.begin_frame(self.sim_time) self.renderer.render_ref( name="collision", path=os.path.join(warp.examples.get_asset_directory(), "rocks.usd"), pos=wp.vec3(0.0, 0.0, 0.0), rot=wp.quat(0.0, 0.0, 0.0, 1.0), scale=wp.vec3(1.0, 1.0, 1.0), color=(0.35, 0.55, 0.9), ) self.renderer.render_points( name="points", points=self.positions.numpy(), radius=self.sim_margin, colors=(0.8, 0.3, 0.2) ) self.renderer.end_frame() if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--device", type=str, default=None, help="Override the default Warp device.") parser.add_argument( "--stage_path", type=lambda x: None if x == "None" else str(x), default="example_nvdb.usd", help="Path to the output USD file.", ) parser.add_argument("--num_frames", type=int, default=1000, help="Total number of frames.") args = parser.parse_known_args()[0] with wp.ScopedDevice(args.device): example = Example(stage_path=args.stage_path) for _ in range(args.num_frames): example.step() example.render() if example.renderer: example.renderer.save()
size: 5,682 | lang: Python | avg_line_length: 31.474286 | max_line_length: 108 | alphanum_fraction: 0.582189
NVIDIA/warp/warp/examples/core/example_sph.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. ########################################################################### # Example Smoothed Particle Hydrodynamics # # Shows how to implement a SPH fluid simulation. # # Neighbors are found using the wp.HashGrid class, and # wp.hash_grid_query(), wp.hash_grid_query_next() kernel methods. # # Reference Publication # Matthias Müller, David Charypar, and Markus H. Gross. # "Particle-based fluid simulation for interactive applications." # Symposium on Computer animation. Vol. 2. 2003. # ########################################################################### import numpy as np import warp as wp import warp.render @wp.func def square(x: float): return x * x @wp.func def cube(x: float): return x * x * x @wp.func def fifth(x: float): return x * x * x * x * x @wp.func def density_kernel(xyz: wp.vec3, smoothing_length: float): # calculate distance distance = wp.dot(xyz, xyz) return wp.max(cube(square(smoothing_length) - distance), 0.0) @wp.func def diff_pressure_kernel( xyz: wp.vec3, pressure: float, neighbor_pressure: float, neighbor_rho: float, smoothing_length: float ): # calculate distance distance = wp.sqrt(wp.dot(xyz, xyz)) if distance < smoothing_length: # calculate terms of kernel term_1 = -xyz / distance term_2 = (neighbor_pressure + pressure) / (2.0 * neighbor_rho) term_3 = square(smoothing_length - distance) return term_1 * term_2 * term_3 else: return wp.vec3() @wp.func def diff_viscous_kernel(xyz: wp.vec3, v: wp.vec3, neighbor_v: wp.vec3, neighbor_rho: float, smoothing_length: float): # calculate distance distance = wp.sqrt(wp.dot(xyz, xyz)) # calculate terms of kernel if distance < smoothing_length: term_1 = (neighbor_v - v) / neighbor_rho term_2 = smoothing_length - distance return term_1 * term_2 else: return wp.vec3() @wp.kernel def compute_density( grid: wp.uint64, particle_x: wp.array(dtype=wp.vec3), particle_rho: wp.array(dtype=float), density_normalization: float, smoothing_length: float, ): tid = wp.tid() # order threads by cell i = wp.hash_grid_point_id(grid, tid) # get local particle variables x = particle_x[i] # store density rho = float(0.0) # particle contact neighbors = wp.hash_grid_query(grid, x, smoothing_length) # loop through neighbors to compute density for index in neighbors: # compute distance distance = x - particle_x[index] # compute kernel derivative rho += density_kernel(distance, smoothing_length) # add external potential particle_rho[i] = density_normalization * rho @wp.kernel def get_acceleration( grid: wp.uint64, particle_x: wp.array(dtype=wp.vec3), particle_v: wp.array(dtype=wp.vec3), particle_rho: wp.array(dtype=float), particle_a: wp.array(dtype=wp.vec3), isotropic_exp: float, base_density: float, gravity: float, pressure_normalization: float, viscous_normalization: float, smoothing_length: float, ): tid = wp.tid() # order threads by cell i = wp.hash_grid_point_id(grid, tid) # get local particle variables x = particle_x[i] v = particle_v[i] rho = particle_rho[i] pressure = isotropic_exp * (rho - base_density) # store forces pressure_force = wp.vec3() viscous_force = wp.vec3() # particle contact neighbors = wp.hash_grid_query(grid, x, 
smoothing_length) # loop through neighbors to compute acceleration for index in neighbors: if index != i: # get neighbor velocity neighbor_v = particle_v[index] # get neighbor density and pressures neighbor_rho = particle_rho[index] neighbor_pressure = isotropic_exp * (neighbor_rho - base_density) # compute relative position relative_position = particle_x[index] - x # calculate pressure force pressure_force += diff_pressure_kernel( relative_position, pressure, neighbor_pressure, neighbor_rho, smoothing_length ) # compute kernel derivative viscous_force += diff_viscous_kernel(relative_position, v, neighbor_v, neighbor_rho, smoothing_length) # sum all forces force = pressure_normalization * pressure_force + viscous_normalization * viscous_force # add external potential particle_a[i] = force / rho + wp.vec3(0.0, gravity, 0.0) @wp.kernel def apply_bounds( particle_x: wp.array(dtype=wp.vec3), particle_v: wp.array(dtype=wp.vec3), damping_coef: float, width: float, height: float, length: float, ): tid = wp.tid() # get pos and velocity x = particle_x[tid] v = particle_v[tid] # clamp x left if x[0] < 0.0: x = wp.vec3(0.0, x[1], x[2]) v = wp.vec3(v[0] * damping_coef, v[1], v[2]) # clamp x right if x[0] > width: x = wp.vec3(width, x[1], x[2]) v = wp.vec3(v[0] * damping_coef, v[1], v[2]) # clamp y bot if x[1] < 0.0: x = wp.vec3(x[0], 0.0, x[2]) v = wp.vec3(v[0], v[1] * damping_coef, v[2]) # clamp z left if x[2] < 0.0: x = wp.vec3(x[0], x[1], 0.0) v = wp.vec3(v[0], v[1], v[2] * damping_coef) # clamp z right if x[2] > length: x = wp.vec3(x[0], x[1], length) v = wp.vec3(v[0], v[1], v[2] * damping_coef) # apply clamps particle_x[tid] = x particle_v[tid] = v @wp.kernel def kick(particle_v: wp.array(dtype=wp.vec3), particle_a: wp.array(dtype=wp.vec3), dt: float): tid = wp.tid() v = particle_v[tid] particle_v[tid] = v + particle_a[tid] * dt @wp.kernel def drift(particle_x: wp.array(dtype=wp.vec3), particle_v: wp.array(dtype=wp.vec3), dt: float): tid = wp.tid() x = particle_x[tid] particle_x[tid] = x + particle_v[tid] * dt @wp.kernel def initialize_particles( particle_x: wp.array(dtype=wp.vec3), smoothing_length: float, width: float, height: float, length: float ): tid = wp.tid() # grid size nr_x = wp.int32(width / 4.0 / smoothing_length) nr_y = wp.int32(height / smoothing_length) nr_z = wp.int32(length / 4.0 / smoothing_length) # calculate particle position z = wp.float(tid % nr_z) y = wp.float((tid // nr_z) % nr_y) x = wp.float((tid // (nr_z * nr_y)) % nr_x) pos = smoothing_length * wp.vec3(x, y, z) # add small jitter state = wp.rand_init(123, tid) pos = pos + 0.001 * smoothing_length * wp.vec3(wp.randn(state), wp.randn(state), wp.randn(state)) # set position particle_x[tid] = pos class Example: def __init__(self, stage_path="example_sph.usd", verbose=False): self.verbose = verbose # render params fps = 60 self.frame_dt = 1.0 / fps self.sim_time = 0.0 # simulation params self.smoothing_length = 0.8 # NOTE change this to adjust number of particles self.width = 80.0 # x self.height = 80.0 # y self.length = 80.0 # z self.isotropic_exp = 20 self.base_density = 1.0 self.particle_mass = 0.01 * self.smoothing_length**3 # reduce according to smoothing length self.dt = 0.01 * self.smoothing_length # decrease sim dt by smoothing length self.dynamic_visc = 0.025 self.damping_coef = -0.95 self.gravity = -0.1 self.n = int( self.height * (self.width / 4.0) * (self.height / 4.0) / (self.smoothing_length**3) ) # number particles (small box in corner) self.sim_step_to_frame_ratio = int(32 / self.smoothing_length) # 
constants self.density_normalization = (315.0 * self.particle_mass) / ( 64.0 * np.pi * self.smoothing_length**9 ) # integrate density kernel self.pressure_normalization = -(45.0 * self.particle_mass) / (np.pi * self.smoothing_length**6) self.viscous_normalization = (45.0 * self.dynamic_visc * self.particle_mass) / ( np.pi * self.smoothing_length**6 ) # allocate arrays self.x = wp.empty(self.n, dtype=wp.vec3) self.v = wp.zeros(self.n, dtype=wp.vec3) self.rho = wp.zeros(self.n, dtype=float) self.a = wp.zeros(self.n, dtype=wp.vec3) # set random positions wp.launch( kernel=initialize_particles, dim=self.n, inputs=[self.x, self.smoothing_length, self.width, self.height, self.length], ) # initialize in small area # create hash array grid_size = int(self.height / (4.0 * self.smoothing_length)) self.grid = wp.HashGrid(grid_size, grid_size, grid_size) # renderer self.renderer = None if stage_path: self.renderer = wp.render.UsdRenderer(stage_path) def step(self): with wp.ScopedTimer("step"): for _ in range(self.sim_step_to_frame_ratio): with wp.ScopedTimer("grid build", active=self.verbose): # build grid self.grid.build(self.x, self.smoothing_length) with wp.ScopedTimer("forces", active=self.verbose): # compute density of points wp.launch( kernel=compute_density, dim=self.n, inputs=[self.grid.id, self.x, self.rho, self.density_normalization, self.smoothing_length], ) # get new acceleration wp.launch( kernel=get_acceleration, dim=self.n, inputs=[ self.grid.id, self.x, self.v, self.rho, self.a, self.isotropic_exp, self.base_density, self.gravity, self.pressure_normalization, self.viscous_normalization, self.smoothing_length, ], ) # apply bounds wp.launch( kernel=apply_bounds, dim=self.n, inputs=[self.x, self.v, self.damping_coef, self.width, self.height, self.length], ) # kick wp.launch(kernel=kick, dim=self.n, inputs=[self.v, self.a, self.dt]) # drift wp.launch(kernel=drift, dim=self.n, inputs=[self.x, self.v, self.dt]) self.sim_time += self.frame_dt def render(self): if self.renderer is None: return with wp.ScopedTimer("render"): self.renderer.begin_frame(self.sim_time) self.renderer.render_points( points=self.x.numpy(), radius=self.smoothing_length, name="points", colors=(0.8, 0.3, 0.2) ) self.renderer.end_frame() if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--device", type=str, default=None, help="Override the default Warp device.") parser.add_argument( "--stage_path", type=lambda x: None if x == "None" else str(x), default="example_sph.usd", help="Path to the output USD file.", ) parser.add_argument("--num_frames", type=int, default=480, help="Total number of frames.") parser.add_argument("--verbose", action="store_true", help="Print out additional status messages during execution.") args = parser.parse_known_args()[0] with wp.ScopedDevice(args.device): example = Example(stage_path=args.stage_path, verbose=args.verbose) for _ in range(args.num_frames): example.render() example.step() if example.renderer: example.renderer.save()
size: 12,622 | lang: Python | avg_line_length: 30.245049 | max_line_length: 120 | alphanum_fraction: 0.573919
NVIDIA/warp/warp/examples/core/example_wave.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. ########################################################################### # Example Wave # # Shows how to implement a simple 2D wave-equation solver with collision # against a moving sphere. # ########################################################################### import math import warp as wp import warp.render @wp.func def sample(f: wp.array(dtype=float), x: int, y: int, width: int, height: int): # clamp texture coords x = wp.clamp(x, 0, width - 1) y = wp.clamp(y, 0, height - 1) s = f[y * width + x] return s @wp.func def laplacian(f: wp.array(dtype=float), x: int, y: int, width: int, height: int): ddx = sample(f, x + 1, y, width, height) - 2.0 * sample(f, x, y, width, height) + sample(f, x - 1, y, width, height) ddy = sample(f, x, y + 1, width, height) - 2.0 * sample(f, x, y, width, height) + sample(f, x, y - 1, width, height) return ddx + ddy @wp.kernel def wave_displace( hcurrent: wp.array(dtype=float), hprevious: wp.array(dtype=float), width: int, height: int, center_x: float, center_y: float, r: float, mag: float, t: float, ): tid = wp.tid() x = tid % width y = tid // width dx = float(x) - center_x dy = float(y) - center_y dist_sq = float(dx * dx + dy * dy) if dist_sq < r * r: h = mag * wp.sin(t) hcurrent[tid] = h hprevious[tid] = h @wp.kernel def wave_solve( hprevious: wp.array(dtype=float), hcurrent: wp.array(dtype=float), width: int, height: int, inv_cell: float, k_speed: float, k_damp: float, dt: float, ): tid = wp.tid() x = tid % width y = tid // width l = laplacian(hcurrent, x, y, width, height) * inv_cell * inv_cell # integrate h1 = hcurrent[tid] h0 = hprevious[tid] h = 2.0 * h1 - h0 + dt * dt * (k_speed * l - k_damp * (h1 - h0)) # buffers get swapped each iteration hprevious[tid] = h # simple kernel to apply height deltas to a vertex array @wp.kernel def grid_update(heights: wp.array(dtype=float), vertices: wp.array(dtype=wp.vec3)): tid = wp.tid() h = heights[tid] v = vertices[tid] v_new = wp.vec3(v[0], h, v[2]) vertices[tid] = v_new class Example: def __init__(self, stage_path="example_wave.usd", verbose=False): self.sim_width = 128 self.sim_height = 128 fps = 60 self.sim_substeps = 16 self.sim_dt = (1.0 / fps) / self.sim_substeps self.sim_time = 0.0 # wave constants self.k_speed = 1.0 self.k_damp = 0.0 # grid constants self.grid_size = 0.1 self.grid_displace = 0.5 self.verbose = verbose vertices = [] self.indices = [] def grid_index(x, y, stride): return y * stride + x for z in range(self.sim_height): for x in range(self.sim_width): pos = ( float(x) * self.grid_size, 0.0, float(z) * self.grid_size, ) # directly modifies verts_host memory since this is a numpy alias of the same buffer vertices.append(pos) if x > 0 and z > 0: self.indices.append(grid_index(x - 1, z - 1, self.sim_width)) self.indices.append(grid_index(x, z, self.sim_width)) self.indices.append(grid_index(x, z - 1, self.sim_width)) self.indices.append(grid_index(x - 1, z - 1, self.sim_width)) self.indices.append(grid_index(x - 1, z, self.sim_width)) self.indices.append(grid_index(x, z, self.sim_width)) # simulation grids self.sim_grid0 = wp.zeros(self.sim_width * self.sim_height, dtype=float) 
self.sim_grid1 = wp.zeros(self.sim_width * self.sim_height, dtype=float) self.sim_verts = wp.array(vertices, dtype=wp.vec3) # create surface displacement around a point self.cx = self.sim_width / 2 + math.sin(self.sim_time) * self.sim_width / 3 self.cy = self.sim_height / 2 + math.cos(self.sim_time) * self.sim_height / 3 if stage_path: self.renderer = wp.render.UsdRenderer(stage_path) else: self.renderer = None def step(self): with wp.ScopedTimer("step"): for _s in range(self.sim_substeps): # create surface displacement around a point self.cx = self.sim_width / 2 + math.sin(self.sim_time) * self.sim_width / 3 self.cy = self.sim_height / 2 + math.cos(self.sim_time) * self.sim_height / 3 wp.launch( kernel=wave_displace, dim=self.sim_width * self.sim_height, inputs=[ self.sim_grid0, self.sim_grid1, self.sim_width, self.sim_height, self.cx, self.cy, 10.0, self.grid_displace, -math.pi * 0.5, ], ) # integrate wave equation wp.launch( kernel=wave_solve, dim=self.sim_width * self.sim_height, inputs=[ self.sim_grid0, self.sim_grid1, self.sim_width, self.sim_height, 1.0 / self.grid_size, self.k_speed, self.k_damp, self.sim_dt, ], ) # swap grids (self.sim_grid0, self.sim_grid1) = (self.sim_grid1, self.sim_grid0) self.sim_time += self.sim_dt with wp.ScopedTimer("mesh", self.verbose): # update grid vertices from heights wp.launch(kernel=grid_update, dim=self.sim_width * self.sim_height, inputs=[self.sim_grid0, self.sim_verts]) def render(self): if self.renderer is None: return with wp.ScopedTimer("render"): vertices = self.sim_verts.numpy() self.renderer.begin_frame(self.sim_time) self.renderer.render_mesh("surface", vertices, self.indices, colors=((0.35, 0.55, 0.9),) * len(vertices)) self.renderer.render_sphere( "sphere", (self.cx * self.grid_size, 0.0, self.cy * self.grid_size), (0.0, 0.0, 0.0, 1.0), 10.0 * self.grid_size, color=(1.0, 1.0, 1.0), ) self.renderer.end_frame() if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--device", type=str, default=None, help="Override the default Warp device.") parser.add_argument( "--stage_path", type=lambda x: None if x == "None" else str(x), default="example_wave.usd", help="Path to the output USD file.", ) parser.add_argument("--num_frames", type=int, default=300, help="Total number of frames.") parser.add_argument("--verbose", action="store_true", help="Print out additional status messages during execution.") args = parser.parse_known_args()[0] with wp.ScopedDevice(args.device): example = Example(stage_path=args.stage_path, verbose=args.verbose) for _ in range(args.num_frames): example.step() example.render() if example.renderer: example.renderer.save()
size: 8,194 | lang: Python | avg_line_length: 30.278626 | max_line_length: 120 | alphanum_fraction: 0.527581
NVIDIA/warp/warp/examples/assets/nv_ant.xml
<mujoco model="ant"> <custom> <numeric data="0.0 0.0 0.55 1.0 0.0 0.0 0.0 0.0 1.0 0.0 -1.0 0.0 -1.0 0.0 1.0" name="init_qpos"/> </custom> <default> <joint armature="0.01" damping="0.1" limited="true"/> <geom condim="3" density="5.0" friction="1.5 0.1 0.1" margin="0.01" rgba="0.97 0.38 0.06 1"/> </default> <compiler inertiafromgeom="true" angle="degree"/> <option timestep="0.016" iterations="50" tolerance="1e-10" solver="Newton" jacobian="dense" cone="pyramidal"/> <size nconmax="50" njmax="200" nstack="10000"/> <visual> <map force="0.1" zfar="30"/> <rgba haze="0.15 0.25 0.35 1"/> <quality shadowsize="2048"/> <global offwidth="800" offheight="800"/> </visual> <asset> <texture type="skybox" builtin="gradient" rgb1="0.3 0.5 0.7" rgb2="0 0 0" width="512" height="512"/> <texture name="texplane" type="2d" builtin="checker" rgb1=".2 .3 .4" rgb2=".1 0.15 0.2" width="512" height="512" mark="cross" markrgb=".8 .8 .8"/> <texture name="texgeom" type="cube" builtin="flat" mark="cross" width="127" height="1278" rgb1="0.8 0.6 0.4" rgb2="0.8 0.6 0.4" markrgb="1 1 1" random="0.01"/> <material name="matplane" reflectance="0.3" texture="texplane" texrepeat="1 1" texuniform="true"/> <material name="matgeom" texture="texgeom" texuniform="true" rgba="0.8 0.6 .4 1"/> </asset> <worldbody> <geom name="floor" pos="0 0 0" size="0 0 .25" type="plane" material="matplane" condim="3"/> <light directional="false" diffuse=".2 .2 .2" specular="0 0 0" pos="0 0 5" dir="0 0 -1" castshadow="false"/> <light mode="targetbodycom" target="torso" directional="false" diffuse=".8 .8 .8" specular="0.3 0.3 0.3" pos="0 0 4.0" dir="0 0 -1"/> <body name="torso" pos="0 0 0.75"> <geom name="torso_geom" pos="0 0 0" size="0.25" type="sphere"/> <geom fromto="0.0 0.0 0.0 0.2 0.2 0.0" name="aux_1_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/> <geom fromto="0.0 0.0 0.0 -0.2 0.2 0.0" name="aux_2_geom" size="0.08" type="capsule"/> <geom fromto="0.0 0.0 0.0 -0.2 -0.2 0.0" name="aux_3_geom" size="0.08" type="capsule"/> <geom fromto="0.0 0.0 0.0 0.2 -0.2 0.0" name="aux_4_geom" size="0.08" type="capsule" rgba=".999 .2 .02 1"/> <joint armature="0" damping="0" limited="false" margin="0.01" name="root" pos="0 0 0" type="free"/> <body name="front_left_leg" pos="0.2 0.2 0"> <joint axis="0 0 1" name="hip_1" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/> <geom fromto="0.0 0.0 0.0 0.2 0.2 0.0" name="left_leg_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/> <body pos="0.2 0.2 0" name="front_left_foot"> <joint axis="-1 1 0" name="ankle_1" pos="0.0 0.0 0.0" range="30 100" type="hinge"/> <geom fromto="0.0 0.0 0.0 0.4 0.4 0.0" name="left_ankle_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/> </body> </body> <body name="front_right_leg" pos="-0.2 0.2 0"> <joint axis="0 0 1" name="hip_2" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/> <geom fromto="0.0 0.0 0.0 -0.2 0.2 0.0" name="right_leg_geom" size="0.08" type="capsule"/> <body pos="-0.2 0.2 0" name="front_right_foot"> <joint axis="1 1 0" name="ankle_2" pos="0.0 0.0 0.0" range="-100 -30" type="hinge"/> <geom fromto="0.0 0.0 0.0 -0.4 0.4 0.0" name="right_ankle_geom" size="0.08" type="capsule"/> </body> </body> <body name="left_back_leg" pos="-0.2 -0.2 0"> <joint axis="0 0 1" name="hip_3" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/> <geom fromto="0.0 0.0 0.0 -0.2 -0.2 0.0" name="back_leg_geom" size="0.08" type="capsule"/> <body pos="-0.2 -0.2 0" name="left_back_foot"> <joint axis="-1 1 0" name="ankle_3" pos="0.0 0.0 0.0" range="-100 -30" type="hinge"/> <geom fromto="0.0 0.0 0.0 -0.4 -0.4 0.0" 
name="third_ankle_geom" size="0.08" type="capsule"/> </body> </body> <body name="right_back_leg" pos="0.2 -0.2 0"> <joint axis="0 0 1" name="hip_4" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/> <geom fromto="0.0 0.0 0.0 0.2 -0.2 0.0" name="rightback_leg_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/> <body pos="0.2 -0.2 0" name="right_back_foot"> <joint axis="1 1 0" name="ankle_4" pos="0.0 0.0 0.0" range="30 100" type="hinge"/> <geom fromto="0.0 0.0 0.0 0.4 -0.4 0.0" name="fourth_ankle_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/> </body> </body> </body> </worldbody> <actuator> <motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_4" gear="15"/> <motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_4" gear="15"/> <motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_1" gear="15"/> <motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_1" gear="15"/> <motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_2" gear="15"/> <motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_2" gear="15"/> <motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_3" gear="15"/> <motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_3" gear="15"/> </actuator> </mujoco>
5,235
XML
55.301075
152
0.570392
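These MJCF assets are what the warp.sim examples load. A minimal sketch, assuming `warp.sim.parse_mjcf` accepts the asset path and a `ModelBuilder` as its first two arguments and that the finalized `Model` exposes `body_count`/`joint_count` (keyword arguments and attribute names may differ between Warp versions):

import os

import warp as wp
import warp.examples
import warp.sim

builder = wp.sim.ModelBuilder()

# hypothetical invocation: (path, builder) follows the warp.sim examples,
# remaining keyword arguments are version-dependent and omitted here
wp.sim.parse_mjcf(
    os.path.join(warp.examples.get_asset_directory(), "nv_ant.xml"),
    builder,
)

model = builder.finalize()
print(model.body_count, model.joint_count)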
NVIDIA/warp/warp/examples/assets/nv_humanoid.xml
<mujoco model="humanoid"> <statistic extent="2" center="0 0 1"/> <option timestep="0.00555"/> <default> <motor ctrlrange="-1 1" ctrllimited="true"/> <default class="body"> <geom type="capsule" condim="1" friction="1.0 0.05 0.05" solimp=".9 .99 .003" solref=".015 1" material="self"/> <joint limited="true" type="hinge" damping="0.1" stiffness="5" armature=".007" solimplimit="0 .99 .01"/> <site size=".04" group="3"/> <default class="force-torque"> <site type="box" size=".01 .01 .02" rgba="1 0 0 1" /> </default> <default class="touch"> <site type="capsule" rgba="0 0 1 .3"/> </default> </default> </default> <worldbody> <geom name="floor" type="plane" conaffinity="1" size="100 100 .2" material="grid"/> <body name="torso" pos="0 0 0" childclass="body"> <light name="top" pos="0 0 2" mode="trackcom"/> <camera name="back" pos="-3 0 1" xyaxes="0 -1 0 1 0 2" mode="trackcom"/> <camera name="side" pos="0 -3 1" xyaxes="1 0 0 0 1 2" mode="trackcom"/> <joint armature="0" damping="0" limited="false" margin="0.01" name="root" pos="0 0 0" type="free"/> <site name="root" class="force-torque"/> <geom name="torso" type="capsule" fromto="0 -.07 0 0 .07 0" size=".07"/> <geom name="upper_waist" type="capsule" fromto="-.01 -.06 -.12 -.01 .06 -.12" size=".06"/> <site name="torso" class="touch" type="box" pos="0 0 -.05" size=".075 .14 .13"/> <geom name="head" type="sphere" size=".09" pos="0 0 .19"/> <body name="lower_waist" pos="-.01 0 -.260" quat="1.000 0 -.002 0"> <geom name="lower_waist" type="capsule" fromto="0 -.06 0 0 .06 0" size=".06"/> <site name="lower_waist" class="touch" size=".061 .06" zaxis="0 1 0"/> <joint limited="true" name="abdomen_z" pos="0 0 .065" axis="0 0 1" range="-45 45" damping="5" stiffness="20" armature=".02"/> <joint limited="true" name="abdomen_y" pos="0 0 .065" axis="0 1 0" range="-75 30" damping="5" stiffness="20" armature=".01"/> <body name="pelvis" pos="0 0 -.165" quat="1.000 0 -.002 0"> <joint limited="true" name="abdomen_x" pos="0 0 .1" axis="1 0 0" range="-35 35" damping="5" stiffness="10" armature=".01"/> <geom name="butt" type="capsule" fromto="-.02 -.07 0 -.02 .07 0" size=".09"/> <site name="butt" class="touch" size=".091 .07" pos="-.02 0 0" zaxis="0 1 0"/> <body name="right_thigh" pos="0 -.1 -.04"> <site name="right_hip" class="force-torque"/> <joint limited="true" name="right_hip_x" axis="1 0 0" range="-25 5" damping="5" stiffness="10" armature=".01"/> <joint limited="true" name="right_hip_z" axis="0 0 1" range="-60 35" damping="5" stiffness="10" armature=".01"/> <joint limited="true" name="right_hip_y" axis="0 1 0" range="-80 20" damping="5" stiffness="20" armature=".01"/> <geom name="right_thigh" type="capsule" fromto="0 0 0 0 .01 -.34" size=".06"/> <site name="right_thigh" class="touch" pos="0 .005 -.17" size=".061 .17" zaxis="0 -1 34"/> <body name="right_shin" pos="0 .01 -.403"> <site name="right_knee" class="force-torque" pos="0 0 .02"/> <joint limited="true" name="right_knee" pos="0 0 .02" axis="0 -1 0" range="-160 2"/> <geom name="right_shin" type="capsule" fromto="0 0 0 0 0 -.3" size=".049"/> <site name="right_shin" class="touch" pos="0 0 -.15" size=".05 .15"/> <body name="right_foot" pos="0 0 -.39"> <site name="right_ankle" class="force-torque"/> <joint limited="true" name="right_ankle_y" pos="0 0 .08" axis="0 1 0" range="-50 50" damping="1.0" stiffness="2" armature=".006"/> <joint limited="true" name="right_ankle_x" pos="0 0 .08" axis="1 0 .5" range="-50 50" damping="1.0" stiffness="2" armature=".006"/> <geom name="right_right_foot" type="capsule" fromto="-.07 -.02 0 
.14 -.04 0" size=".027"/> <geom name="left_right_foot" type="capsule" fromto="-.07 0 0 .14 .02 0" size=".027"/> <site name="right_right_foot" class="touch" pos=".035 -.03 0" size=".03 .11" zaxis="21 -2 0"/> <site name="left_right_foot" class="touch" pos=".035 .01 0" size=".03 .11" zaxis="21 2 0"/> </body> </body> </body> <body name="left_thigh" pos="0 .1 -.04"> <site name="left_hip" class="force-torque"/> <joint limited="true" name="left_hip_x" axis="-1 0 0" range="-25 5" damping="5" stiffness="10" armature=".01"/> <joint limited="true" name="left_hip_z" axis="0 0 -1" range="-60 35" damping="5" stiffness="10" armature=".01"/> <joint limited="true" name="left_hip_y" axis="0 1 0" range="-80 20" damping="5" stiffness="20" armature=".01"/> <geom name="left_thigh" type="capsule" fromto="0 0 0 0 -.01 -.34" size=".06"/> <site name="left_thigh" class="touch" pos="0 -.005 -.17" size=".061 .17" zaxis="0 1 34"/> <body name="left_shin" pos="0 -.01 -.403"> <site name="left_knee" class="force-torque" pos="0 0 .02"/> <joint limited="true" name="left_knee" pos="0 0 .02" axis="0 -1 0" range="-160 2"/> <geom name="left_shin" type="capsule" fromto="0 0 0 0 0 -.3" size=".049"/> <site name="left_shin" class="touch" pos="0 0 -.15" size=".05 .15"/> <body name="left_foot" pos="0 0 -.39"> <site name="left_ankle" class="force-torque"/> <joint limited="true" name="left_ankle_y" pos="0 0 .08" axis="0 1 0" range="-50 50" damping="1.0" stiffness="2" armature=".006"/> <joint limited="true" name="left_ankle_x" pos="0 0 .08" axis="1 0 .5" range="-50 50" damping="1.0" stiffness="2" armature=".006"/> <geom name="left_left_foot" type="capsule" fromto="-.07 .02 0 .14 .04 0" size=".027"/> <geom name="right_left_foot" type="capsule" fromto="-.07 0 0 .14 -.02 0" size=".027"/> <site name="right_left_foot" class="touch" pos=".035 -.01 0" size=".03 .11" zaxis="21 -2 0"/> <site name="left_left_foot" class="touch" pos=".035 .03 0" size=".03 .11" zaxis="21 2 0"/> </body> </body> </body> </body> </body> <body name="right_upper_arm" pos="0 -.17 .06"> <joint limited="true" name="right_shoulder1" axis="2 1 1" range="-60 60" damping="5" stiffness="10" armature=".01"/> <joint limited="true" name="right_shoulder2" axis="0 -1 1" range="-60 60" damping="5" stiffness="10" armature=".01"/> <geom name="right_upper_arm" type="capsule" fromto="0 0 0 .16 -.16 -.16" size=".04 .16"/> <site name="right_upper_arm" class="touch" pos=".08 -.08 -.08" size=".041 .14" zaxis="1 -1 -1"/> <body name="right_lower_arm" pos=".18 -.18 -.18"> <joint limited="true" name="right_elbow" axis="0 -1 1" range="-90 50" damping="1.0" stiffness="2" armature=".006"/> <geom name="right_lower_arm" type="capsule" fromto=".01 .01 .01 .17 .17 .17" size=".031"/> <site name="right_lower_arm" class="touch" pos=".09 .09 .09" size=".032 .14" zaxis="1 1 1"/> <geom name="right_hand" type="sphere" size=".04" pos=".18 .18 .18"/> </body> </body> <body name="left_upper_arm" pos="0 .17 .06"> <joint limited="true" name="left_shoulder1" axis="-2 1 -1" range="-60 60" damping="5" stiffness="10" armature=".01"/> <joint limited="true" name="left_shoulder2" axis="0 -1 -1" range="-60 60" damping="5" stiffness="10" armature=".01"/> <geom name="left_upper_arm" type="capsule" fromto="0 0 0 .16 .16 -.16" size=".04 .16"/> <site name="left_upper_arm" class="touch" pos=".08 .08 -.08" size=".041 .14" zaxis="1 1 -1"/> <body name="left_lower_arm" pos=".18 .18 -.18"> <joint limited="true" name="left_elbow" axis="0 -1 -1" range="-90 50" damping="1.0" stiffness="2" armature=".006"/> <geom name="left_lower_arm" 
type="capsule" fromto=".01 -.01 .01 .17 -.17 .17" size=".031"/> <site name="left_lower_arm" class="touch" pos=".09 -.09 .09" size=".032 .14" zaxis="1 -1 1"/> <geom name="left_hand" type="sphere" size=".04" pos=".18 -.18 .18"/> </body> </body> </body> </worldbody> <actuator> <motor name='abdomen_y' gear='67.5' joint='abdomen_y'/> <motor name='abdomen_z' gear='67.5' joint='abdomen_z'/> <motor name='abdomen_x' gear='67.5' joint='abdomen_x'/> <motor name='right_hip_x' gear='45.0' joint='right_hip_x'/> <motor name='right_hip_z' gear='45.0' joint='right_hip_z'/> <motor name='right_hip_y' gear='135.0' joint='right_hip_y'/> <motor name='right_knee' gear='90.0' joint='right_knee'/> <motor name='right_ankle_x' gear='22.5' joint='right_ankle_x'/> <motor name='right_ankle_y' gear='22.5' joint='right_ankle_y'/> <motor name='left_hip_x' gear='45.0' joint='left_hip_x'/> <motor name='left_hip_z' gear='45.0' joint='left_hip_z'/> <motor name='left_hip_y' gear='135.0' joint='left_hip_y'/> <motor name='left_knee' gear='90.0' joint='left_knee'/> <motor name='left_ankle_x' gear='22.5' joint='left_ankle_x'/> <motor name='left_ankle_y' gear='22.5' joint='left_ankle_y'/> <motor name='right_shoulder1' gear='67.5' joint='right_shoulder1'/> <motor name='right_shoulder2' gear='67.5' joint='right_shoulder2'/> <motor name='right_elbow' gear='45.0' joint='right_elbow'/> <motor name='left_shoulder1' gear='67.5' joint='left_shoulder1'/> <motor name='left_shoulder2' gear='67.5' joint='left_shoulder2'/> <motor name='left_elbow' gear='45.0' joint='left_elbow'/> </actuator> <sensor> <subtreelinvel name="torso_subtreelinvel" body="torso"/> <accelerometer name="torso_accel" site="root"/> <velocimeter name="torso_vel" site="root"/> <gyro name="torso_gyro" site="root"/> <force name="left_ankle_force" site="left_ankle"/> <force name="right_ankle_force" site="right_ankle"/> <force name="left_knee_force" site="left_knee"/> <force name="right_knee_force" site="right_knee"/> <force name="left_hip_force" site="left_hip"/> <force name="right_hip_force" site="right_hip"/> <torque name="left_ankle_torque" site="left_ankle"/> <torque name="right_ankle_torque" site="right_ankle"/> <torque name="left_knee_torque" site="left_knee"/> <torque name="right_knee_torque" site="right_knee"/> <torque name="left_hip_torque" site="left_hip"/> <torque name="right_hip_torque" site="right_hip"/> <touch name="torso_touch" site="torso"/> <touch name="head_touch" site="head"/> <touch name="lower_waist_touch" site="lower_waist"/> <touch name="butt_touch" site="butt"/> <touch name="right_thigh_touch" site="right_thigh"/> <touch name="right_shin_touch" site="right_shin"/> <touch name="right_right_foot_touch" site="right_right_foot"/> <touch name="left_right_foot_touch" site="left_right_foot"/> <touch name="left_thigh_touch" site="left_thigh"/> <touch name="left_shin_touch" site="left_shin"/> <touch name="right_left_foot_touch" site="right_left_foot"/> <touch name="left_left_foot_touch" site="left_left_foot"/> <touch name="right_upper_arm_touch" site="right_upper_arm"/> <touch name="right_lower_arm_touch" site="right_lower_arm"/> <touch name="right_hand_touch" site="right_hand"/> <touch name="left_upper_arm_touch" site="left_upper_arm"/> <touch name="left_lower_arm_touch" site="left_lower_arm"/> <touch name="left_hand_touch" site="left_hand"/> </sensor> </mujoco>
12,018
XML
64.320652
147
0.562406
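Because both assets are plain MJCF, their structure can be inspected without any physics dependency. A small sketch using only the standard library to list the humanoid's actuated joints and gear ratios (the file path is assumed to point at the nv_humanoid.xml asset shown above):

import xml.etree.ElementTree as ET

# assumed local path to the asset above
root = ET.parse("nv_humanoid.xml").getroot()

# every <motor> in <actuator> drives one named joint with a gear ratio
for motor in root.findall("./actuator/motor"):
    print(f"{motor.get('joint'):16s} gear={motor.get('gear')}")

# hinge joints carry the per-joint range limits used above
hinges = root.findall(".//joint[@type='hinge']")
print(f"{len(hinges)} hinge joints")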
NVIDIA/warp/warp/examples/benchmarks/benchmark_api.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import gc import statistics as stats import warp as wp ENABLE_MEMPOOLS = False ENABLE_PEER_ACCESS = False ENABLE_MEMPOOL_ACCESS = False ENABLE_MEMPOOL_RELEASE_THRESHOLD = False MEMPOOL_RELEASE_THRESHOLD = 1024 * 1024 * 1024 DO_SYNC = False VERBOSE = False USE_NVTX = False num_elems = 10000 num_runs = 10000 trim_runs = 2500 @wp.kernel def inc_kernel(a: wp.array(dtype=float)): tid = wp.tid() a[tid] = a[tid] + 1.0 # configure devices for target_device in wp.get_cuda_devices(): try: wp.set_mempool_enabled(target_device, ENABLE_MEMPOOLS) if ENABLE_MEMPOOL_RELEASE_THRESHOLD: wp.set_mempool_release_threshold(target_device, MEMPOOL_RELEASE_THRESHOLD) except Exception as e: print(f"Error: {e}") for peer_device in wp.get_cuda_devices(): try: wp.set_peer_access_enabled(target_device, peer_device, ENABLE_PEER_ACCESS) except Exception as e: print(f"Error: {e}") try: wp.set_mempool_access_enabled(target_device, peer_device, ENABLE_MEMPOOL_ACCESS) except Exception as e: print(f"Error: {e}") cuda_device_count = wp.get_cuda_device_count() cuda0 = wp.get_device("cuda:0") # preallocate some arrays arr_host = wp.zeros(num_elems, dtype=float, device="cpu", pinned=False) arr_host_pinned = wp.zeros(num_elems, dtype=float, device="cpu", pinned=True) arr_cuda0 = wp.zeros(num_elems, dtype=float, device=cuda0) arr_cuda0_src = wp.zeros(num_elems, dtype=float, device=cuda0) arr_cuda0_dst = wp.zeros(num_elems, dtype=float, device=cuda0) # mgpu support if cuda_device_count > 1: cuda1 = wp.get_device("cuda:1") arr_cuda1 = wp.zeros(num_elems, dtype=float, device=cuda1) stream0 = wp.Stream(cuda0) # preload module wp.force_load(cuda0) if cuda_device_count > 1: wp.force_load(cuda1) # capture graph with wp.ScopedDevice(cuda0): wp.capture_begin() wp.launch(inc_kernel, dim=arr_cuda0.size, inputs=[arr_cuda0]) graph0 = wp.capture_end() g_allocs = [None] * num_runs def test_alloc(num_elems, device, idx): wp.synchronize() with wp.ScopedTimer("alloc", print=VERBOSE, use_nvtx=USE_NVTX) as timer: g_allocs[idx] = wp.empty(num_elems, dtype=float, device=device) if DO_SYNC: wp.synchronize_device(device) return timer.elapsed def test_free(device, idx): wp.synchronize() with wp.ScopedTimer("free", print=VERBOSE, use_nvtx=USE_NVTX) as timer: g_allocs[idx] = None if DO_SYNC: wp.synchronize_device(device) return timer.elapsed def test_zeros(num_elems, device, idx): wp.synchronize() with wp.ScopedTimer("zeros", print=VERBOSE, use_nvtx=USE_NVTX) as timer: g_allocs[idx] = wp.zeros(num_elems, dtype=float, device=device) if DO_SYNC: wp.synchronize_device(device) return timer.elapsed def test_h2d(num_elems, device): wp.synchronize() with wp.ScopedTimer("h2d", print=VERBOSE, use_nvtx=USE_NVTX) as timer: wp.copy(arr_cuda0, arr_host) if DO_SYNC: wp.synchronize_device(device) return timer.elapsed def test_d2h(num_elems, device): wp.synchronize() with wp.ScopedTimer("d2h", print=VERBOSE, use_nvtx=USE_NVTX) as timer: wp.copy(arr_host, arr_cuda0) if DO_SYNC: wp.synchronize_device(device) return timer.elapsed def test_h2d_pinned(num_elems, device): wp.synchronize() with wp.ScopedTimer("h2d pinned", print=VERBOSE, use_nvtx=USE_NVTX) as timer: 
wp.copy(arr_cuda0, arr_host_pinned) if DO_SYNC: wp.synchronize_device(device) return timer.elapsed def test_d2h_pinned(num_elems, device): wp.synchronize() with wp.ScopedTimer("d2h pinned", print=VERBOSE, use_nvtx=USE_NVTX) as timer: wp.copy(arr_host_pinned, arr_cuda0) if DO_SYNC: wp.synchronize_device(device) return timer.elapsed def test_d2d(num_elems, device): wp.synchronize() with wp.ScopedTimer("d2d", print=VERBOSE, use_nvtx=USE_NVTX) as timer: wp.copy(arr_cuda0_dst, arr_cuda0_src) if DO_SYNC: wp.synchronize_device(device) return timer.elapsed def test_p2p(num_elems, src_device, dst_device): wp.synchronize() with wp.ScopedTimer("p2p", print=VERBOSE, use_nvtx=USE_NVTX) as timer: wp.copy(arr_cuda0, arr_cuda1) if DO_SYNC: wp.synchronize_device(src_device) wp.synchronize_device(dst_device) return timer.elapsed def test_p2p_stream(num_elems, src_device, dst_device): stream = stream0 wp.synchronize() with wp.ScopedTimer("p2p stream", print=VERBOSE, use_nvtx=USE_NVTX) as timer: wp.copy(arr_cuda0, arr_cuda1, stream=stream) if DO_SYNC: wp.synchronize_device(src_device) wp.synchronize_device(dst_device) return timer.elapsed def test_launch(num_elems, device): a = arr_cuda0 wp.synchronize() with wp.ScopedTimer("launch", print=VERBOSE, use_nvtx=USE_NVTX) as timer: wp.launch(inc_kernel, dim=a.size, inputs=[a], device=device) if DO_SYNC: wp.synchronize_device(device) return timer.elapsed def test_launch_stream(num_elems, device): a = arr_cuda0 stream = stream0 wp.synchronize() with wp.ScopedTimer("launch stream", print=VERBOSE, use_nvtx=USE_NVTX) as timer: wp.launch(inc_kernel, dim=a.size, inputs=[a], stream=stream) if DO_SYNC: wp.synchronize_device(device) return timer.elapsed def test_graph(num_elems, device): wp.synchronize() with wp.ScopedTimer("graph", print=VERBOSE, use_nvtx=USE_NVTX) as timer: wp.capture_launch(graph0) if DO_SYNC: wp.synchronize_device(device) return timer.elapsed def test_graph_stream(num_elems, device): wp.synchronize() with wp.ScopedTimer("graph", print=VERBOSE, use_nvtx=USE_NVTX) as timer: wp.capture_launch(graph0, stream=stream0) if DO_SYNC: wp.synchronize_device(device) return timer.elapsed alloc_times = [0] * num_runs free_times = [0] * num_runs zeros_times = [0] * num_runs d2h_times = [0] * num_runs h2d_times = [0] * num_runs d2h_pinned_times = [0] * num_runs h2d_pinned_times = [0] * num_runs d2d_times = [0] * num_runs p2p_times = [0] * num_runs p2p_stream_times = [0] * num_runs launch_times = [0] * num_runs launch_stream_times = [0] * num_runs graph_times = [0] * num_runs graph_stream_times = [0] * num_runs wp.set_device(cuda0) # alloc for i in range(num_runs): gc.disable() alloc_times[i] = test_alloc(num_elems, cuda0, i) gc.enable() # free for i in range(num_runs): gc.disable() free_times[i] = test_free(cuda0, i) gc.enable() # zeros for i in range(num_runs): gc.disable() zeros_times[i] = test_zeros(num_elems, cuda0, i) gc.enable() # free zeros for i in range(num_runs): g_allocs[i] = None # h2d, d2h pageable copy for i in range(num_runs): gc.disable() h2d_times[i] = test_h2d(num_elems, cuda0) d2h_times[i] = test_d2h(num_elems, cuda0) gc.enable() # h2d, d2h pinned copy for i in range(num_runs): gc.disable() h2d_pinned_times[i] = test_h2d_pinned(num_elems, cuda0) d2h_pinned_times[i] = test_d2h_pinned(num_elems, cuda0) gc.enable() # d2d copy for i in range(num_runs): gc.disable() d2d_times[i] = test_d2d(num_elems, cuda0) gc.enable() # p2p copy if cuda_device_count > 1: for i in range(num_runs): gc.disable() p2p_times[i] = test_p2p(num_elems, cuda1, cuda0) 
p2p_stream_times[i] = test_p2p_stream(num_elems, cuda1, cuda0) gc.enable() # launch for i in range(num_runs): gc.disable() launch_times[i] = test_launch(num_elems, cuda0) launch_stream_times[i] = test_launch_stream(num_elems, cuda0) gc.enable() # graph for i in range(num_runs): gc.disable() graph_times[i] = test_graph(num_elems, cuda0) graph_stream_times[i] = test_graph_stream(num_elems, cuda0) gc.enable() def print_stat(name, data, trim=trim_runs): assert len(data) - 2 * trim > 0 if trim > 0: data = sorted(data)[trim:-trim] print(f"{name:15s} {1000000 * stats.mean(data):.0f}") print("=========================") print_stat("Alloc", alloc_times) print_stat("Free", free_times) print_stat("Zeros", zeros_times) print_stat("H2D", h2d_times) print_stat("D2H", d2h_times) print_stat("H2D pinned", h2d_pinned_times) print_stat("D2H pinned", d2h_pinned_times) print_stat("D2D", d2d_times) print_stat("P2P", p2p_times) print_stat("P2P stream", p2p_stream_times) print_stat("Launch", launch_times) print_stat("Launch stream", launch_stream_times) print_stat("Graph", graph_times) print_stat("Graph stream", graph_stream_times) # ========= profiling ========== # from pyinstrument import Profiler # profiler = Profiler() # profiler.start() # for i in range(10): # # test_alloc(num_elems, cuda0) # # test_h2d(num_elems, cuda0) # test_p2p(num_elems, cuda0, cuda1) # profiler.stop() # print(profiler.output_text(show_all=True))
9,577
Python
24.073298
92
0.653963
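The graph benchmarks above time replaying a pre-recorded CUDA graph rather than issuing a fresh launch each iteration. A minimal sketch of that capture/replay pattern using the same wp.capture_begin / wp.capture_end / wp.capture_launch calls as the benchmark (standalone illustration, not a drop-in addition to the script):

import warp as wp


@wp.kernel
def scale_kernel(a: wp.array(dtype=float), s: float):
    tid = wp.tid()
    a[tid] = a[tid] * s


a = wp.zeros(1024, dtype=float, device="cuda:0")

with wp.ScopedDevice("cuda:0"):
    # make sure the module is compiled/loaded before capture,
    # mirroring the wp.force_load call in the benchmark
    wp.force_load(wp.get_device())

    # record the launch into a CUDA graph once...
    wp.capture_begin()
    wp.launch(scale_kernel, dim=a.size, inputs=[a, 2.0])
    graph = wp.capture_end()

# ...then replay it with much lower per-launch overhead
with wp.ScopedTimer("graph replay"):
    for _ in range(1000):
        wp.capture_launch(graph)
    wp.synchronize_device("cuda:0")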
NVIDIA/warp/warp/examples/benchmarks/benchmark_cloth_cupy.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import cupy as cp import cupyx as cpx def eval_springs(x, v, indices, rest, ke, kd, f): i = indices[:, 0] j = indices[:, 1] xi = x[i] xj = x[j] vi = v[i] vj = v[j] xij = xi - xj vij = vi - vj l = cp.linalg.norm(xij, axis=1) l_inv = 1.0 / l # normalized spring direction dir = (xij.T * l_inv).T c = l - rest dcdt = cp.sum(dir * vij, axis=1) # damping based on relative velocity. fs = dir.T * (ke * c + kd * dcdt) cpx.scatter_add(f, i, -fs.T) cpx.scatter_add(f, j, fs.T) def integrate_particles(x, v, f, w, dt): g = cp.array((0.0, 0.0 - 9.8, 0.0)) s = w > 0.0 a_ext = g * s[:, None] # simple semi-implicit Euler. v1 = v0 + a dt, x1 = x0 + v1 dt v += ((f.T * w).T + a_ext) * dt x += v * dt # clear forces f *= 0.0 class CpIntegrator: def __init__(self, cloth): self.cloth = cloth self.positions = cp.array(self.cloth.positions) self.velocities = cp.array(self.cloth.velocities) self.inv_mass = cp.array(self.cloth.inv_masses) self.spring_indices = cp.array(self.cloth.spring_indices) self.spring_lengths = cp.array(self.cloth.spring_lengths) self.spring_stiffness = cp.array(self.cloth.spring_stiffness) self.spring_damping = cp.array(self.cloth.spring_damping) self.forces = cp.zeros((self.cloth.num_particles, 3), dtype=cp.float32) def simulate(self, dt, substeps): sim_dt = dt / substeps for _s in range(substeps): eval_springs( self.positions, self.velocities, self.spring_indices.reshape((self.cloth.num_springs, 2)), self.spring_lengths, self.spring_stiffness, self.spring_damping, self.forces, ) # integrate integrate_particles(self.positions, self.velocities, self.forces, self.inv_mass, sim_dt) # return np.array(self.positions) return self.positions.get()
2,487
Python
26.955056
100
0.597507
NVIDIA/warp/warp/examples/benchmarks/benchmark_cloth_jax.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

import jax.lax
import jax.numpy as jnp
import numpy as np


@jax.jit
def eval_springs(x, v, indices, rest, ke, kd):
    i = indices[:, 0]
    j = indices[:, 1]

    xi = x[i]
    xj = x[j]

    vi = v[i]
    vj = v[j]

    xij = xi - xj
    vij = vi - vj

    l = jnp.linalg.norm(xij, axis=1)
    l_inv = 1.0 / l

    # normalized spring direction
    dir = (xij.T * l_inv).T

    c = l - rest
    dcdt = jnp.sum(dir * vij, axis=1)

    # damping based on relative velocity.
    fs = dir.T * (ke * c + kd * dcdt)

    f = jnp.zeros_like(v)

    # f = jax.ops.index_add(f, i, -fs.T, indices_are_sorted=False, unique_indices=False)
    # f = jax.ops.index_add(f, j, fs.T, indices_are_sorted=False, unique_indices=False)

    # JAX arrays are immutable: .at[].add() returns a new array, so the result
    # must be assigned back for the scatter-add to take effect
    f = f.at[i].add(-fs.T)
    f = f.at[j].add(fs.T)

    return f


@jax.jit
def integrate_particles(x, v, f, w, dt):
    g = jnp.array((0.0, 0.0 - 9.8, 0.0))
    s = w > 0.0

    a_ext = g * s[:, None]

    # simple semi-implicit Euler. v1 = v0 + a dt, x1 = x0 + v1 dt
    v += ((f.T * w).T + a_ext) * dt
    x += v * dt

    return (x, v)


class JxIntegrator:
    def __init__(self, cloth):
        self.cloth = cloth

        self.positions = jnp.array(self.cloth.positions)
        self.velocities = jnp.array(self.cloth.velocities)
        self.inv_mass = jnp.array(self.cloth.inv_masses)

        print(self.positions.device_buffer.device())

        self.spring_indices = jnp.array(self.cloth.spring_indices)
        self.spring_lengths = jnp.array(self.cloth.spring_lengths)
        self.spring_stiffness = jnp.array(self.cloth.spring_stiffness)
        self.spring_damping = jnp.array(self.cloth.spring_damping)

    def simulate(self, dt, substeps):
        sim_dt = dt / substeps

        for _s in range(substeps):
            f = eval_springs(
                self.positions,
                self.velocities,
                self.spring_indices.reshape((self.cloth.num_springs, 2)),
                self.spring_lengths,
                self.spring_stiffness,
                self.spring_damping,
            )

            # integrate
            (self.positions, self.velocities) = integrate_particles(
                self.positions, self.velocities, f, self.inv_mass, sim_dt
            )

        return np.array(self.positions)
2,683
Python
26.387755
88
0.600075
NVIDIA/warp/warp/examples/benchmarks/benchmark_cloth_pytorch.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import torch def eval_springs(x, v, indices, rest, ke, kd, f): i = indices[:, 0] j = indices[:, 1] xi = x[i] xj = x[j] vi = v[i] vj = v[j] xij = xi - xj vij = vi - vj l = torch.linalg.norm(xij, axis=1) l_inv = 1.0 / l # normalized spring direction dir = (xij.T * l_inv).T c = l - rest dcdt = torch.sum(dir * vij, axis=1) # damping based on relative velocity. fs = dir.T * (ke * c + kd * dcdt) f.index_add_(dim=0, index=i, source=-fs.T) f.index_add_(dim=0, index=j, source=fs.T) def integrate_particles(x, v, f, g, w, dt): s = w > 0.0 a_ext = g * s[:, None] # simple semi-implicit Euler. v1 = v0 + a dt, x1 = x0 + v1 dt v += ((f.T * w).T + a_ext) * dt x += v * dt # clear forces f *= 0.0 class TrIntegrator: def __init__(self, cloth, device): self.cloth = cloth self.positions = torch.tensor(self.cloth.positions, device=device) self.velocities = torch.tensor(self.cloth.velocities, device=device) self.inv_mass = torch.tensor(self.cloth.inv_masses, device=device) self.spring_indices = torch.tensor(self.cloth.spring_indices, device=device, dtype=torch.long) self.spring_lengths = torch.tensor(self.cloth.spring_lengths, device=device) self.spring_stiffness = torch.tensor(self.cloth.spring_stiffness, device=device) self.spring_damping = torch.tensor(self.cloth.spring_damping, device=device) self.forces = torch.zeros((self.cloth.num_particles, 3), dtype=torch.float32, device=device) self.gravity = torch.tensor((0.0, 0.0 - 9.8, 0.0), dtype=torch.float32, device=device) def simulate(self, dt, substeps): sim_dt = dt / substeps for _s in range(substeps): eval_springs( self.positions, self.velocities, self.spring_indices.reshape((self.cloth.num_springs, 2)), self.spring_lengths, self.spring_stiffness, self.spring_damping, self.forces, ) # integrate integrate_particles(self.positions, self.velocities, self.forces, self.gravity, self.inv_mass, sim_dt) return self.positions.cpu().numpy()
2,714
Python
30.206896
114
0.617907
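The Torch port accumulates spring forces with `Tensor.index_add_`, which sums contributions for repeated indices instead of overwriting them. A tiny sketch of that behaviour, using the same keyword form as the file above:

import torch

f = torch.zeros((3, 2))
idx = torch.tensor([0, 0, 2])        # particle 0 receives two contributions
contrib = torch.ones((3, 2))

f.index_add_(dim=0, index=idx, source=contrib)
print(f)                             # row 0 -> [2, 2], row 2 -> [1, 1]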
NVIDIA/warp/warp/examples/benchmarks/benchmark_cloth_taichi.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import numpy as np import taichi as ti @ti.func def step(x): ret = 0.0 if x < 0: ret = 1 return ret @ti.data_oriented class TiIntegrator: @ti.kernel def eval_springs(self): for tid in range(self.cloth.num_springs): i = self.spring_indices[2 * tid] j = self.spring_indices[2 * tid + 1] ke = self.spring_stiffness[tid] kd = self.spring_damping[tid] rest = self.spring_lengths[tid] xi = self.positions[i] xj = self.positions[j] vi = self.velocities[i] vj = self.velocities[j] xij = xi - xj vij = vi - vj l = xij.norm() dir = xij.normalized() c = l - rest dcdt = dir.dot(vij) fs = dir * (ke * c + kd * dcdt) self.forces[i] -= fs self.forces[j] += fs @ti.kernel def integrate_particles(self, dt: ti.f32): for tid in range(self.cloth.num_particles): x0 = self.positions[tid] v0 = self.velocities[tid] f0 = self.forces[tid] w = self.inv_mass[tid] g = ti.Vector([0.0, 0.0, 0.0]) if w > 0.0: g = ti.Vector([0.0, -9.81, 0.0]) v1 = v0 + (f0 * w + g) * dt x1 = x0 + v1 * dt self.positions[tid] = x1 self.velocities[tid] = v1 self.forces[tid] = ti.Vector([0.0, 0.0, 0.0]) def __init__(self, cloth, device): if device == "cpu": ti.init(arch=ti.cpu) elif device == "cuda": ti.init(arch=ti.gpu) else: raise RuntimeError("Unsupported Taichi device") self.cloth = cloth self.positions = ti.Vector.field(3, dtype=ti.f32, shape=self.cloth.num_particles) self.velocities = ti.Vector.field(3, dtype=ti.f32, shape=self.cloth.num_particles) self.inv_mass = ti.field(ti.f32, shape=self.cloth.num_particles) self.spring_indices = ti.field(ti.i32, shape=self.cloth.num_springs * 2) self.spring_lengths = ti.field(ti.f32, shape=self.cloth.num_springs) self.spring_stiffness = ti.field(ti.f32, shape=self.cloth.num_springs) self.spring_damping = ti.field(ti.f32, shape=self.cloth.num_springs) self.forces = ti.Vector.field(3, dtype=ti.f32, shape=self.cloth.num_particles) # upload data self.positions.from_numpy(cloth.positions) self.velocities.from_numpy(cloth.velocities) self.inv_mass.from_numpy(cloth.inv_masses) self.forces.from_numpy(np.zeros_like(self.cloth.velocities)) self.spring_indices.from_numpy(cloth.spring_indices) self.spring_lengths.from_numpy(cloth.spring_lengths) self.spring_stiffness.from_numpy(cloth.spring_stiffness) self.spring_damping.from_numpy(cloth.spring_damping) def simulate(self, dt, substeps): sim_dt = dt / substeps for _s in range(substeps): self.eval_springs() self.integrate_particles(sim_dt) return self.positions.to_numpy()
3,563
Python
30.539823
90
0.58911
NVIDIA/warp/warp/examples/benchmarks/benchmark_launches.py
# Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. ########################################################################### # Benchmarks for kernel launches with different types of args ########################################################################### import warp as wp @wp.struct class S0: pass @wp.struct class Sf: x: float y: float z: float @wp.struct class Sv: u: wp.vec3 v: wp.vec3 w: wp.vec3 @wp.struct class Sm: M: wp.mat33 N: wp.mat33 O: wp.mat33 @wp.struct class Sa: a: wp.array(dtype=float) b: wp.array(dtype=float) c: wp.array(dtype=float) @wp.struct class Sz: a: wp.array(dtype=float) b: wp.array(dtype=float) c: wp.array(dtype=float) x: float y: float z: float u: wp.vec3 v: wp.vec3 w: wp.vec3 @wp.kernel def k0(): tid = wp.tid() # noqa: F841 @wp.kernel def kf(x: float, y: float, z: float): tid = wp.tid() # noqa: F841 @wp.kernel def kv(u: wp.vec3, v: wp.vec3, w: wp.vec3): tid = wp.tid() # noqa: F841 @wp.kernel def km(M: wp.mat33, N: wp.mat33, O: wp.mat33): tid = wp.tid() # noqa: F841 @wp.kernel def ka(a: wp.array(dtype=float), b: wp.array(dtype=float), c: wp.array(dtype=float)): tid = wp.tid() # noqa: F841 @wp.kernel def kz( a: wp.array(dtype=float), b: wp.array(dtype=float), c: wp.array(dtype=float), x: float, y: float, z: float, u: wp.vec3, v: wp.vec3, w: wp.vec3, ): tid = wp.tid() # noqa: F841 @wp.kernel def ks0(s: S0): tid = wp.tid() # noqa: F841 @wp.kernel def ksf(s: Sf): tid = wp.tid() # noqa: F841 @wp.kernel def ksv(s: Sv): tid = wp.tid() # noqa: F841 @wp.kernel def ksm(s: Sm): tid = wp.tid() # noqa: F841 @wp.kernel def ksa(s: Sa): tid = wp.tid() # noqa: F841 @wp.kernel def ksz(s: Sz): tid = wp.tid() # noqa: F841 wp.build.clear_kernel_cache() devices = wp.get_devices() num_launches = 100000 for device in devices: with wp.ScopedDevice(device): print(f"\n=================== Device '{device}' ===================") wp.force_load(device) n = 1 a = wp.zeros(n, dtype=float) b = wp.zeros(n, dtype=float) c = wp.zeros(n, dtype=float) x = 17.0 y = 42.0 z = 99.0 u = wp.vec3(1, 2, 3) v = wp.vec3(10, 20, 30) w = wp.vec3(100, 200, 300) M = wp.mat33() N = wp.mat33() O = wp.mat33() s0 = S0() sf = Sf() sf.x = x sf.y = y sf.z = z sv = Sv() sv.u = u sv.v = v sv.w = w sm = Sm() sm.M = M sm.N = N sm.O = O sa = Sa() sa.a = a sa.b = b sa.c = c sz = Sz() sz.a = a sz.b = b sz.c = c sz.x = x sz.y = y sz.z = z sz.u = u sz.v = v sz.w = w tk0 = wp.ScopedTimer("k0") tkf = wp.ScopedTimer("kf") tkv = wp.ScopedTimer("kv") tkm = wp.ScopedTimer("km") tka = wp.ScopedTimer("ka") tkz = wp.ScopedTimer("kz") ts0 = wp.ScopedTimer("s0") tsf = wp.ScopedTimer("sf") tsv = wp.ScopedTimer("sv") tsm = wp.ScopedTimer("sm") tsa = wp.ScopedTimer("sa") tsz = wp.ScopedTimer("sz") wp.synchronize_device() with tk0: for _ in range(num_launches): wp.launch(k0, dim=1, inputs=[]) wp.synchronize_device() with tkf: for _ in range(num_launches): wp.launch(kf, dim=1, inputs=[x, y, z]) wp.synchronize_device() with tkv: for _ in range(num_launches): wp.launch(kv, dim=1, inputs=[u, v, w]) wp.synchronize_device() with tkm: for _ in range(num_launches): wp.launch(km, dim=1, inputs=[M, N, O]) wp.synchronize_device() with tka: for _ in range(num_launches): wp.launch(ka, 
dim=1, inputs=[a, b, c]) wp.synchronize_device() with tkz: for _ in range(num_launches): wp.launch(kz, dim=1, inputs=[a, b, c, x, y, z, u, v, w]) # structs wp.synchronize_device() with ts0: for _ in range(num_launches): wp.launch(ks0, dim=1, inputs=[s0]) wp.synchronize_device() with tsf: for _ in range(num_launches): wp.launch(ksf, dim=1, inputs=[sf]) wp.synchronize_device() with tsv: for _ in range(num_launches): wp.launch(ksv, dim=1, inputs=[sv]) wp.synchronize_device() with tsm: for _ in range(num_launches): wp.launch(ksm, dim=1, inputs=[sm]) wp.synchronize_device() with tsa: for _ in range(num_launches): wp.launch(ksa, dim=1, inputs=[sa]) wp.synchronize_device() with tsz: for _ in range(num_launches): wp.launch(ksz, dim=1, inputs=[sz]) wp.synchronize_device() timers = [ [tk0, ts0], [tkf, tsf], [tkv, tsv], [tkm, tsm], [tka, tsa], [tkz, tsz], ] print("--------------------------------") print("| args | direct | struct |") print("--------------------------------") for tk, ts in timers: print(f"| {tk.name} |{tk.elapsed:10.0f} |{ts.elapsed:10.0f} |") print("--------------------------------")
6,052
Python
19.588435
85
0.477859
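Adding a new argument type to this benchmark follows a fixed recipe: declare a kernel with that signature, create a matching ScopedTimer, and launch it num_launches times between synchronizations. A sketch of what one extra case might look like (a single int argument, which is not among the original cases; the warm-up launch stands in for the script's wp.force_load call):

import warp as wp


@wp.kernel
def ki(n: int):
    tid = wp.tid()  # noqa: F841


num_launches = 100000

with wp.ScopedDevice("cuda:0"):
    # warm-up launch so module compilation is not included in the timing
    wp.launch(ki, dim=1, inputs=[7])

    tki = wp.ScopedTimer("ki")

    wp.synchronize_device()
    with tki:
        for _ in range(num_launches):
            wp.launch(ki, dim=1, inputs=[42])
    wp.synchronize_device()

    print(f"| {tki.name} |{tki.elapsed:10.0f} |")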
NVIDIA/warp/warp/examples/benchmarks/benchmark_cloth_numba.py
import math import cupy as cp import numpy as np from numba import cuda, float32 # Notes: # # Current implementation requires some familarity of writing custom cuda kernels # May be improved with cuda ufuncs and/or writing custom numba type extensions. @cuda.jit(device=True) def norm(x): s = float32(0.0) for i in range(3): s += x[i] * x[i] return math.sqrt(s) @cuda.jit(device=True) def dot(x, y): s = float32(0.0) for i in range(3): s += x[i] * y[i] return s @cuda.jit def eval_springs_cuda( num_springs, # (1,) xs, # position (N, 3) vs, # velocities (N, 3) indices, # spring indices (S, 2) rests, # spring rest length (S,) kes, # stiffness (S,) kds, # damping (S,) fs, ): # forces (N, 3) tidx = cuda.grid(1) if tidx < num_springs: i, j = indices[tidx][0], indices[tidx][1] xi, xj = xs[i], xs[j] vi, vj = vs[i], vs[j] rest, ke, kd = rests[tidx], kes[tidx], kds[tidx] xij = cuda.local.array(3, dtype=cp.float32) vij = cuda.local.array(3, dtype=cp.float32) for k in range(3): xij[k] = xi[k] - xj[k] for k in range(3): vij[k] = vi[k] - vj[k] l = norm(xij) l_inv = float32(1.0) / l # normalized spring direction xij_unit = cuda.local.array(3, dtype=cp.float32) for k in range(3): xij_unit[k] = xij[k] * l_inv c = l - rest dcdt = dot(xij_unit, vij) # mass-spring-damper model fac = ke * c + kd * dcdt df = cuda.local.array(3, dtype=cp.float32) for k in range(3): df[k] = xij_unit[k] * fac for k in range(3): cuda.atomic.add(fs[i], k, -df[k]) cuda.atomic.add(fs[j], k, df[k]) # Support const array with cp array? g = np.array([0.0, 0.0 - 9.8, 0.0], dtype=np.float32) z = np.array([0.0, 0.0, 0.0], dtype=np.float32) @cuda.jit def integrate_particles_cuda( xs, # position (N, 3) vs, # velocity (N, 3) fs, # force (N, 3) ws, # inverse of mass (N,) dt, ): # dt (1,) i = cuda.grid(1) if i < xs.shape[0]: w = ws[i] a = cuda.const.array_like(g) if w > 0.0 else cuda.const.array_like(z) for j in range(3): # vs[i] += ((f * w) + a) * dt (ideally) vs[i][j] = vs[i][j] + ((fs[i][j] * w) + a[j]) * dt xs[i][j] = xs[i][j] + vs[i][j] * dt fs[i] = 0.0 class NbIntegrator: def __init__(self, cloth): self.cloth = cloth self.positions = cp.array(self.cloth.positions) self.velocities = cp.array(self.cloth.velocities) self.inv_mass = cp.array(self.cloth.inv_masses) self.spring_indices = cp.array(self.cloth.spring_indices) self.spring_lengths = cp.array(self.cloth.spring_lengths) self.spring_stiffness = cp.array(self.cloth.spring_stiffness) self.spring_damping = cp.array(self.cloth.spring_damping) self.forces = cp.zeros((self.cloth.num_particles, 3), dtype=cp.float32) self.num_particles = self.positions.shape[0] self.integrate_tpb = 4 self.integrate_nb = self.num_particles // self.integrate_tpb + 1 self.spring_tpb = 4 self.spring_nb = self.cloth.num_springs // self.spring_tpb + 1 def simulate(self, dt, substeps): sim_dt = dt / substeps for _s in range(substeps): eval_springs_cuda[self.spring_nb, self.spring_tpb]( self.cloth.num_springs, self.positions, self.velocities, self.spring_indices.reshape((self.cloth.num_springs, 2)), self.spring_lengths, self.spring_stiffness, self.spring_damping, self.forces, ) # integrate integrate_particles_cuda[self.integrate_nb, self.integrate_tpb]( self.positions, self.velocities, self.forces, self.inv_mass, sim_dt ) return self.positions.get()
4,166
Python
27.346939
83
0.532165
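The Numba port sizes its grid manually: threads-per-block is fixed, the block count is rounded up so every spring/particle gets a thread, and an in-kernel bounds check guards the last, partially filled block. A minimal sketch of that launch-configuration pattern (kernel and sizes are illustrative, not taken from the file):

import numpy as np
from numba import cuda


@cuda.jit
def add_one(xs):
    i = cuda.grid(1)
    if i < xs.shape[0]:        # guard: the last block may be partially full
        xs[i] += 1.0


n = 1000
tpb = 128                      # threads per block (the benchmark uses 4)
nb = n // tpb + 1              # blocks, rounded up

xs = cuda.to_device(np.zeros(n, dtype=np.float32))
add_one[nb, tpb](xs)
print(xs.copy_to_host()[:4])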
NVIDIA/warp/warp/examples/benchmarks/benchmark_cloth_numpy.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import numpy as np def eval_springs(x, v, indices, rest, ke, kd, f): i = indices[:, 0] j = indices[:, 1] xi = x[i] xj = x[j] vi = v[i] vj = v[j] xij = xi - xj vij = vi - vj l = np.linalg.norm(xij, axis=1) l_inv = 1.0 / l # normalized spring direction dir = (xij.T * l_inv).T c = l - rest dcdt = np.sum(dir * vij, axis=1) # damping based on relative velocity. fs = dir.T * (ke * c + kd * dcdt) np.add.at(f, i, -fs.T) np.add.at(f, j, fs.T) def integrate_particles(x, v, f, w, dt): g = np.array((0.0, 0.0 - 9.8, 0.0)) s = w > 0.0 a_ext = g * s[:, None] # simple semi-implicit Euler. v1 = v0 + a dt, x1 = x0 + v1 dt v += ((f.T * w).T + a_ext) * dt x += v * dt # clear forces f *= 0.0 class NpIntegrator: def __init__(self, cloth): self.cloth = cloth self.forces = np.zeros((self.cloth.num_particles, 3), dtype=np.float32) def simulate(self, dt, substeps): sim_dt = dt / substeps for _s in range(substeps): eval_springs( self.cloth.positions, self.cloth.velocities, self.cloth.spring_indices.reshape((self.cloth.num_springs, 2)), self.cloth.spring_lengths, self.cloth.spring_stiffness, self.cloth.spring_damping, self.forces, ) # integrate integrate_particles(self.cloth.positions, self.cloth.velocities, self.forces, self.cloth.inv_masses, sim_dt) return self.cloth.positions
2,030
Python
25.038461
120
0.581281
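The NumPy reference uses np.add.at rather than fancy-indexed `+=` because the spring index list references the same particle many times, and `f[i] += fs` keeps only one contribution per duplicated index. A tiny sketch of the difference:

import numpy as np

idx = np.array([0, 0, 1])          # particle 0 is hit by two springs
fs = np.ones((3, 2), dtype=np.float32)

f_wrong = np.zeros((2, 2), dtype=np.float32)
f_wrong[idx] += fs                 # buffered: duplicate index 0 counted once
print(f_wrong[0])                  # [1. 1.]

f_right = np.zeros((2, 2), dtype=np.float32)
np.add.at(f_right, idx, fs)        # unbuffered: duplicates accumulate
print(f_right[0])                  # [2. 2.]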
NVIDIA/warp/warp/examples/benchmarks/benchmark_cloth.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # include parent path import csv import os import sys import numpy as np sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "."))) from pxr import Usd, UsdGeom import warp as wp class Cloth: def __init__( self, lower, dx, dy, radius, stretch_stiffness, bend_stiffness, shear_stiffness, mass, fix_corners=True ): self.triangles = [] self.positions = [] self.velocities = [] self.inv_masses = [] self.spring_indices = [] self.spring_lengths = [] self.spring_stiffness = [] self.spring_damping = [] def grid(x, y, stride): return y * stride + x def create_spring(i, j, stiffness, damp=10.0): length = np.linalg.norm(np.array(self.positions[i]) - np.array(self.positions[j])) self.spring_indices.append(i) self.spring_indices.append(j) self.spring_lengths.append(length) self.spring_stiffness.append(stiffness) self.spring_damping.append(damp) for y in range(dy): for x in range(dx): p = np.array(lower) + radius * np.array((float(x), float(0.0), float(y))) self.positions.append(p) self.velocities.append(np.zeros(3)) if x > 0 and y > 0: self.triangles.append(grid(x - 1, y - 1, dx)) self.triangles.append(grid(x, y - 1, dx)) self.triangles.append(grid(x, y, dx)) self.triangles.append(grid(x - 1, y - 1, dx)) self.triangles.append(grid(x, y, dx)) self.triangles.append(grid(x - 1, y, dx)) if fix_corners and y == 0 and (x == 0 or x == dx - 1): w = 0.0 else: w = 1.0 / mass self.inv_masses.append(w) # horizontal springs for y in range(dy): for x in range(dx): index0 = y * dx + x if x > 0: index1 = y * dx + x - 1 create_spring(index0, index1, stretch_stiffness) if x > 1 and bend_stiffness > 0.0: index2 = y * dx + x - 2 create_spring(index0, index2, bend_stiffness) if y > 0 and x < dx - 1 and shear_stiffness > 0.0: indexDiag = (y - 1) * dx + x + 1 create_spring(index0, indexDiag, shear_stiffness) if y > 0 and x > 0 and shear_stiffness > 0.0: indexDiag = (y - 1) * dx + x - 1 create_spring(index0, indexDiag, shear_stiffness) # vertical for x in range(dx): for y in range(dy): index0 = y * dx + x if y > 0: index1 = (y - 1) * dx + x create_spring(index0, index1, stretch_stiffness) if y > 1 and bend_stiffness > 0.0: index2 = (y - 2) * dx + x create_spring(index0, index2, bend_stiffness) # harden to np arrays self.positions = np.array(self.positions, dtype=np.float32) self.velocities = np.array(self.velocities, dtype=np.float32) self.inv_masses = np.array(self.inv_masses, dtype=np.float32) self.spring_lengths = np.array(self.spring_lengths, dtype=np.float32) self.spring_indices = np.array(self.spring_indices, dtype=np.int32) self.spring_stiffness = np.array(self.spring_stiffness, dtype=np.float32) self.spring_damping = np.array(self.spring_damping, dtype=np.float32) self.num_particles = len(self.positions) self.num_springs = len(self.spring_lengths) self.num_tris = int(len(self.triangles) / 3) def run_benchmark(mode, dim, timers, render=False): # params sim_width = dim sim_height = dim sim_fps = 60.0 sim_substeps = 16 sim_duration = 1.0 sim_frames = int(sim_duration * sim_fps) sim_dt = 1.0 / sim_fps sim_time = 0.0 # wave constants k_stretch = 1000.0 k_shear = 1000.0 k_bend = 1000.0 # 
k_damp = 0.0 cloth = Cloth( lower=(0.0, 0.0, 0.0), dx=sim_width, dy=sim_height, radius=0.1, stretch_stiffness=k_stretch, bend_stiffness=k_bend, shear_stiffness=k_shear, mass=0.1, fix_corners=True, ) if render: # set up grid for visualization stage = Usd.Stage.CreateNew("benchmark.usd") stage.SetStartTimeCode(0.0) stage.SetEndTimeCode(sim_duration * sim_fps) stage.SetTimeCodesPerSecond(sim_fps) grid = UsdGeom.Mesh.Define(stage, "/root") grid.GetPointsAttr().Set(cloth.positions, 0.0) grid.GetFaceVertexIndicesAttr().Set(cloth.triangles, 0.0) grid.GetFaceVertexCountsAttr().Set([3] * cloth.num_tris, 0.0) with wp.ScopedTimer("Initialization", dict=timers): if mode == "warp_cpu": import benchmark_cloth_warp integrator = benchmark_cloth_warp.WpIntegrator(cloth, "cpu") elif mode == "warp_gpu": import benchmark_cloth_warp integrator = benchmark_cloth_warp.WpIntegrator(cloth, "cuda") elif mode == "taichi_cpu": import benchmark_cloth_taichi integrator = benchmark_cloth_taichi.TiIntegrator(cloth, "cpu") elif mode == "taichi_gpu": import benchmark_cloth_taichi integrator = benchmark_cloth_taichi.TiIntegrator(cloth, "cuda") elif mode == "numpy": import benchmark_cloth_numpy integrator = benchmark_cloth_numpy.NpIntegrator(cloth) elif mode == "cupy": import benchmark_cloth_cupy integrator = benchmark_cloth_cupy.CpIntegrator(cloth) elif mode == "numba": import benchmark_cloth_numba integrator = benchmark_cloth_numba.NbIntegrator(cloth) elif mode == "torch_cpu": import benchmark_cloth_pytorch integrator = benchmark_cloth_pytorch.TrIntegrator(cloth, "cpu") elif mode == "torch_gpu": import benchmark_cloth_pytorch integrator = benchmark_cloth_pytorch.TrIntegrator(cloth, "cuda") elif mode == "jax_cpu": os.environ["JAX_PLATFORM_NAME"] = "cpu" import benchmark_cloth_jax integrator = benchmark_cloth_jax.JxIntegrator(cloth) elif mode == "jax_gpu": os.environ["JAX_PLATFORM_NAME"] = "gpu" import benchmark_cloth_jax integrator = benchmark_cloth_jax.JxIntegrator(cloth) else: raise RuntimeError("Unknown simulation backend") # run one warm-up iteration to accurately measure initialization time (some engines do lazy init) positions = integrator.simulate(sim_dt, sim_substeps) label = "Dim ({}^2)".format(dim) # run simulation for _i in range(sim_frames): # simulate with wp.ScopedTimer(label, dict=timers): positions = integrator.simulate(sim_dt, sim_substeps) if render: grid.GetPointsAttr().Set(positions, sim_time * sim_fps) sim_time += sim_dt if render: stage.Save() # record profiling information timers = {} if len(sys.argv) > 1: mode = sys.argv[1] else: mode = "warp_gpu" run_benchmark(mode, 32, timers, render=False) run_benchmark(mode, 64, timers, render=False) run_benchmark(mode, 128, timers, render=False) # write results for k, v in timers.items(): print("{:16} min: {:8.2f} max: {:8.2f} avg: {:8.2f}".format(k, np.min(v), np.max(v), np.mean(v))) report = open(os.path.join("benchmark.csv"), "a") writer = csv.writer(report, delimiter=",") if report.tell() == 0: writer.writerow(["Name", "Init", "Dim (32^2)", "Dim (64^2)", "Dim (128^2)"]) writer.writerow( [ mode, np.max(timers["Initialization"]), np.mean(timers["Dim (32^2)"]), np.mean(timers["Dim (64^2)"]), np.mean(timers["Dim (128^2)"]), ] ) report.close()
8,622
Python
29.90681
111
0.568778
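The benchmark is driven by the backend name passed as the first command-line argument and appends one CSV row per run. A small sketch of invoking it and reading the results back, assuming it is run from the benchmarks directory (the backend name is one of the modes dispatched above):

import csv
import subprocess
import sys

# hypothetical invocation; sys.argv[1] selects the backend, "warp_gpu" is the default
subprocess.run([sys.executable, "benchmark_cloth.py", "numpy"], check=True)

# results are appended to benchmark.csv, one row per backend
with open("benchmark.csv") as f:
    for row in csv.reader(f):
        print(row)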
NVIDIA/warp/warp/examples/benchmarks/benchmark_cloth_warp.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import warp as wp wp.build.clear_kernel_cache() @wp.kernel def eval_springs( x: wp.array(dtype=wp.vec3), v: wp.array(dtype=wp.vec3), spring_indices: wp.array(dtype=int), spring_rest_lengths: wp.array(dtype=float), spring_stiffness: wp.array(dtype=float), spring_damping: wp.array(dtype=float), f: wp.array(dtype=wp.vec3), ): tid = wp.tid() i = spring_indices[tid * 2 + 0] j = spring_indices[tid * 2 + 1] ke = spring_stiffness[tid] kd = spring_damping[tid] rest = spring_rest_lengths[tid] xi = x[i] xj = x[j] vi = v[i] vj = v[j] xij = xi - xj vij = vi - vj l = wp.length(xij) l_inv = 1.0 / l # normalized spring direction dir = xij * l_inv c = l - rest dcdt = wp.dot(dir, vij) # damping based on relative velocity. fs = dir * (ke * c + kd * dcdt) wp.atomic_sub(f, i, fs) wp.atomic_add(f, j, fs) @wp.kernel def integrate_particles( x: wp.array(dtype=wp.vec3), v: wp.array(dtype=wp.vec3), f: wp.array(dtype=wp.vec3), w: wp.array(dtype=float), dt: float, ): tid = wp.tid() x0 = x[tid] v0 = v[tid] f0 = f[tid] inv_mass = w[tid] g = wp.vec3() # treat particles with inv_mass == 0 as kinematic if inv_mass > 0.0: g = wp.vec3(0.0, 0.0 - 9.81, 0.0) # simple semi-implicit Euler. v1 = v0 + a dt, x1 = x0 + v1 dt v1 = v0 + (f0 * inv_mass + g) * dt x1 = x0 + v1 * dt x[tid] = x1 v[tid] = v1 # clear forces f[tid] = wp.vec3() class WpIntegrator: def __init__(self, cloth, device): self.device = wp.get_device(device) with wp.ScopedDevice(self.device): self.positions = wp.from_numpy(cloth.positions, dtype=wp.vec3) self.positions_host = wp.from_numpy(cloth.positions, dtype=wp.vec3, device="cpu") self.invmass = wp.from_numpy(cloth.inv_masses, dtype=float) self.velocities = wp.zeros(cloth.num_particles, dtype=wp.vec3) self.forces = wp.zeros(cloth.num_particles, dtype=wp.vec3) self.spring_indices = wp.from_numpy(cloth.spring_indices, dtype=int) self.spring_lengths = wp.from_numpy(cloth.spring_lengths, dtype=float) self.spring_stiffness = wp.from_numpy(cloth.spring_stiffness, dtype=float) self.spring_damping = wp.from_numpy(cloth.spring_damping, dtype=float) self.cloth = cloth def simulate(self, dt, substeps): sim_dt = dt / substeps for _s in range(substeps): wp.launch( kernel=eval_springs, dim=self.cloth.num_springs, inputs=[ self.positions, self.velocities, self.spring_indices, self.spring_lengths, self.spring_stiffness, self.spring_damping, self.forces, ], outputs=[], device=self.device, ) # integrate wp.launch( kernel=integrate_particles, dim=self.cloth.num_particles, inputs=[self.positions, self.velocities, self.forces, self.invmass, sim_dt], outputs=[], device=self.device, ) # copy data back to host if self.device.is_cuda: wp.copy(self.positions_host, self.positions) wp.synchronize() return self.positions_host.numpy() else: return self.positions.numpy()
4,032
Python
26.623287
93
0.570933
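WpIntegrator only relies on a handful of attributes of the cloth object, so it can be exercised in isolation with a hand-built spring system. A minimal sketch, assuming benchmark_cloth_warp.py is importable from the current directory and using a SimpleNamespace stand-in (my own construction, not part of the benchmark) with the attribute names the class reads:

from types import SimpleNamespace

import numpy as np

from benchmark_cloth_warp import WpIntegrator

# a two-particle, one-spring "cloth": particle 0 is pinned (inverse mass 0),
# particle 1 hangs from it under gravity
cloth = SimpleNamespace(
    num_particles=2,
    num_springs=1,
    positions=np.array([[0.0, 1.0, 0.0], [0.0, 0.0, 0.0]], dtype=np.float32),
    inv_masses=np.array([0.0, 1.0], dtype=np.float32),
    spring_indices=np.array([0, 1], dtype=np.int32),
    spring_lengths=np.array([1.0], dtype=np.float32),
    spring_stiffness=np.array([100.0], dtype=np.float32),
    spring_damping=np.array([1.0], dtype=np.float32),
)

integrator = WpIntegrator(cloth, "cpu")
positions = integrator.simulate(dt=1.0 / 60.0, substeps=16)
print(positions[1])   # the free particle sags below its rest position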
NVIDIA/warp/warp/optim/sgd.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. from typing import Any import warp as wp @wp.kernel def sgd_step_kernel( g: wp.array(dtype=Any), b: wp.array(dtype=Any), lr: float, weight_decay: float, momentum: float, damping: float, nesterov: int, t: int, params: wp.array(dtype=Any), ): i = wp.tid() gt = g[i] if weight_decay != 0.0: gt += weight_decay * params[i] if momentum != 0.0: bt = b[i] if t > 0: bt = momentum * bt + (1.0 - damping) * gt else: bt = gt if nesterov == 1: gt += momentum * bt else: gt = bt b[i] = bt params[i] = params[i] - lr * gt class SGD: """An implementation of the Stochastic Gradient Descent Optimizer It is designed to mimic Pytorch's version. https://pytorch.org/docs/stable/generated/torch.optim.SGD.html """ def __init__(self, params=None, lr=0.001, momentum=0.0, dampening=0.0, weight_decay=0.0, nesterov=False): self.b = [] # momentum buffer self.set_params(params) self.lr = lr self.momentum = momentum self.dampening = dampening self.weight_decay = weight_decay self.nesterov = nesterov self.t = 0 def set_params(self, params): self.params = params if params is not None and isinstance(params, list) and len(params) > 0: if len(self.b) != len(params): self.b = [None] * len(params) for i in range(len(params)): param = params[i] if self.b[i] is None or self.b[i].shape != param.shape or self.b[i].dtype != param.dtype: self.b[i] = wp.zeros_like(param) # Overload the kernel for each parameter so we can precompile the SGD kernel if param is not None: wp.overload(sgd_step_kernel, {"g": param, "b": param, "params": param}) def reset_internal_state(self): for b_i in self.b: b_i.zero_() self.t = 0 def step(self, grad): assert self.params is not None for i in range(len(self.params)): SGD.step_detail( grad[i], self.b[i], self.lr, self.momentum, self.dampening, self.weight_decay, self.nesterov, self.t, self.params[i], ) self.t = self.t + 1 @staticmethod def step_detail(g, b, lr, momentum, dampening, weight_decay, nesterov, t, params): assert params.dtype == g.dtype assert params.dtype == b.dtype assert params.shape == g.shape kernel_inputs = [g, b, lr, momentum, dampening, weight_decay, int(nesterov), t, params] wp.launch( kernel=sgd_step_kernel, dim=len(params), inputs=kernel_inputs, device=params.device, )
3,384
Python
31.238095
109
0.560284
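A minimal usage sketch of the SGD class above, pairing it with Warp's Tape to produce the gradient array it expects. This is a toy quadratic objective of my own, and it assumes SGD is re-exported from warp.optim (it is defined in warp/optim/sgd.py above):

import warp as wp
from warp.optim import SGD  # assumption: exported by warp.optim


@wp.kernel
def loss_kernel(x: wp.array(dtype=float), loss: wp.array(dtype=float)):
    tid = wp.tid()
    # simple quadratic: sum_i (x_i - 3)^2
    d = x[tid] - 3.0
    wp.atomic_add(loss, 0, d * d)


x = wp.zeros(16, dtype=float, requires_grad=True)
loss = wp.zeros(1, dtype=float, requires_grad=True)

opt = SGD(params=[x], lr=0.1, momentum=0.9)

for _ in range(100):
    loss.zero_()
    tape = wp.Tape()
    with tape:
        wp.launch(loss_kernel, dim=x.size, inputs=[x, loss])
    tape.backward(loss)

    opt.step([x.grad])  # one gradient array per parameter array
    tape.zero()

print(x.numpy()[:4])    # converges toward 3.0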
NVIDIA/warp/warp/optim/linear.py
from math import sqrt from typing import Any, Callable, Optional, Tuple, Union import warp as wp import warp.sparse as sparse from warp.utils import array_inner # No need to auto-generate adjoint code for linear solvers wp.set_module_options({"enable_backward": False}) class LinearOperator: """ Linear operator to be used as left-hand-side of linear iterative solvers. Args: shape: Tuple containing the number of rows and columns of the operator dtype: Type of the operator elements device: Device on which computations involving the operator should be performed matvec: Matrix-vector multiplication routine The matrix-vector multiplication routine should have the following signature: .. code-block:: python def matvec(x: wp.array, y: wp.array, z: wp.array, alpha: Scalar, beta: Scalar): '''Performs the operation z = alpha * x + beta * y''' ... For performance reasons, by default the iterative linear solvers in this module will try to capture the calls for one or more iterations in CUDA graphs. If the `matvec` routine of a custom :class:`LinearOperator` cannot be graph-captured, the ``use_cuda_graph=False`` parameter should be passed to the solver function. """ def __init__(self, shape: Tuple[int, int], dtype: type, device: wp.context.Device, matvec: Callable): self._shape = shape self._dtype = dtype self._device = device self._matvec = matvec @property def shape(self) -> Tuple[int, int]: return self._shape @property def dtype(self) -> type: return self._dtype @property def device(self) -> wp.context.Device: return self._device @property def matvec(self) -> Callable: return self._matvec @property def scalar_type(self): return wp.types.type_scalar_type(self.dtype) _Matrix = Union[wp.array, sparse.BsrMatrix, LinearOperator] def aslinearoperator(A: _Matrix) -> LinearOperator: """ Casts the dense or sparse matrix `A` as a :class:`LinearOperator` `A` must be of one of the following types: - :class:`warp.sparse.BsrMatrix` - two-dimensional `warp.array`; then `A` is assumed to be a dense matrix - one-dimensional `warp.array`; then `A` is assumed to be a diagonal matrix - :class:`warp.sparse.LinearOperator`; no casting necessary """ if A is None or isinstance(A, LinearOperator): return A def bsr_mv(x, y, z, alpha, beta): if z.ptr != y.ptr and beta != 0.0: wp.copy(src=y, dest=z) sparse.bsr_mv(A, x, z, alpha, beta) def dense_mv(x, y, z, alpha, beta): x = x.reshape((x.shape[0], 1)) y = y.reshape((y.shape[0], 1)) z = z.reshape((y.shape[0], 1)) wp.matmul(A, x, y, z, alpha, beta) def diag_mv(x, y, z, alpha, beta): scalar_type = wp.types.type_scalar_type(A.dtype) alpha = scalar_type(alpha) beta = scalar_type(beta) wp.launch(_diag_mv_kernel, dim=A.shape, device=A.device, inputs=[A, x, y, z, alpha, beta]) def diag_mv_vec(x, y, z, alpha, beta): scalar_type = wp.types.type_scalar_type(A.dtype) alpha = scalar_type(alpha) beta = scalar_type(beta) wp.launch(_diag_mv_vec_kernel, dim=A.shape, device=A.device, inputs=[A, x, y, z, alpha, beta]) if isinstance(A, wp.array): if A.ndim == 2: return LinearOperator(A.shape, A.dtype, A.device, matvec=dense_mv) if A.ndim == 1: if wp.types.type_is_vector(A.dtype): return LinearOperator(A.shape, A.dtype, A.device, matvec=diag_mv_vec) return LinearOperator(A.shape, A.dtype, A.device, matvec=diag_mv) if isinstance(A, sparse.BsrMatrix): return LinearOperator(A.shape, A.dtype, A.device, matvec=bsr_mv) raise ValueError(f"Unable to create LinearOperator from {A}") def preconditioner(A: _Matrix, ptype: str = "diag") -> LinearOperator: """Constructs and returns a preconditioner for an input 
matrix. Args: A: The matrix for which to build the preconditioner ptype: The type of preconditioner. Currently the following values are supported: - ``"diag"``: Diagonal (a.k.a. Jacobi) preconditioner - ``"diag_abs"``: Similar to Jacobi, but using the absolute value of diagonal coefficients - ``"id"``: Identity (null) preconditioner """ if ptype == "id": return None if ptype in ("diag", "diag_abs"): use_abs = 1 if ptype == "diag_abs" else 0 if isinstance(A, sparse.BsrMatrix): A_diag = sparse.bsr_get_diag(A) if wp.types.type_is_matrix(A.dtype): inv_diag = wp.empty( shape=A.nrow, dtype=wp.vec(length=A.block_shape[0], dtype=A.scalar_type), device=A.device ) wp.launch( _extract_inverse_diagonal_blocked, dim=inv_diag.shape, device=inv_diag.device, inputs=[A_diag, inv_diag, use_abs], ) else: inv_diag = wp.empty(shape=A.shape[0], dtype=A.scalar_type, device=A.device) wp.launch( _extract_inverse_diagonal_scalar, dim=inv_diag.shape, device=inv_diag.device, inputs=[A_diag, inv_diag, use_abs], ) elif isinstance(A, wp.array) and A.ndim == 2: inv_diag = wp.empty(shape=A.shape[0], dtype=A.dtype, device=A.device) wp.launch( _extract_inverse_diagonal_dense, dim=inv_diag.shape, device=inv_diag.device, inputs=[A, inv_diag, use_abs], ) else: raise ValueError("Unsupported source matrix type for building diagonal preconditioner") return aslinearoperator(inv_diag) raise ValueError(f"Unsupported preconditioner type '{ptype}'") def cg( A: _Matrix, b: wp.array, x: wp.array, tol: Optional[float] = None, atol: Optional[float] = None, maxiter: Optional[float] = 0, M: Optional[_Matrix] = None, callback: Optional[Callable] = None, check_every=10, use_cuda_graph=True, ) -> Tuple[int, float, float]: """Computes an approximate solution to a symmetric, positive-definite linear system using the Conjugate Gradient algorithm. Args: A: the linear system's left-hand-side b: the linear system's right-hand-side x: initial guess and solution vector tol: relative tolerance for the residual, as a ratio of the right-hand-side norm atol: absolute tolerance for the residual maxiter: maximum number of iterations to perform before aborting. Defaults to the system size. Note that the current implementation always performs iterations in pairs, and as a result may exceed the specified maximum number of iterations by one. M: optional left-preconditioner, ideally chosen such that ``M A`` is close to identity. callback: function to be called every `check_every` iteration with the current iteration number, residual and tolerance check_every: number of iterations every which to call `callback`, check the residual against the tolerance and possibility terminate the algorithm. use_cuda_graph: If true and when run on a CUDA device, capture the solver iteration as a CUDA graph for reduced launch overhead. The linear operator and preconditioner must only perform graph-friendly operations. Returns: Tuple (final iteration number, residual norm, absolute tolerance) If both `tol` and `atol` are provided, the absolute tolerance used as the termination criterion for the residual norm is ``max(atol, tol * norm(b))``. 
""" A = aslinearoperator(A) M = aslinearoperator(M) if maxiter == 0: maxiter = A.shape[0] r, r_norm_sq, atol = _initialize_residual_and_tolerance(A, b, x, tol=tol, atol=atol) device = A.device scalar_dtype = wp.types.type_scalar_type(A.dtype) # Notations below follow pseudo-code from https://en.wikipedia.org/wiki/Conjugate_gradient_method # z = M r if M is not None: z = wp.zeros_like(b) M.matvec(r, z, z, alpha=1.0, beta=0.0) # rz = r' z; rz_new = wp.empty(n=1, dtype=scalar_dtype, device=device) array_inner(r, z, out=rz_new) else: z = r rz_old = wp.empty(n=1, dtype=scalar_dtype, device=device) p_Ap = wp.empty(n=1, dtype=scalar_dtype, device=device) Ap = wp.zeros_like(b) p = wp.clone(z) def do_iteration(atol_sq, rr_old, rr_new, rz_old, rz_new): # Ap = A * p; A.matvec(p, Ap, Ap, alpha=1, beta=0) array_inner(p, Ap, out=p_Ap) wp.launch( kernel=_cg_kernel_1, dim=x.shape[0], device=device, inputs=[atol_sq, rr_old, rz_old, p_Ap, x, r, p, Ap], ) array_inner(r, r, out=rr_new) # z = M r if M is not None: M.matvec(r, z, z, alpha=1.0, beta=0.0) # rz = r' z; array_inner(r, z, out=rz_new) wp.launch(kernel=_cg_kernel_2, dim=z.shape[0], device=device, inputs=[atol_sq, rr_new, rz_old, rz_new, z, p]) # We do iterations by pairs, switching old and new residual norm buffers for each odd-even couple # In the non-preconditioned case we reuse the error norm buffer for the new <r,z> computation def do_odd_even_cycle(atol_sq: float): # A pair of iterations, so that we're swapping the residual buffers twice if M is None: do_iteration(atol_sq, r_norm_sq, rz_old, r_norm_sq, rz_old) do_iteration(atol_sq, rz_old, r_norm_sq, rz_old, r_norm_sq) else: do_iteration(atol_sq, r_norm_sq, r_norm_sq, rz_new, rz_old) do_iteration(atol_sq, r_norm_sq, r_norm_sq, rz_old, rz_new) return _run_solver_loop( do_odd_even_cycle, cycle_size=2, r_norm_sq=r_norm_sq, maxiter=maxiter, atol=atol, callback=callback, check_every=check_every, use_cuda_graph=use_cuda_graph, device=device, ) def cr( A: _Matrix, b: wp.array, x: wp.array, tol: Optional[float] = None, atol: Optional[float] = None, maxiter: Optional[float] = 0, M: Optional[_Matrix] = None, callback: Optional[Callable] = None, check_every=10, use_cuda_graph=True, ) -> Tuple[int, float, float]: """Computes an approximate solution to a symmetric, positive-definite linear system using the Conjugate Residual algorithm. Args: A: the linear system's left-hand-side b: the linear system's right-hand-side x: initial guess and solution vector tol: relative tolerance for the residual, as a ratio of the right-hand-side norm atol: absolute tolerance for the residual maxiter: maximum number of iterations to perform before aborting. Defaults to the system size. Note that the current implementation always performs iterations in pairs, and as a result may exceed the specified maximum number of iterations by one. M: optional left-preconditioner, ideally chosen such that ``M A`` is close to identity. callback: function to be called every `check_every` iteration with the current iteration number, residual and tolerance check_every: number of iterations every which to call `callback`, check the residual against the tolerance and possibility terminate the algorithm. use_cuda_graph: If true and when run on a CUDA device, capture the solver iteration as a CUDA graph for reduced launch overhead. The linear operator and preconditioner must only perform graph-friendly operations. 
Returns: Tuple (final iteration number, residual norm, absolute tolerance) If both `tol` and `atol` are provided, the absolute tolerance used as the termination criterion for the residual norm is ``max(atol, tol * norm(b))``. """ A = aslinearoperator(A) M = aslinearoperator(M) if maxiter == 0: maxiter = A.shape[0] r, r_norm_sq, atol = _initialize_residual_and_tolerance(A, b, x, tol=tol, atol=atol) device = A.device scalar_dtype = wp.types.type_scalar_type(A.dtype) # Notations below follow roughly pseudo-code from https://en.wikipedia.org/wiki/Conjugate_residual_method # with z := M^-1 r and y := M^-1 Ap # z = M r if M is None: z = r else: z = wp.zeros_like(r) M.matvec(r, z, z, alpha=1.0, beta=0.0) Az = wp.zeros_like(b) A.matvec(z, Az, Az, alpha=1, beta=0) p = wp.clone(z) Ap = wp.clone(Az) if M is None: y = Ap else: y = wp.zeros_like(Ap) zAz_old = wp.empty(n=1, dtype=scalar_dtype, device=device) zAz_new = wp.empty(n=1, dtype=scalar_dtype, device=device) y_Ap = wp.empty(n=1, dtype=scalar_dtype, device=device) array_inner(z, Az, out=zAz_new) def do_iteration(atol_sq, rr, zAz_old, zAz_new): if M is not None: M.matvec(Ap, y, y, alpha=1.0, beta=0.0) array_inner(Ap, y, out=y_Ap) if M is None: # In non-preconditioned case, first kernel is same as CG wp.launch( kernel=_cg_kernel_1, dim=x.shape[0], device=device, inputs=[atol_sq, rr, zAz_old, y_Ap, x, r, p, Ap], ) else: # In preconditioned case, we have one more vector to update wp.launch( kernel=_cr_kernel_1, dim=x.shape[0], device=device, inputs=[atol_sq, rr, zAz_old, y_Ap, x, r, z, p, Ap, y], ) array_inner(r, r, out=rr) A.matvec(z, Az, Az, alpha=1, beta=0) array_inner(z, Az, out=zAz_new) # beta = rz_new / rz_old wp.launch( kernel=_cr_kernel_2, dim=z.shape[0], device=device, inputs=[atol_sq, rr, zAz_old, zAz_new, z, p, Az, Ap] ) # We do iterations by pairs, switching old and new residual norm buffers for each odd-even couple def do_odd_even_cycle(atol_sq: float): do_iteration(atol_sq, r_norm_sq, zAz_new, zAz_old) do_iteration(atol_sq, r_norm_sq, zAz_old, zAz_new) return _run_solver_loop( do_odd_even_cycle, cycle_size=2, r_norm_sq=r_norm_sq, maxiter=maxiter, atol=atol, callback=callback, check_every=check_every, use_cuda_graph=use_cuda_graph, device=device, ) def bicgstab( A: _Matrix, b: wp.array, x: wp.array, tol: Optional[float] = None, atol: Optional[float] = None, maxiter: Optional[float] = 0, M: Optional[_Matrix] = None, callback: Optional[Callable] = None, check_every=10, use_cuda_graph=True, is_left_preconditioner=False, ): """Computes an approximate solution to a linear system using the Biconjugate Gradient Stabilized method (BiCGSTAB). Args: A: the linear system's left-hand-side b: the linear system's right-hand-side x: initial guess and solution vector tol: relative tolerance for the residual, as a ratio of the right-hand-side norm atol: absolute tolerance for the residual maxiter: maximum number of iterations to perform before aborting. Defaults to the system size. M: optional left- or right-preconditioner, ideally chosen such that ``M A`` (resp ``A M``) is close to identity. callback: function to be called every `check_every` iteration with the current iteration number, residual and tolerance check_every: number of iterations every which to call `callback`, check the residual against the tolerance and possibility terminate the algorithm. use_cuda_graph: If true and when run on a CUDA device, capture the solver iteration as a CUDA graph for reduced launch overhead. 
The linear operator and preconditioner must only perform graph-friendly operations. is_left_preconditioner: whether `M` should be used as a left- or right- preconditioner. Returns: Tuple (final iteration number, residual norm, absolute tolerance) If both `tol` and `atol` are provided, the absolute tolerance used as the termination criterion for the residual norm is ``max(atol, tol * norm(b))``. """ A = aslinearoperator(A) M = aslinearoperator(M) if maxiter == 0: maxiter = A.shape[0] r, r_norm_sq, atol = _initialize_residual_and_tolerance(A, b, x, tol=tol, atol=atol) device = A.device scalar_dtype = wp.types.type_scalar_type(A.dtype) # Notations below follow pseudo-code from biconjugate https://en.wikipedia.org/wiki/Biconjugate_gradient_stabilized_method rho = wp.clone(r_norm_sq, pinned=False) r0v = wp.empty(n=1, dtype=scalar_dtype, device=device) st = wp.empty(n=1, dtype=scalar_dtype, device=device) tt = wp.empty(n=1, dtype=scalar_dtype, device=device) # work arrays r0 = wp.clone(r) v = wp.zeros_like(r) t = wp.zeros_like(r) p = wp.clone(r) if M is not None: y = wp.zeros_like(p) z = wp.zeros_like(r) if is_left_preconditioner: Mt = wp.zeros_like(t) else: y = p z = r Mt = t def do_iteration(atol_sq: float): # y = M p if M is not None: M.matvec(p, y, y, alpha=1.0, beta=0.0) # v = A * y; A.matvec(y, v, v, alpha=1, beta=0) # alpha = rho / <r0 . v> array_inner(r0, v, out=r0v) # x += alpha y # r -= alpha v wp.launch( kernel=_bicgstab_kernel_1, dim=x.shape[0], device=device, inputs=[atol_sq, r_norm_sq, rho, r0v, x, r, y, v], ) array_inner(r, r, out=r_norm_sq) # z = M r if M is not None: M.matvec(r, z, z, alpha=1.0, beta=0.0) # t = A z A.matvec(z, t, t, alpha=1, beta=0) if is_left_preconditioner: # Mt = M t if M is not None: M.matvec(t, Mt, Mt, alpha=1.0, beta=0.0) # omega = <Mt, Ms> / <Mt, Mt> array_inner(z, Mt, out=st) array_inner(Mt, Mt, out=tt) else: array_inner(r, t, out=st) array_inner(t, t, out=tt) # x += omega z # r -= omega t wp.launch( kernel=_bicgstab_kernel_2, dim=z.shape[0], device=device, inputs=[atol_sq, r_norm_sq, st, tt, z, t, x, r], ) array_inner(r, r, out=r_norm_sq) # rho = <r0, r> array_inner(r0, r, out=rho) # beta = (rho / rho_old) * alpha / omega = (rho / r0v) / omega # p = r + beta (p - omega v) wp.launch( kernel=_bicgstab_kernel_3, dim=z.shape[0], device=device, inputs=[atol_sq, r_norm_sq, rho, r0v, st, tt, p, r, v], ) return _run_solver_loop( do_iteration, cycle_size=1, r_norm_sq=r_norm_sq, maxiter=maxiter, atol=atol, callback=callback, check_every=check_every, use_cuda_graph=use_cuda_graph, device=device, ) def gmres( A: _Matrix, b: wp.array, x: wp.array, tol: Optional[float] = None, atol: Optional[float] = None, restart=31, maxiter: Optional[float] = 0, M: Optional[_Matrix] = None, callback: Optional[Callable] = None, check_every=31, use_cuda_graph=True, is_left_preconditioner=False, ): """Computes an approximate solution to a linear system using the restarted Generalized Minimum Residual method (GMRES[k]). Args: A: the linear system's left-hand-side b: the linear system's right-hand-side x: initial guess and solution vector tol: relative tolerance for the residual, as a ratio of the right-hand-side norm atol: absolute tolerance for the residual restart: The restart parameter, i.e, the `k` in `GMRES[k]`. In general, increasing this parameter reduces the number of iterations but increases memory consumption. maxiter: maximum number of iterations to perform before aborting. Defaults to the system size. 
Note that the current implementation always perform `restart` iterations at a time, and as a result may exceed the specified maximum number of iterations by ``restart-1``. M: optional left- or right-preconditioner, ideally chosen such that ``M A`` (resp ``A M``) is close to identity. callback: function to be called every `check_every` iteration with the current iteration number, residual and tolerance check_every: number of iterations every which to call `callback`, check the residual against the tolerance and possibility terminate the algorithm. use_cuda_graph: If true and when run on a CUDA device, capture the solver iteration as a CUDA graph for reduced launch overhead. The linear operator and preconditioner must only perform graph-friendly operations. is_left_preconditioner: whether `M` should be used as a left- or right- preconditioner. Returns: Tuple (final iteration number, residual norm, absolute tolerance) If both `tol` and `atol` are provided, the absolute tolerance used as the termination criterion for the residual norm is ``max(atol, tol * norm(b))``. """ A = aslinearoperator(A) M = aslinearoperator(M) if maxiter == 0: maxiter = A.shape[0] restart = min(restart, maxiter) check_every = max(restart, check_every) r, r_norm_sq, atol = _initialize_residual_and_tolerance(A, b, x, tol=tol, atol=atol) device = A.device scalar_dtype = wp.types.type_scalar_type(A.dtype) pivot_tolerance = _get_dtype_epsilon(scalar_dtype) ** 2 beta_sq = wp.empty_like(r_norm_sq, pinned=False) H = wp.empty(shape=(restart + 1, restart), dtype=scalar_dtype, device=device) y = wp.empty(shape=restart + 1, dtype=scalar_dtype, device=device) w = wp.zeros_like(r) V = wp.zeros(shape=(restart + 1, r.shape[0]), dtype=r.dtype, device=device) def array_coeff(H, i, j): return wp.array( ptr=H.ptr + i * H.strides[0] + j * H.strides[1], dtype=H.dtype, shape=(1,), device=H.device, copy=False, ) def array_row(V, i): return wp.array( ptr=V.ptr + i * V.strides[0], dtype=V.dtype, shape=V.shape[1], device=V.device, copy=False, ) def do_arnoldi_iteration(j: int): # w = A * v; vj = array_row(V, j) if M is not None: tmp = array_row(V, j + 1) if is_left_preconditioner: A.matvec(vj, tmp, tmp, alpha=1, beta=0) M.matvec(tmp, w, w, alpha=1, beta=0) else: M.matvec(vj, tmp, tmp, alpha=1, beta=0) A.matvec(tmp, w, w, alpha=1, beta=0) else: A.matvec(vj, w, w, alpha=1, beta=0) for i in range(j + 1): vi = array_row(V, i) hij = array_coeff(H, i, j) array_inner(w, vi, out=hij) wp.launch(_gmres_arnoldi_axpy_kernel, dim=w.shape, device=w.device, inputs=[vi, w, hij]) hjnj = array_coeff(H, j + 1, j) array_inner(w, w, out=hjnj) vjn = array_row(V, j + 1) wp.launch(_gmres_arnoldi_normalize_kernel, dim=w.shape, device=w.device, inputs=[w, vjn, hjnj]) def do_restart_cycle(atol_sq: float): if M is not None and is_left_preconditioner: M.matvec(r, w, w, alpha=1, beta=0) rh = w else: rh = r array_inner(rh, rh, out=beta_sq) v0 = array_row(V, 0) # v0 = r / beta wp.launch(_gmres_arnoldi_normalize_kernel, dim=r.shape, device=r.device, inputs=[rh, v0, beta_sq]) for j in range(restart): do_arnoldi_iteration(j) wp.launch(_gmres_normalize_lower_diagonal, dim=restart, device=device, inputs=[H]) wp.launch(_gmres_solve_least_squares, dim=1, device=device, inputs=[restart, pivot_tolerance, beta_sq, H, y]) # update x if M is None or is_left_preconditioner: wp.launch(_gmres_update_x_kernel, dim=x.shape, device=device, inputs=[restart, scalar_dtype(1.0), y, V, x]) else: wp.launch(_gmres_update_x_kernel, dim=x.shape, device=device, inputs=[restart, scalar_dtype(0.0), y, V, 
w]) M.matvec(w, x, x, alpha=1, beta=1) # update r and residual wp.copy(src=b, dest=r) A.matvec(x, b, r, alpha=-1.0, beta=1.0) array_inner(r, r, out=r_norm_sq) return _run_solver_loop( do_restart_cycle, cycle_size=restart, r_norm_sq=r_norm_sq, maxiter=maxiter, atol=atol, callback=callback, check_every=check_every, use_cuda_graph=use_cuda_graph, device=device, ) def _get_dtype_epsilon(dtype): if dtype == wp.float64: return 1.0e-16 elif dtype == wp.float16: return 1.0e-4 return 1.0e-8 def _get_absolute_tolerance(dtype, tol, atol, lhs_norm): eps_tol = _get_dtype_epsilon(dtype) default_tol = eps_tol ** (3 / 4) min_tol = eps_tol ** (9 / 4) if tol is None and atol is None: tol = atol = default_tol elif tol is None: tol = atol elif atol is None: atol = tol return max(tol * lhs_norm, atol, min_tol) def _initialize_residual_and_tolerance(A: LinearOperator, b: wp.array, x: wp.array, tol: float, atol: float): scalar_dtype = wp.types.type_scalar_type(A.dtype) device = A.device # Buffer for storing square norm or residual r_norm_sq = wp.empty(n=1, dtype=scalar_dtype, device=device, pinned=device.is_cuda) # Compute b norm to define absolute tolerance array_inner(b, b, out=r_norm_sq) atol = _get_absolute_tolerance(scalar_dtype, tol, atol, sqrt(r_norm_sq.numpy()[0])) # Residual r = b - Ax r = wp.empty_like(b) A.matvec(x, b, r, alpha=-1.0, beta=1.0) array_inner(r, r, out=r_norm_sq) return r, r_norm_sq, atol def _run_solver_loop( do_cycle: Callable[[float], None], cycle_size: int, r_norm_sq: wp.array, maxiter: int, atol: float, callback: Callable, check_every: int, use_cuda_graph: bool, device, ): atol_sq = atol * atol cur_iter = 0 err_sq = r_norm_sq.numpy()[0] err = sqrt(err_sq) if callback is not None: callback(cur_iter, err, atol) if err_sq <= atol_sq: return cur_iter, err, atol graph = None while True: # Do not do graph capture at first iteration -- modules may not be loaded yet if device.is_cuda and use_cuda_graph and cur_iter > 0: if graph is None: wp.capture_begin(device, force_module_load=False) try: do_cycle(atol_sq) finally: graph = wp.capture_end(device) wp.capture_launch(graph) else: do_cycle(atol_sq) cur_iter += cycle_size if cur_iter >= maxiter: break if (cur_iter % check_every) < cycle_size: err_sq = r_norm_sq.numpy()[0] if err_sq <= atol_sq: break if callback is not None: callback(cur_iter, sqrt(err_sq), atol) err_sq = r_norm_sq.numpy()[0] err = sqrt(err_sq) if callback is not None: callback(cur_iter, err, atol) return cur_iter, err, atol @wp.kernel def _diag_mv_kernel( A: wp.array(dtype=Any), x: wp.array(dtype=Any), y: wp.array(dtype=Any), z: wp.array(dtype=Any), alpha: Any, beta: Any, ): i = wp.tid() z[i] = beta * y[i] + alpha * (A[i] * x[i]) @wp.kernel def _diag_mv_vec_kernel( A: wp.array(dtype=Any), x: wp.array(dtype=Any), y: wp.array(dtype=Any), z: wp.array(dtype=Any), alpha: Any, beta: Any, ): i = wp.tid() z[i] = beta * y[i] + alpha * wp.cw_mul(A[i], x[i]) @wp.func def _inverse_diag_coefficient(coeff: Any, use_abs: wp.bool): zero = type(coeff)(0.0) one = type(coeff)(1.0) return wp.select(coeff == zero, one / wp.select(use_abs, coeff, wp.abs(coeff)), one) @wp.kernel def _extract_inverse_diagonal_blocked( diag_block: wp.array(dtype=Any), inv_diag: wp.array(dtype=Any), use_abs: int, ): i = wp.tid() d = wp.get_diag(diag_block[i]) for k in range(d.length): d[k] = _inverse_diag_coefficient(d[k], use_abs != 0) inv_diag[i] = d @wp.kernel def _extract_inverse_diagonal_scalar( diag_array: wp.array(dtype=Any), inv_diag: wp.array(dtype=Any), use_abs: int, ): i = wp.tid() inv_diag[i] = 
_inverse_diag_coefficient(diag_array[i], use_abs != 0) @wp.kernel def _extract_inverse_diagonal_dense( dense_matrix: wp.array2d(dtype=Any), inv_diag: wp.array(dtype=Any), use_abs: int, ): i = wp.tid() inv_diag[i] = _inverse_diag_coefficient(dense_matrix[i, i], use_abs != 0) @wp.kernel def _cg_kernel_1( tol: Any, resid: wp.array(dtype=Any), rz_old: wp.array(dtype=Any), p_Ap: wp.array(dtype=Any), x: wp.array(dtype=Any), r: wp.array(dtype=Any), p: wp.array(dtype=Any), Ap: wp.array(dtype=Any), ): i = wp.tid() alpha = wp.select(resid[0] > tol, rz_old.dtype(0.0), rz_old[0] / p_Ap[0]) x[i] = x[i] + alpha * p[i] r[i] = r[i] - alpha * Ap[i] @wp.kernel def _cg_kernel_2( tol: Any, resid: wp.array(dtype=Any), rz_old: wp.array(dtype=Any), rz_new: wp.array(dtype=Any), z: wp.array(dtype=Any), p: wp.array(dtype=Any), ): # p = r + (rz_new / rz_old) * p; i = wp.tid() beta = wp.select(resid[0] > tol, rz_old.dtype(0.0), rz_new[0] / rz_old[0]) p[i] = z[i] + beta * p[i] @wp.kernel def _cr_kernel_1( tol: Any, resid: wp.array(dtype=Any), zAz_old: wp.array(dtype=Any), y_Ap: wp.array(dtype=Any), x: wp.array(dtype=Any), r: wp.array(dtype=Any), z: wp.array(dtype=Any), p: wp.array(dtype=Any), Ap: wp.array(dtype=Any), y: wp.array(dtype=Any), ): i = wp.tid() alpha = wp.select(resid[0] > tol and y_Ap[0] > 0.0, zAz_old.dtype(0.0), zAz_old[0] / y_Ap[0]) x[i] = x[i] + alpha * p[i] r[i] = r[i] - alpha * Ap[i] z[i] = z[i] - alpha * y[i] @wp.kernel def _cr_kernel_2( tol: Any, resid: wp.array(dtype=Any), zAz_old: wp.array(dtype=Any), zAz_new: wp.array(dtype=Any), z: wp.array(dtype=Any), p: wp.array(dtype=Any), Az: wp.array(dtype=Any), Ap: wp.array(dtype=Any), ): # p = r + (rz_new / rz_old) * p; i = wp.tid() beta = wp.select(resid[0] > tol and zAz_old[0] > 0.0, zAz_old.dtype(0.0), zAz_new[0] / zAz_old[0]) p[i] = z[i] + beta * p[i] Ap[i] = Az[i] + beta * Ap[i] @wp.kernel def _bicgstab_kernel_1( tol: Any, resid: wp.array(dtype=Any), rho_old: wp.array(dtype=Any), r0v: wp.array(dtype=Any), x: wp.array(dtype=Any), r: wp.array(dtype=Any), y: wp.array(dtype=Any), v: wp.array(dtype=Any), ): i = wp.tid() alpha = wp.select(resid[0] > tol, rho_old.dtype(0.0), rho_old[0] / r0v[0]) x[i] += alpha * y[i] r[i] -= alpha * v[i] @wp.kernel def _bicgstab_kernel_2( tol: Any, resid: wp.array(dtype=Any), st: wp.array(dtype=Any), tt: wp.array(dtype=Any), z: wp.array(dtype=Any), t: wp.array(dtype=Any), x: wp.array(dtype=Any), r: wp.array(dtype=Any), ): i = wp.tid() omega = wp.select(resid[0] > tol, st.dtype(0.0), st[0] / tt[0]) x[i] += omega * z[i] r[i] -= omega * t[i] @wp.kernel def _bicgstab_kernel_3( tol: Any, resid: wp.array(dtype=Any), rho_new: wp.array(dtype=Any), r0v: wp.array(dtype=Any), st: wp.array(dtype=Any), tt: wp.array(dtype=Any), p: wp.array(dtype=Any), r: wp.array(dtype=Any), v: wp.array(dtype=Any), ): i = wp.tid() beta = wp.select(resid[0] > tol, st.dtype(0.0), rho_new[0] * tt[0] / (r0v[0] * st[0])) beta_omega = wp.select(resid[0] > tol, st.dtype(0.0), rho_new[0] / r0v[0]) p[i] = r[i] + beta * p[i] - beta_omega * v[i] @wp.kernel def _gmres_normalize_lower_diagonal(H: wp.array2d(dtype=Any)): # normalize lower-diagonal values of Hessenberg matrix i = wp.tid() H[i + 1, i] = wp.sqrt(H[i + 1, i]) @wp.kernel def _gmres_solve_least_squares( k: int, pivot_tolerance: float, beta_sq: wp.array(dtype=Any), H: wp.array2d(dtype=Any), y: wp.array(dtype=Any) ): # Solve H y = (beta, 0, ..., 0) # H Hessenberg matrix of shape (k+1, k) # Keeping H in global mem; warp kernels are launched with fixed block size, # so would not fit in registers # TODO: switch 
to native code with thread synchronization rhs = wp.sqrt(beta_sq[0]) # Apply 2x2 rotations to H so as to remove lower diagonal, # and apply similar rotations to right-hand-side max_k = int(k) for i in range(k): Ha = H[i] Hb = H[i + 1] # Givens rotation [[c s], [-s c]] a = Ha[i] b = Hb[i] abn_sq = a * a + b * b if abn_sq < type(abn_sq)(pivot_tolerance): # Arnoldi iteration finished early max_k = i break abn = wp.sqrt(abn_sq) c = a / abn s = b / abn # Rotate H for j in range(i, k): a = Ha[j] b = Hb[j] Ha[j] = c * a + s * b Hb[j] = c * b - s * a # Rotate rhs y[i] = c * rhs rhs = -s * rhs for i in range(max_k, k): y[i] = y.dtype(0.0) # Triangular back-solve for y for ii in range(max_k, 0, -1): i = ii - 1 Hi = H[i] yi = y[i] for j in range(ii, max_k): yi -= Hi[j] * y[j] y[i] = yi / Hi[i] @wp.kernel def _gmres_arnoldi_axpy_kernel( x: wp.array(dtype=Any), y: wp.array(dtype=Any), alpha: wp.array(dtype=Any), ): tid = wp.tid() y[tid] -= x[tid] * alpha[0] @wp.kernel def _gmres_arnoldi_normalize_kernel( x: wp.array(dtype=Any), y: wp.array(dtype=Any), alpha: wp.array(dtype=Any), ): tid = wp.tid() y[tid] = wp.select(alpha[0] == alpha.dtype(0.0), x[tid] / wp.sqrt(alpha[0]), x[tid]) @wp.kernel def _gmres_update_x_kernel(k: int, beta: Any, y: wp.array(dtype=Any), V: wp.array2d(dtype=Any), x: wp.array(dtype=Any)): tid = wp.tid() xi = beta * x[tid] for j in range(k): xi += V[j, tid] * y[j] x[tid] = xi
34,940
Python
30.620814
183
0.587035
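The iterative solvers in the module above (``cg``, ``cr``, ``bicgstab``, ``gmres``) share one calling convention: pass the system matrix (a dense 2-D array, a ``BsrMatrix``, or a custom ``LinearOperator``), the right-hand side, and a solution array that doubles as the initial guess, and receive an ``(iterations, residual norm, absolute tolerance)`` tuple back. A minimal sketch of that usage, assuming the module is importable as ``warp.optim.linear``; the 3x3 system and tolerance are illustrative values, not taken from the source:

.. code-block:: python

    import numpy as np
    import warp as wp
    from warp.optim.linear import cg, preconditioner  # assumed module path

    wp.init()

    # Small symmetric positive-definite system (illustrative values).
    # A 2-D Warp array is treated as a dense matrix by aslinearoperator();
    # a 1-D array would be treated as a diagonal matrix instead.
    A = wp.array(
        np.array(
            [
                [4.0, 1.0, 0.0],
                [1.0, 3.0, 1.0],
                [0.0, 1.0, 2.0],
            ],
            dtype=np.float32,
        )
    )
    b = wp.array(np.array([1.0, 2.0, 3.0], dtype=np.float32))
    x = wp.zeros(3, dtype=wp.float32)  # initial guess, overwritten with the solution

    M = preconditioner(A, "diag")  # Jacobi preconditioner built from the diagonal
    num_iters, residual, abs_tol = cg(A, b, x, tol=1.0e-6, M=M)
    print(num_iters, residual, x.numpy())

Swapping ``cg`` for ``cr``, ``bicgstab``, or ``gmres`` only changes the function name and the solver-specific options (``is_left_preconditioner``, ``restart``); the return value stays the same.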
NVIDIA/warp/warp/optim/__init__.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. from .adam import Adam from .sgd import SGD
468
Python
45.899995
76
0.811966
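Since the package ``__init__`` above re-exports both optimizer classes, they can be imported directly from ``warp.optim``:

.. code-block:: python

    from warp.optim import SGD, Adam  # re-exported at package level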
NVIDIA/warp/warp/optim/adam.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import warp as wp @wp.kernel def adam_step_kernel_vec3( g: wp.array(dtype=wp.vec3), m: wp.array(dtype=wp.vec3), v: wp.array(dtype=wp.vec3), lr: float, beta1: float, beta2: float, t: float, eps: float, params: wp.array(dtype=wp.vec3), ): i = wp.tid() m[i] = beta1 * m[i] + (1.0 - beta1) * g[i] v[i] = beta2 * v[i] + (1.0 - beta2) * wp.cw_mul(g[i], g[i]) mhat = m[i] / (1.0 - wp.pow(beta1, (t + 1.0))) vhat = v[i] / (1.0 - wp.pow(beta2, (t + 1.0))) sqrt_vhat = wp.vec3(wp.sqrt(vhat[0]), wp.sqrt(vhat[1]), wp.sqrt(vhat[2])) eps_vec3 = wp.vec3(eps, eps, eps) params[i] = params[i] - lr * wp.cw_div(mhat, (sqrt_vhat + eps_vec3)) @wp.kernel def adam_step_kernel_float( g: wp.array(dtype=float), m: wp.array(dtype=float), v: wp.array(dtype=float), lr: float, beta1: float, beta2: float, t: float, eps: float, params: wp.array(dtype=float), ): i = wp.tid() m[i] = beta1 * m[i] + (1.0 - beta1) * g[i] v[i] = beta2 * v[i] + (1.0 - beta2) * g[i] * g[i] mhat = m[i] / (1.0 - wp.pow(beta1, (t + 1.0))) vhat = v[i] / (1.0 - wp.pow(beta2, (t + 1.0))) params[i] = params[i] - lr * mhat / (wp.sqrt(vhat) + eps) class Adam: """An implementation of the Adam Optimizer It is designed to mimic Pytorch's version. https://pytorch.org/docs/stable/generated/torch.optim.Adam.html#torch.optim.Adam """ def __init__(self, params=None, lr=0.001, betas=(0.9, 0.999), eps=1e-08): self.m = [] # first moment self.v = [] # second moment self.set_params(params) self.lr = lr self.beta1 = betas[0] self.beta2 = betas[1] self.eps = eps self.t = 0 def set_params(self, params): self.params = params if params is not None and isinstance(params, list) and len(params) > 0: if len(self.m) != len(params): self.m = [None] * len(params) # reset first moment if len(self.v) != len(params): self.v = [None] * len(params) # reset second moment for i in range(len(params)): param = params[i] if self.m[i] is None or self.m[i].shape != param.shape or self.m[i].dtype != param.dtype: self.m[i] = wp.zeros_like(param) if self.v[i] is None or self.v[i].shape != param.shape or self.v[i].dtype != param.dtype: self.v[i] = wp.zeros_like(param) def reset_internal_state(self): for m_i in self.m: m_i.zero_() for v_i in self.v: v_i.zero_() self.t = 0 def step(self, grad): assert self.params is not None for i in range(len(self.params)): Adam.step_detail( grad[i], self.m[i], self.v[i], self.lr, self.beta1, self.beta2, self.t, self.eps, self.params[i] ) self.t = self.t + 1 @staticmethod def step_detail(g, m, v, lr, beta1, beta2, t, eps, params): assert params.dtype == g.dtype assert params.dtype == m.dtype assert params.dtype == v.dtype assert params.shape == g.shape kernel_inputs = [g, m, v, lr, beta1, beta2, t, eps, params] if params.dtype == wp.types.float32: wp.launch( kernel=adam_step_kernel_float, dim=len(params), inputs=kernel_inputs, device=params.device, ) elif params.dtype == wp.types.vec3: wp.launch( kernel=adam_step_kernel_vec3, dim=len(params), inputs=kernel_inputs, device=params.device, ) else: raise RuntimeError("Params data type not supported in Adam step kernels.")
4,251
Python
34.140496
112
0.552105
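A brief sketch of driving the ``Adam`` class above with gradients recorded by ``wp.Tape``; the quadratic loss kernel, learning rate, and iteration count are illustrative assumptions rather than part of the source:

.. code-block:: python

    import numpy as np
    import warp as wp
    from warp.optim import Adam

    wp.init()

    @wp.kernel
    def quadratic_loss(x: wp.array(dtype=float), loss: wp.array(dtype=float)):
        # illustrative objective: sum_i (x_i - 3)^2
        i = wp.tid()
        d = x[i] - 3.0
        wp.atomic_add(loss, 0, d * d)

    x = wp.array(np.zeros(8, dtype=np.float32), requires_grad=True)
    loss = wp.zeros(1, dtype=float, requires_grad=True)
    opt = Adam([x], lr=0.1)

    for _ in range(100):
        loss.zero_()
        tape = wp.Tape()
        with tape:
            wp.launch(quadratic_loss, dim=x.shape[0], inputs=[x, loss])
        tape.backward(loss)
        opt.step([x.grad])  # gradient arrays in the same order as the params list
        tape.zero()

    print(x.numpy())  # entries should approach 3.0

Each ``step()`` call advances the internal time step ``t`` used for bias correction, so ``reset_internal_state()`` is the right way to reuse the optimizer on a fresh problem.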
NVIDIA/warp/warp/render/__init__.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. from .render_opengl import OpenGLRenderer from .render_usd import UsdRenderer from .utils import bourke_color_map
538
Python
47.999996
76
0.819703
NVIDIA/warp/warp/render/utils.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. from typing import Union import numpy as np import warp as wp def bourke_color_map(low, high, v): c = [1.0, 1.0, 1.0] if v < low: v = low if v > high: v = high dv = high - low if v < (low + 0.25 * dv): c[0] = 0.0 c[1] = 4.0 * (v - low) / dv elif v < (low + 0.5 * dv): c[0] = 0.0 c[2] = 1.0 + 4.0 * (low + 0.25 * dv - v) / dv elif v < (low + 0.75 * dv): c[0] = 4.0 * (v - low - 0.5 * dv) / dv c[2] = 0.0 else: c[1] = 1.0 + 4.0 * (low + 0.75 * dv - v) / dv c[2] = 0.0 return c def tab10_color_map(i): # matplotlib "tab10" colors colors = [ [31, 119, 180], [255, 127, 14], [44, 160, 44], [214, 39, 40], [148, 103, 189], [140, 86, 75], [227, 119, 194], [127, 127, 127], [188, 189, 34], [23, 190, 207], ] num_colors = len(colors) return [c / 255.0 for c in colors[i % num_colors]] # triangulate mesh around given surface with given thickness @wp.kernel def solidify_mesh_kernel( indices: wp.array(dtype=int, ndim=2), vertices: wp.array(dtype=wp.vec3, ndim=1), thickness: wp.array(dtype=float, ndim=1), # outputs out_vertices: wp.array(dtype=wp.vec3, ndim=1), out_indices: wp.array(dtype=int, ndim=2), ): tid = wp.tid() i = indices[tid, 0] j = indices[tid, 1] k = indices[tid, 2] vi = vertices[i] vj = vertices[j] vk = vertices[k] normal = wp.normalize(wp.cross(vj - vi, vk - vi)) ti = normal * thickness[i] tj = normal * thickness[j] tk = normal * thickness[k] # wedge vertices vi0 = vi + ti vi1 = vi - ti vj0 = vj + tj vj1 = vj - tj vk0 = vk + tk vk1 = vk - tk i0 = i * 2 i1 = i * 2 + 1 j0 = j * 2 j1 = j * 2 + 1 k0 = k * 2 k1 = k * 2 + 1 out_vertices[i0] = vi0 out_vertices[i1] = vi1 out_vertices[j0] = vj0 out_vertices[j1] = vj1 out_vertices[k0] = vk0 out_vertices[k1] = vk1 oid = tid * 8 out_indices[oid + 0, 0] = i0 out_indices[oid + 0, 1] = j0 out_indices[oid + 0, 2] = k0 out_indices[oid + 1, 0] = j0 out_indices[oid + 1, 1] = k1 out_indices[oid + 1, 2] = k0 out_indices[oid + 2, 0] = j0 out_indices[oid + 2, 1] = j1 out_indices[oid + 2, 2] = k1 out_indices[oid + 3, 0] = j0 out_indices[oid + 3, 1] = i1 out_indices[oid + 3, 2] = j1 out_indices[oid + 4, 0] = j0 out_indices[oid + 4, 1] = i0 out_indices[oid + 4, 2] = i1 out_indices[oid + 5, 0] = j1 out_indices[oid + 5, 1] = i1 out_indices[oid + 5, 2] = k1 out_indices[oid + 6, 0] = i1 out_indices[oid + 6, 1] = i0 out_indices[oid + 6, 2] = k0 out_indices[oid + 7, 0] = i1 out_indices[oid + 7, 1] = k0 out_indices[oid + 7, 2] = k1 def solidify_mesh(faces: np.ndarray, vertices: np.ndarray, thickness: Union[list, float]): """ Triangulate mesh around given surface with given thickness. 
:param faces: array of face indices (Nx3) :param vertices: array of vertex positions (Mx3) :param thickness: array of thickness values (Mx1) or single thickness value :return: tuple of (faces, vertices) """ faces = np.array(faces).reshape(-1, 3) out_faces = wp.zeros((len(faces) * 8, 3), dtype=wp.int32) out_vertices = wp.zeros(len(vertices) * 2, dtype=wp.vec3) if not isinstance(thickness, np.ndarray) and not isinstance(thickness, list): thickness = [thickness] * len(vertices) wp.launch( solidify_mesh_kernel, dim=len(faces), inputs=[wp.array(faces, dtype=int), wp.array(vertices, dtype=wp.vec3), wp.array(thickness, dtype=float)], outputs=[out_vertices, out_faces], ) faces = out_faces.numpy() vertices = out_vertices.numpy() return faces, vertices
4,298
Python
27.098039
113
0.563983
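A short sketch of ``solidify_mesh`` applied to a single triangle; the coordinates and thickness below are illustrative. As laid out in ``solidify_mesh_kernel`` above, every input face yields eight wedge faces and every input vertex two offset vertices:

.. code-block:: python

    import numpy as np
    import warp as wp
    from warp.render.utils import solidify_mesh, tab10_color_map

    wp.init()

    # One triangle in the XZ plane (illustrative geometry).
    faces = np.array([[0, 1, 2]], dtype=np.int32)
    vertices = np.array(
        [
            [0.0, 0.0, 0.0],
            [1.0, 0.0, 0.0],
            [0.0, 0.0, 1.0],
        ],
        dtype=np.float32,
    )

    solid_faces, solid_vertices = solidify_mesh(faces, vertices, thickness=0.05)
    print(solid_faces.shape, solid_vertices.shape)  # (8, 3) and (6, 3)

    color = tab10_color_map(0)  # first "tab10" color as an RGB triple in [0, 1]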
NVIDIA/warp/warp/render/render_usd.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import numpy as np import warp as wp def _usd_add_xform(prim): from pxr import UsdGeom prim = UsdGeom.Xform(prim) prim.ClearXformOpOrder() prim.AddTranslateOp() prim.AddOrientOp() prim.AddScaleOp() def _usd_set_xform(xform, pos: tuple, rot: tuple, scale: tuple, time): from pxr import Gf, UsdGeom xform = UsdGeom.Xform(xform) xform_ops = xform.GetOrderedXformOps() if pos is not None: xform_ops[0].Set(Gf.Vec3d(float(pos[0]), float(pos[1]), float(pos[2])), time) if rot is not None: xform_ops[1].Set(Gf.Quatf(float(rot[3]), float(rot[0]), float(rot[1]), float(rot[2])), time) if scale is not None: xform_ops[2].Set(Gf.Vec3d(float(scale[0]), float(scale[1]), float(scale[2])), time) # transforms a cylinder such that it connects the two points pos0, pos1 def _compute_segment_xform(pos0, pos1): from pxr import Gf mid = (pos0 + pos1) * 0.5 height = (pos1 - pos0).GetLength() dir = (pos1 - pos0) / height rot = Gf.Rotation() rot.SetRotateInto((0.0, 0.0, 1.0), Gf.Vec3d(dir)) scale = Gf.Vec3f(1.0, 1.0, height) return (mid, Gf.Quath(rot.GetQuat()), scale) class UsdRenderer: """A USD renderer""" def __init__(self, stage, up_axis="Y", fps=60, scaling=1.0): """Construct a UsdRenderer object Args: model: A simulation model stage (str/Usd.Stage): A USD stage (either in memory or on disk) up_axis (str): The upfacing axis of the stage fps: The number of frames per second to use in the USD file scaling: Scaling factor to use for the entities in the scene """ try: from pxr import Gf, Sdf, Usd, UsdGeom, UsdLux except ImportError as e: raise ImportError("Failed to import pxr. Please install USD (e.g. via `pip install usd-core`).") from e if isinstance(stage, str): self.stage = stage = Usd.Stage.CreateNew(stage) elif isinstance(stage, Usd.Stage): self.stage = stage else: print("Failed to create stage in renderer. Please construct with stage path or stage object.") self.up_axis = up_axis.upper() self.fps = float(fps) self.time = 0.0 self.draw_points = True self.draw_springs = False self.draw_triangles = False self.root = UsdGeom.Xform.Define(stage, "/root") # mapping from shape ID to UsdGeom class self._shape_constructors = {} # optional scaling applied to shape instances (e.g. 
cubes) self._shape_custom_scale = {} # apply scaling self.root.ClearXformOpOrder() s = self.root.AddScaleOp() s.Set(Gf.Vec3d(float(scaling), float(scaling), float(scaling)), 0.0) self.stage.SetDefaultPrim(self.root.GetPrim()) self.stage.SetStartTimeCode(0.0) self.stage.SetEndTimeCode(0.0) self.stage.SetTimeCodesPerSecond(self.fps) if up_axis == "X": UsdGeom.SetStageUpAxis(self.stage, UsdGeom.Tokens.x) elif up_axis == "Y": UsdGeom.SetStageUpAxis(self.stage, UsdGeom.Tokens.y) elif up_axis == "Z": UsdGeom.SetStageUpAxis(self.stage, UsdGeom.Tokens.z) dome_light = UsdLux.DomeLight.Define(stage, "/dome_light") dome_light.AddRotateXYZOp().Set((-90.0, -30.0, 0.0)) dome_light.GetEnableColorTemperatureAttr().Set(True) dome_light.GetColorTemperatureAttr().Set(6150.0) dome_light.GetIntensityAttr().Set(1.0) dome_light.GetExposureAttr().Set(9.0) dome_light.GetPrim().CreateAttribute("visibleInPrimaryRay", Sdf.ValueTypeNames.Bool).Set(False) distant_light = UsdLux.DistantLight.Define(stage, "/distant_light") distant_light.AddRotateXYZOp().Set((-35.0, 45.0, 0.0)) distant_light.GetEnableColorTemperatureAttr().Set(True) distant_light.GetColorTemperatureAttr().Set(7250.0) distant_light.GetIntensityAttr().Set(1.0) distant_light.GetExposureAttr().Set(10.0) def begin_frame(self, time): self.time = round(time * self.fps) self.stage.SetEndTimeCode(self.time) def end_frame(self): pass def register_body(self, body_name): from pxr import UsdGeom xform = UsdGeom.Xform.Define(self.stage, self.root.GetPath().AppendChild(body_name)) _usd_add_xform(xform) def _resolve_path(self, name, parent_body=None, is_template=False): # resolve the path to the prim with the given name and optional parent body if is_template: return self.root.GetPath().AppendChild("_template_shapes").AppendChild(name) if parent_body is None: return self.root.GetPath().AppendChild(name) else: return self.root.GetPath().AppendChild(parent_body).AppendChild(name) def add_shape_instance( self, name: str, shape, body, pos: tuple, rot: tuple, scale: tuple = (1.0, 1.0, 1.0), color: tuple = (1.0, 1.0, 1.0), custom_index: int = -1, visible: bool = True, ): if not visible: return sdf_path = self._resolve_path(name, body) instance = self._shape_constructors[shape.name].Define(self.stage, sdf_path) instance.GetPrim().GetReferences().AddInternalReference(shape) _usd_add_xform(instance) if shape.name in self._shape_custom_scale: cs = self._shape_custom_scale[shape.name] scale = (scale[0] * cs[0], scale[1] * cs[1], scale[2] * cs[2]) _usd_set_xform(instance, pos, rot, scale, self.time) def render_plane( self, name: str, pos: tuple, rot: tuple, width: float, length: float, color: tuple = None, parent_body: str = None, is_template: bool = False, ): """ Render a plane with the given dimensions. 
Args: name: Name of the plane pos: Position of the plane rot: Rotation of the plane width: Width of the plane length: Length of the plane color: Color of the plane parent_body: Name of the parent body is_template: Whether the plane is a template """ from pxr import Sdf, UsdGeom if is_template: prim_path = self._resolve_path(name, parent_body, is_template) blueprint = UsdGeom.Scope.Define(self.stage, prim_path) blueprint_prim = blueprint.GetPrim() blueprint_prim.SetInstanceable(True) blueprint_prim.SetSpecifier(Sdf.SpecifierClass) plane_path = prim_path.AppendChild("plane") else: plane_path = self._resolve_path(name, parent_body) prim_path = plane_path plane = UsdGeom.Mesh.Get(self.stage, plane_path) if not plane: plane = UsdGeom.Mesh.Define(self.stage, plane_path) plane.CreateDoubleSidedAttr().Set(True) width = width if width > 0.0 else 100.0 length = length if length > 0.0 else 100.0 points = ((-width, 0.0, -length), (width, 0.0, -length), (width, 0.0, length), (-width, 0.0, length)) normals = ((0.0, 1.0, 0.0), (0.0, 1.0, 0.0), (0.0, 1.0, 0.0), (0.0, 1.0, 0.0)) counts = (4,) indices = [0, 1, 2, 3] plane.GetPointsAttr().Set(points) plane.GetNormalsAttr().Set(normals) plane.GetFaceVertexCountsAttr().Set(counts) plane.GetFaceVertexIndicesAttr().Set(indices) _usd_add_xform(plane) self._shape_constructors[name] = UsdGeom.Mesh if not is_template: _usd_set_xform(plane, pos, rot, (1.0, 1.0, 1.0), 0.0) return prim_path def render_ground(self, size: float = 100.0, plane=None): from pxr import UsdGeom mesh = UsdGeom.Mesh.Define(self.stage, self.root.GetPath().AppendChild("ground")) mesh.CreateDoubleSidedAttr().Set(True) if self.up_axis == "X": points = ((0.0, size, -size), (0.0, -size, -size), (0.0, size, size), (0.0, -size, size)) normals = ((1.0, 0.0, 0.0), (1.0, 0.0, 0.0), (1.0, 0.0, 0.0), (1.0, 0.0, 0.0)) elif self.up_axis == "Y": points = ((-size, 0.0, -size), (size, 0.0, -size), (-size, 0.0, size), (size, 0.0, size)) normals = ((0.0, 1.0, 0.0), (0.0, 1.0, 0.0), (0.0, 1.0, 0.0), (0.0, 1.0, 0.0)) elif self.up_axis == "Z": points = ((-size, size, 0.0), (size, size, 0.0), (-size, -size, 0.0), (size, -size, 0.0)) normals = ((0.0, 0.0, 1.0), (0.0, 0.0, 1.0), (0.0, 0.0, 1.0), (0.0, 0.0, 1.0)) if plane is not None: normal = np.array(plane[:3]) normal /= np.linalg.norm(normal) pos = plane[3] * normal axis_up = [0.0, 0.0, 0.0] axis_up["XYZ".index(self.up_axis)] = 1.0 if np.allclose(normal, axis_up): # no rotation necessary q = (0.0, 0.0, 0.0, 1.0) else: c = np.cross(normal, axis_up) angle = np.arcsin(np.linalg.norm(c)) axis = np.abs(c) / np.linalg.norm(c) q = wp.quat_from_axis_angle(axis, angle) tf = wp.transform(pos, q) points = [wp.transform_point(tf, wp.vec3(p)) for p in points] normals = [wp.transform_vector(tf, wp.vec3(n)) for n in normals] counts = (4,) indices = [0, 2, 3, 1] mesh.GetPointsAttr().Set(points) mesh.GetNormalsAttr().Set(normals) mesh.GetFaceVertexCountsAttr().Set(counts) mesh.GetFaceVertexIndicesAttr().Set(indices) def render_sphere( self, name: str, pos: tuple, rot: tuple, radius: float, parent_body: str = None, is_template: bool = False, color: tuple = None, ): """Debug helper to add a sphere for visualization Args: pos: The position of the sphere radius: The radius of the sphere name: A name for the USD prim on the stage color: The color of the sphere """ from pxr import Gf, Sdf, UsdGeom if is_template: prim_path = self._resolve_path(name, parent_body, is_template) blueprint = UsdGeom.Scope.Define(self.stage, prim_path) blueprint_prim = blueprint.GetPrim() 
blueprint_prim.SetInstanceable(True) blueprint_prim.SetSpecifier(Sdf.SpecifierClass) sphere_path = prim_path.AppendChild("sphere") else: sphere_path = self._resolve_path(name, parent_body) prim_path = sphere_path sphere = UsdGeom.Sphere.Get(self.stage, sphere_path) if not sphere: sphere = UsdGeom.Sphere.Define(self.stage, sphere_path) _usd_add_xform(sphere) sphere.GetRadiusAttr().Set(radius, self.time) if color is not None: sphere.GetDisplayColorAttr().Set([Gf.Vec3f(color)], self.time) self._shape_constructors[name] = UsdGeom.Sphere if not is_template: _usd_set_xform(sphere, pos, rot, (1.0, 1.0, 1.0), self.time) return prim_path def render_capsule( self, name: str, pos: tuple, rot: tuple, radius: float, half_height: float, parent_body: str = None, is_template: bool = False, color: tuple = None, ): """ Debug helper to add a capsule for visualization Args: pos: The position of the capsule radius: The radius of the capsule half_height: The half height of the capsule name: A name for the USD prim on the stage color: The color of the capsule """ from pxr import Gf, Sdf, UsdGeom if is_template: prim_path = self._resolve_path(name, parent_body, is_template) blueprint = UsdGeom.Scope.Define(self.stage, prim_path) blueprint_prim = blueprint.GetPrim() blueprint_prim.SetInstanceable(True) blueprint_prim.SetSpecifier(Sdf.SpecifierClass) capsule_path = prim_path.AppendChild("capsule") else: capsule_path = self._resolve_path(name, parent_body) prim_path = capsule_path capsule = UsdGeom.Capsule.Get(self.stage, capsule_path) if not capsule: capsule = UsdGeom.Capsule.Define(self.stage, capsule_path) _usd_add_xform(capsule) capsule.GetRadiusAttr().Set(float(radius)) capsule.GetHeightAttr().Set(float(half_height * 2.0)) capsule.GetAxisAttr().Set("Y") if color is not None: capsule.GetDisplayColorAttr().Set([Gf.Vec3f(color)], self.time) self._shape_constructors[name] = UsdGeom.Capsule if not is_template: _usd_set_xform(capsule, pos, rot, (1.0, 1.0, 1.0), self.time) return prim_path def render_cylinder( self, name: str, pos: tuple, rot: tuple, radius: float, half_height: float, parent_body: str = None, is_template: bool = False, color: tuple = None, ): """ Debug helper to add a cylinder for visualization Args: pos: The position of the cylinder radius: The radius of the cylinder half_height: The half height of the cylinder name: A name for the USD prim on the stage color: The color of the cylinder """ from pxr import Gf, Sdf, UsdGeom if is_template: prim_path = self._resolve_path(name, parent_body, is_template) blueprint = UsdGeom.Scope.Define(self.stage, prim_path) blueprint_prim = blueprint.GetPrim() blueprint_prim.SetInstanceable(True) blueprint_prim.SetSpecifier(Sdf.SpecifierClass) cylinder_path = prim_path.AppendChild("cylinder") else: cylinder_path = self._resolve_path(name, parent_body) prim_path = cylinder_path cylinder = UsdGeom.Cylinder.Get(self.stage, cylinder_path) if not cylinder: cylinder = UsdGeom.Cylinder.Define(self.stage, cylinder_path) _usd_add_xform(cylinder) cylinder.GetRadiusAttr().Set(float(radius)) cylinder.GetHeightAttr().Set(float(half_height * 2.0)) cylinder.GetAxisAttr().Set("Y") if color is not None: cylinder.GetDisplayColorAttr().Set([Gf.Vec3f(color)], self.time) self._shape_constructors[name] = UsdGeom.Cylinder if not is_template: _usd_set_xform(cylinder, pos, rot, (1.0, 1.0, 1.0), self.time) return prim_path def render_cone( self, name: str, pos: tuple, rot: tuple, radius: float, half_height: float, parent_body: str = None, is_template: bool = False, color: tuple = None, ): """ 
Debug helper to add a cone for visualization Args: pos: The position of the cone radius: The radius of the cone half_height: The half height of the cone name: A name for the USD prim on the stage color: The color of the cone """ from pxr import Gf, Sdf, UsdGeom if is_template: prim_path = self._resolve_path(name, parent_body, is_template) blueprint = UsdGeom.Scope.Define(self.stage, prim_path) blueprint_prim = blueprint.GetPrim() blueprint_prim.SetInstanceable(True) blueprint_prim.SetSpecifier(Sdf.SpecifierClass) cone_path = prim_path.AppendChild("cone") else: cone_path = self._resolve_path(name, parent_body) prim_path = cone_path cone = UsdGeom.Cone.Get(self.stage, cone_path) if not cone: cone = UsdGeom.Cone.Define(self.stage, cone_path) _usd_add_xform(cone) cone.GetRadiusAttr().Set(float(radius)) cone.GetHeightAttr().Set(float(half_height * 2.0)) cone.GetAxisAttr().Set("Y") if color is not None: cone.GetDisplayColorAttr().Set([Gf.Vec3f(color)], self.time) self._shape_constructors[name] = UsdGeom.Cone if not is_template: _usd_set_xform(cone, pos, rot, (1.0, 1.0, 1.0), self.time) return prim_path def render_box( self, name: str, pos: tuple, rot: tuple, extents: tuple, parent_body: str = None, is_template: bool = False, color: tuple = None, ): """Debug helper to add a box for visualization Args: pos: The position of the box extents: The radius of the box name: A name for the USD prim on the stage color: The color of the box """ from pxr import Gf, Sdf, UsdGeom if is_template: prim_path = self._resolve_path(name, parent_body, is_template) blueprint = UsdGeom.Scope.Define(self.stage, prim_path) blueprint_prim = blueprint.GetPrim() blueprint_prim.SetInstanceable(True) blueprint_prim.SetSpecifier(Sdf.SpecifierClass) cube_path = prim_path.AppendChild("cube") else: cube_path = self._resolve_path(name, parent_body) prim_path = cube_path cube = UsdGeom.Cube.Get(self.stage, cube_path) if not cube: cube = UsdGeom.Cube.Define(self.stage, cube_path) _usd_add_xform(cube) if color is not None: cube.GetDisplayColorAttr().Set([Gf.Vec3f(color)], self.time) self._shape_constructors[name] = UsdGeom.Cube self._shape_custom_scale[name] = extents if not is_template: _usd_set_xform(cube, pos, rot, extents, self.time) return prim_path def render_ref(self, name: str, path: str, pos: tuple, rot: tuple, scale: tuple, color: tuple = None): from pxr import Gf, Usd, UsdGeom ref_path = "/root/" + name ref = UsdGeom.Xform.Get(self.stage, ref_path) if not ref: ref = UsdGeom.Xform.Define(self.stage, ref_path) ref.GetPrim().GetReferences().AddReference(path) _usd_add_xform(ref) # update transform _usd_set_xform(ref, pos, rot, scale, self.time) if color is not None: it = iter(Usd.PrimRange(ref.GetPrim())) for prim in it: if prim.IsA(UsdGeom.Gprim): UsdGeom.Gprim(prim).GetDisplayColorAttr().Set([Gf.Vec3f(color)], self.time) it.PruneChildren() def render_mesh( self, name: str, points, indices, colors=None, pos=(0.0, 0.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0), scale=(1.0, 1.0, 1.0), update_topology=False, parent_body: str = None, is_template: bool = False, ): from pxr import Sdf, UsdGeom if is_template: prim_path = self._resolve_path(name, parent_body, is_template) blueprint = UsdGeom.Scope.Define(self.stage, prim_path) blueprint_prim = blueprint.GetPrim() blueprint_prim.SetInstanceable(True) blueprint_prim.SetSpecifier(Sdf.SpecifierClass) mesh_path = prim_path.AppendChild("mesh") else: mesh_path = self._resolve_path(name, parent_body) prim_path = mesh_path mesh = UsdGeom.Mesh.Get(self.stage, mesh_path) if not mesh: mesh = 
UsdGeom.Mesh.Define(self.stage, mesh_path) UsdGeom.Primvar(mesh.GetDisplayColorAttr()).SetInterpolation("vertex") _usd_add_xform(mesh) # force topology update on first frame update_topology = True mesh.GetPointsAttr().Set(points, self.time) if update_topology: idxs = np.array(indices).reshape(-1, 3) mesh.GetFaceVertexIndicesAttr().Set(idxs, self.time) mesh.GetFaceVertexCountsAttr().Set([3] * len(idxs), self.time) if colors: mesh.GetDisplayColorAttr().Set(colors, self.time) self._shape_constructors[name] = UsdGeom.Mesh self._shape_custom_scale[name] = scale if not is_template: _usd_set_xform(mesh, pos, rot, scale, self.time) return prim_path def render_line_list(self, name, vertices, indices, color, radius): """Debug helper to add a line list as a set of capsules Args: vertices: The vertices of the line-strip color: The color of the line time: The time to update at """ from pxr import Gf, UsdGeom num_lines = int(len(indices) / 2) if num_lines < 1: return # look up rope point instancer instancer_path = self.root.GetPath().AppendChild(name) instancer = UsdGeom.PointInstancer.Get(self.stage, instancer_path) if not instancer: instancer = UsdGeom.PointInstancer.Define(self.stage, instancer_path) instancer_capsule = UsdGeom.Capsule.Define(self.stage, instancer.GetPath().AppendChild("capsule")) instancer_capsule.GetRadiusAttr().Set(radius) instancer.CreatePrototypesRel().SetTargets([instancer_capsule.GetPath()]) # instancer.CreatePrimvar("displayColor", Sdf.ValueTypeNames.Float3Array, "constant", 1) line_positions = [] line_rotations = [] line_scales = [] for i in range(num_lines): pos0 = vertices[indices[i * 2 + 0]] pos1 = vertices[indices[i * 2 + 1]] (pos, rot, scale) = _compute_segment_xform( Gf.Vec3f(float(pos0[0]), float(pos0[1]), float(pos0[2])), Gf.Vec3f(float(pos1[0]), float(pos1[1]), float(pos1[2])), ) line_positions.append(pos) line_rotations.append(rot) line_scales.append(scale) # line_colors.append(Gf.Vec3f((float(i)/num_lines, 0.5, 0.5))) instancer.GetPositionsAttr().Set(line_positions, self.time) instancer.GetOrientationsAttr().Set(line_rotations, self.time) instancer.GetScalesAttr().Set(line_scales, self.time) instancer.GetProtoIndicesAttr().Set([0] * num_lines, self.time) # instancer.GetPrimvar("displayColor").Set(line_colors, time) def render_line_strip(self, name: str, vertices, color: tuple, radius: float = 0.01): from pxr import Gf, UsdGeom num_lines = int(len(vertices) - 1) if num_lines < 1: return # look up rope point instancer instancer_path = self.root.GetPath().AppendChild(name) instancer = UsdGeom.PointInstancer.Get(self.stage, instancer_path) if not instancer: instancer = UsdGeom.PointInstancer.Define(self.stage, instancer_path) instancer_capsule = UsdGeom.Capsule.Define(self.stage, instancer.GetPath().AppendChild("capsule")) instancer_capsule.GetRadiusAttr().Set(radius) instancer.CreatePrototypesRel().SetTargets([instancer_capsule.GetPath()]) line_positions = [] line_rotations = [] line_scales = [] for i in range(num_lines): pos0 = vertices[i] pos1 = vertices[i + 1] (pos, rot, scale) = _compute_segment_xform( Gf.Vec3f(float(pos0[0]), float(pos0[1]), float(pos0[2])), Gf.Vec3f(float(pos1[0]), float(pos1[1]), float(pos1[2])), ) line_positions.append(pos) line_rotations.append(rot) line_scales.append(scale) instancer.GetPositionsAttr().Set(line_positions, self.time) instancer.GetOrientationsAttr().Set(line_rotations, self.time) instancer.GetScalesAttr().Set(line_scales, self.time) instancer.GetProtoIndicesAttr().Set([0] * num_lines, self.time) instancer_capsule = 
UsdGeom.Capsule.Get(self.stage, instancer.GetPath().AppendChild("capsule")) instancer_capsule.GetDisplayColorAttr().Set([Gf.Vec3f(color)], self.time) def render_points(self, name: str, points, radius, colors=None): from pxr import Gf, UsdGeom instancer_path = self.root.GetPath().AppendChild(name) instancer = UsdGeom.PointInstancer.Get(self.stage, instancer_path) radius_is_scalar = np.isscalar(radius) if not instancer: if colors is None or len(colors) == 3: instancer = UsdGeom.PointInstancer.Define(self.stage, instancer_path) instancer_sphere = UsdGeom.Sphere.Define(self.stage, instancer.GetPath().AppendChild("sphere")) if radius_is_scalar: instancer_sphere.GetRadiusAttr().Set(radius) else: instancer_sphere.GetRadiusAttr().Set(1.0) instancer.GetScalesAttr().Set(np.tile(radius, (3, 1)).T) if colors is not None: instancer_sphere.GetDisplayColorAttr().Set([Gf.Vec3f(colors)], self.time) instancer.CreatePrototypesRel().SetTargets([instancer_sphere.GetPath()]) instancer.CreateProtoIndicesAttr().Set([0] * len(points)) # set identity rotations quats = [Gf.Quath(1.0, 0.0, 0.0, 0.0)] * len(points) instancer.GetOrientationsAttr().Set(quats, self.time) else: instancer = UsdGeom.Points.Define(self.stage, instancer_path) if radius_is_scalar: instancer.GetWidthsAttr().Set([radius * 2.0] * len(points)) else: instancer.GetWidthsAttr().Set(radius * 2.0) if colors is None or len(colors) == 3: instancer.GetPositionsAttr().Set(points, self.time) else: instancer.GetPointsAttr().Set(points, self.time) instancer.GetDisplayColorAttr().Set(colors, self.time) def update_body_transforms(self, body_q): from pxr import Sdf, UsdGeom if isinstance(body_q, wp.array): body_q = body_q.numpy() with Sdf.ChangeBlock(): for b in range(self.model.body_count): node_name = self.body_names[b] node = UsdGeom.Xform(self.stage.GetPrimAtPath(self.root.GetPath().AppendChild(node_name))) # unpack rigid transform X_sb = wp.transform_expand(body_q[b]) _usd_set_xform(node, X_sb.p, X_sb.q, (1.0, 1.0, 1.0), self.time) def save(self): try: self.stage.Save() except Exception as e: print("Failed to save USD stage:", e) return False file_path = self.stage.GetRootLayer().realPath print(f"Saved the USD stage file at `{file_path}`") return True
27,697
Python
35.018205
115
0.579557
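A minimal sketch of using ``UsdRenderer`` on its own; the output file name, colors, and shape placement are illustrative, and writing the stage requires the ``usd-core`` package:

.. code-block:: python

    import warp as wp
    import warp.render

    # The stage path is illustrative; any .usd/.usda path works.
    renderer = wp.render.UsdRenderer("example_scene.usd", up_axis="Y", fps=60)

    renderer.begin_frame(0.0)
    renderer.render_ground(size=10.0)
    renderer.render_sphere(
        "ball", pos=(0.0, 1.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0), radius=0.5, color=(0.7, 0.3, 0.2)
    )
    renderer.render_box(
        "crate", pos=(2.0, 0.5, 0.0), rot=(0.0, 0.0, 0.0, 1.0), extents=(0.5, 0.5, 0.5)
    )
    renderer.end_frame()

    renderer.save()

Prims created with ``is_template=True`` by the ``render_*`` methods are meant to be reused through ``add_shape_instance`` rather than re-created every frame.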
NVIDIA/warp/warp/render/render_opengl.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. import ctypes import sys import time from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import warp as wp from .utils import tab10_color_map Mat44 = Union[List[float], List[List[float]], np.ndarray] wp.set_module_options({"enable_backward": False}) shape_vertex_shader = """ #version 330 core layout (location = 0) in vec3 aPos; layout (location = 1) in vec3 aNormal; layout (location = 2) in vec2 aTexCoord; // column vectors of the instance transform matrix layout (location = 3) in vec4 aInstanceTransform0; layout (location = 4) in vec4 aInstanceTransform1; layout (location = 5) in vec4 aInstanceTransform2; layout (location = 6) in vec4 aInstanceTransform3; // colors to use for the checkerboard pattern layout (location = 7) in vec3 aObjectColor1; layout (location = 8) in vec3 aObjectColor2; uniform mat4 view; uniform mat4 model; uniform mat4 projection; out vec3 Normal; out vec3 FragPos; out vec2 TexCoord; out vec3 ObjectColor1; out vec3 ObjectColor2; void main() { mat4 transform = model * mat4(aInstanceTransform0, aInstanceTransform1, aInstanceTransform2, aInstanceTransform3); vec4 worldPos = transform * vec4(aPos, 1.0); gl_Position = projection * view * worldPos; FragPos = vec3(worldPos); Normal = mat3(transpose(inverse(transform))) * aNormal; TexCoord = aTexCoord; ObjectColor1 = aObjectColor1; ObjectColor2 = aObjectColor2; } """ shape_fragment_shader = """ #version 330 core out vec4 FragColor; in vec3 Normal; in vec3 FragPos; in vec2 TexCoord; in vec3 ObjectColor1; in vec3 ObjectColor2; uniform vec3 viewPos; uniform vec3 lightColor; uniform vec3 sunDirection; void main() { float ambientStrength = 0.3; vec3 ambient = ambientStrength * lightColor; vec3 norm = normalize(Normal); float diff = max(dot(norm, sunDirection), 0.0); vec3 diffuse = diff * lightColor; vec3 lightDir2 = normalize(vec3(1.0, 0.3, -0.3)); diff = max(dot(norm, lightDir2), 0.0); diffuse += diff * lightColor * 0.3; float specularStrength = 0.5; vec3 viewDir = normalize(viewPos - FragPos); vec3 reflectDir = reflect(-sunDirection, norm); float spec = pow(max(dot(viewDir, reflectDir), 0.0), 32); vec3 specular = specularStrength * spec * lightColor; reflectDir = reflect(-lightDir2, norm); spec = pow(max(dot(viewDir, reflectDir), 0.0), 64); specular += specularStrength * spec * lightColor * 0.3; // checkerboard pattern float u = TexCoord.x; float v = TexCoord.y; // blend the checkerboard pattern dependent on the gradient of the texture coordinates // to void Moire patterns vec2 grad = abs(dFdx(TexCoord)) + abs(dFdy(TexCoord)); float blendRange = 1.5; float blendFactor = max(grad.x, grad.y) * blendRange; float scale = 2.0; float checker = mod(floor(u * scale) + floor(v * scale), 2.0); checker = mix(checker, 0.5, smoothstep(0.0, 1.0, blendFactor)); vec3 checkerColor = mix(ObjectColor1, ObjectColor2, checker); vec3 result = (ambient + diffuse + specular) * checkerColor; FragColor = vec4(result, 1.0); } """ grid_vertex_shader = """ #version 330 core uniform mat4 view; uniform mat4 model; uniform mat4 projection; in vec3 position; void main() { gl_Position = projection * 
view * model * vec4(position, 1.0); } """ # Fragment shader source code grid_fragment_shader = """ #version 330 core out vec4 outColor; void main() { outColor = vec4(0.5, 0.5, 0.5, 1.0); } """ sky_vertex_shader = """ #version 330 core layout (location = 0) in vec3 aPos; layout (location = 1) in vec3 aNormal; layout (location = 2) in vec2 aTexCoord; uniform mat4 view; uniform mat4 model; uniform mat4 projection; uniform vec3 viewPos; out vec3 FragPos; out vec2 TexCoord; void main() { vec4 worldPos = vec4(aPos + viewPos, 1.0); gl_Position = projection * view * worldPos; FragPos = vec3(worldPos); TexCoord = aTexCoord; } """ sky_fragment_shader = """ #version 330 core out vec4 FragColor; in vec3 FragPos; in vec2 TexCoord; uniform vec3 color1; uniform vec3 color2; uniform float farPlane; uniform vec3 sunDirection; void main() { float y = tanh(FragPos.y/farPlane*10.0)*0.5+0.5; float height = sqrt(1.0-y); float s = pow(0.5, 1.0 / 10.0); s = 1.0 - clamp(s, 0.75, 1.0); vec3 haze = mix(vec3(1.0), color2 * 1.3, s); vec3 sky = mix(color1, haze, height / 1.3); float diff = max(dot(sunDirection, normalize(FragPos)), 0.0); vec3 sun = pow(diff, 32) * vec3(1.0, 0.8, 0.6) * 0.5; FragColor = vec4(sky + sun, 1.0); } """ frame_vertex_shader = """ #version 330 core layout (location = 0) in vec3 aPos; layout (location = 1) in vec2 aTexCoord; out vec2 TexCoord; void main() { gl_Position = vec4(aPos, 1.0); TexCoord = aTexCoord; } """ frame_fragment_shader = """ #version 330 core in vec2 TexCoord; out vec4 FragColor; uniform sampler2D textureSampler; void main() { FragColor = texture(textureSampler, TexCoord); } """ frame_depth_fragment_shader = """ #version 330 core in vec2 TexCoord; out vec4 FragColor; uniform sampler2D textureSampler; vec3 bourkeColorMap(float v) { vec3 c = vec3(1.0, 1.0, 1.0); v = clamp(v, 0.0, 1.0); // Ensures v is between 0 and 1 if (v < 0.25) { c.r = 0.0; c.g = 4.0 * v; } else if (v < 0.5) { c.r = 0.0; c.b = 1.0 + 4.0 * (0.25 - v); } else if (v < 0.75) { c.r = 4.0 * (v - 0.5); c.b = 0.0; } else { c.g = 1.0 + 4.0 * (0.75 - v); c.b = 0.0; } return c; } void main() { float depth = texture(textureSampler, TexCoord).r; FragColor = vec4(bourkeColorMap(sqrt(1.0 - depth)), 1.0); } """ @wp.kernel def update_vbo_transforms( instance_id: wp.array(dtype=int), instance_body: wp.array(dtype=int), instance_transforms: wp.array(dtype=wp.transform), instance_scalings: wp.array(dtype=wp.vec3), body_q: wp.array(dtype=wp.transform), # outputs vbo_transforms: wp.array(dtype=wp.mat44), ): tid = wp.tid() i = instance_id[tid] X_ws = instance_transforms[i] if instance_body: body = instance_body[i] if body >= 0: if body_q: X_ws = body_q[body] * X_ws else: return p = wp.transform_get_translation(X_ws) q = wp.transform_get_rotation(X_ws) s = instance_scalings[i] rot = wp.quat_to_matrix(q) # transposed definition vbo_transforms[tid] = wp.mat44( rot[0, 0] * s[0], rot[1, 0] * s[0], rot[2, 0] * s[0], 0.0, rot[0, 1] * s[1], rot[1, 1] * s[1], rot[2, 1] * s[1], 0.0, rot[0, 2] * s[2], rot[1, 2] * s[2], rot[2, 2] * s[2], 0.0, p[0], p[1], p[2], 1.0, ) @wp.kernel def update_vbo_vertices( points: wp.array(dtype=wp.vec3), # outputs vbo_vertices: wp.array(dtype=float, ndim=2), ): tid = wp.tid() p = points[tid] vbo_vertices[tid, 0] = p[0] vbo_vertices[tid, 1] = p[1] vbo_vertices[tid, 2] = p[2] @wp.kernel def update_points_positions( instance_positions: wp.array(dtype=wp.vec3), instance_scalings: wp.array(dtype=wp.vec3), # outputs vbo_transforms: wp.array(dtype=wp.mat44), ): tid = wp.tid() p = instance_positions[tid] s = wp.vec3(1.0) 
if instance_scalings: s = instance_scalings[tid] # transposed definition # fmt: off vbo_transforms[tid] = wp.mat44( s[0], 0.0, 0.0, 0.0, 0.0, s[1], 0.0, 0.0, 0.0, 0.0, s[2], 0.0, p[0], p[1], p[2], 1.0) # fmt: on @wp.kernel def update_line_transforms( lines: wp.array(dtype=wp.vec3, ndim=2), # outputs vbo_transforms: wp.array(dtype=wp.mat44), ): tid = wp.tid() p0 = lines[tid, 0] p1 = lines[tid, 1] p = 0.5 * (p0 + p1) d = p1 - p0 s = wp.length(d) axis = wp.normalize(d) y_up = wp.vec3(0.0, 1.0, 0.0) angle = wp.acos(wp.dot(axis, y_up)) axis = wp.normalize(wp.cross(axis, y_up)) q = wp.quat_from_axis_angle(axis, -angle) rot = wp.quat_to_matrix(q) # transposed definition # fmt: off vbo_transforms[tid] = wp.mat44( rot[0, 0], rot[1, 0], rot[2, 0], 0.0, s * rot[0, 1], s * rot[1, 1], s * rot[2, 1], 0.0, rot[0, 2], rot[1, 2], rot[2, 2], 0.0, p[0], p[1], p[2], 1.0, ) # fmt: on @wp.kernel def compute_gfx_vertices( indices: wp.array(dtype=int, ndim=2), vertices: wp.array(dtype=wp.vec3, ndim=1), # outputs gfx_vertices: wp.array(dtype=float, ndim=2), ): tid = wp.tid() v0 = vertices[indices[tid, 0]] v1 = vertices[indices[tid, 1]] v2 = vertices[indices[tid, 2]] i = tid * 3 j = i + 1 k = i + 2 gfx_vertices[i, 0] = v0[0] gfx_vertices[i, 1] = v0[1] gfx_vertices[i, 2] = v0[2] gfx_vertices[j, 0] = v1[0] gfx_vertices[j, 1] = v1[1] gfx_vertices[j, 2] = v1[2] gfx_vertices[k, 0] = v2[0] gfx_vertices[k, 1] = v2[1] gfx_vertices[k, 2] = v2[2] n = wp.normalize(wp.cross(v1 - v0, v2 - v0)) gfx_vertices[i, 3] = n[0] gfx_vertices[i, 4] = n[1] gfx_vertices[i, 5] = n[2] gfx_vertices[j, 3] = n[0] gfx_vertices[j, 4] = n[1] gfx_vertices[j, 5] = n[2] gfx_vertices[k, 3] = n[0] gfx_vertices[k, 4] = n[1] gfx_vertices[k, 5] = n[2] @wp.kernel def compute_average_normals( indices: wp.array(dtype=int, ndim=2), vertices: wp.array(dtype=wp.vec3), # outputs normals: wp.array(dtype=wp.vec3), faces_per_vertex: wp.array(dtype=int), ): tid = wp.tid() i = indices[tid, 0] j = indices[tid, 1] k = indices[tid, 2] v0 = vertices[i] v1 = vertices[j] v2 = vertices[k] n = wp.normalize(wp.cross(v1 - v0, v2 - v0)) wp.atomic_add(normals, i, n) wp.atomic_add(faces_per_vertex, i, 1) wp.atomic_add(normals, j, n) wp.atomic_add(faces_per_vertex, j, 1) wp.atomic_add(normals, k, n) wp.atomic_add(faces_per_vertex, k, 1) @wp.kernel def assemble_gfx_vertices( vertices: wp.array(dtype=wp.vec3, ndim=1), normals: wp.array(dtype=wp.vec3), faces_per_vertex: wp.array(dtype=int), # outputs gfx_vertices: wp.array(dtype=float, ndim=2), ): tid = wp.tid() v = vertices[tid] n = normals[tid] / float(faces_per_vertex[tid]) gfx_vertices[tid, 0] = v[0] gfx_vertices[tid, 1] = v[1] gfx_vertices[tid, 2] = v[2] gfx_vertices[tid, 3] = n[0] gfx_vertices[tid, 4] = n[1] gfx_vertices[tid, 5] = n[2] @wp.kernel def copy_rgb_frame( input_img: wp.array(dtype=wp.uint8), width: int, height: int, # outputs output_img: wp.array(dtype=float, ndim=3), ): w, v = wp.tid() pixel = v * width + w pixel *= 3 r = float(input_img[pixel + 0]) g = float(input_img[pixel + 1]) b = float(input_img[pixel + 2]) # flip vertically (OpenGL coordinates start at bottom) v = height - v - 1 output_img[v, w, 0] = r / 255.0 output_img[v, w, 1] = g / 255.0 output_img[v, w, 2] = b / 255.0 @wp.kernel def copy_rgb_frame_uint8( input_img: wp.array(dtype=wp.uint8), width: int, height: int, # outputs output_img: wp.array(dtype=wp.uint8, ndim=3), ): w, v = wp.tid() pixel = v * width + w pixel *= 3 # flip vertically (OpenGL coordinates start at bottom) v = height - v - 1 output_img[v, w, 0] = input_img[pixel + 0] output_img[v, w, 1] 
= input_img[pixel + 1] output_img[v, w, 2] = input_img[pixel + 2] @wp.kernel def copy_depth_frame( input_img: wp.array(dtype=wp.float32), width: int, height: int, near: float, far: float, # outputs output_img: wp.array(dtype=wp.float32, ndim=3), ): w, v = wp.tid() pixel = v * width + w # flip vertically (OpenGL coordinates start at bottom) v = height - v - 1 d = 2.0 * input_img[pixel] - 1.0 d = 2.0 * near * far / ((far - near) * d - near - far) output_img[v, w, 0] = -d @wp.kernel def copy_rgb_frame_tiles( input_img: wp.array(dtype=wp.uint8), positions: wp.array(dtype=int, ndim=2), screen_width: int, screen_height: int, tile_height: int, # outputs output_img: wp.array(dtype=float, ndim=4), ): tile, x, y = wp.tid() p = positions[tile] qx = x + p[0] qy = y + p[1] pixel = qy * screen_width + qx # flip vertically (OpenGL coordinates start at bottom) y = tile_height - y - 1 if qx >= screen_width or qy >= screen_height: output_img[tile, y, x, 0] = 0.0 output_img[tile, y, x, 1] = 0.0 output_img[tile, y, x, 2] = 0.0 return # prevent out-of-bounds access pixel *= 3 r = float(input_img[pixel + 0]) g = float(input_img[pixel + 1]) b = float(input_img[pixel + 2]) output_img[tile, y, x, 0] = r / 255.0 output_img[tile, y, x, 1] = g / 255.0 output_img[tile, y, x, 2] = b / 255.0 @wp.kernel def copy_rgb_frame_tiles_uint8( input_img: wp.array(dtype=wp.uint8), positions: wp.array(dtype=int, ndim=2), screen_width: int, screen_height: int, tile_height: int, # outputs output_img: wp.array(dtype=wp.uint8, ndim=4), ): tile, x, y = wp.tid() p = positions[tile] qx = x + p[0] qy = y + p[1] pixel = qy * screen_width + qx # flip vertically (OpenGL coordinates start at bottom) y = tile_height - y - 1 if qx >= screen_width or qy >= screen_height: output_img[tile, y, x, 0] = wp.uint8(0) output_img[tile, y, x, 1] = wp.uint8(0) output_img[tile, y, x, 2] = wp.uint8(0) return # prevent out-of-bounds access pixel *= 3 output_img[tile, y, x, 0] = input_img[pixel + 0] output_img[tile, y, x, 1] = input_img[pixel + 1] output_img[tile, y, x, 2] = input_img[pixel + 2] @wp.kernel def copy_depth_frame_tiles( input_img: wp.array(dtype=wp.float32), positions: wp.array(dtype=int, ndim=2), screen_width: int, screen_height: int, tile_height: int, near: float, far: float, # outputs output_img: wp.array(dtype=wp.float32, ndim=4), ): tile, x, y = wp.tid() p = positions[tile] qx = x + p[0] qy = y + p[1] pixel = qy * screen_width + qx # flip vertically (OpenGL coordinates start at bottom) y = tile_height - y - 1 if qx >= screen_width or qy >= screen_height: output_img[tile, y, x, 0] = far return # prevent out-of-bounds access d = 2.0 * input_img[pixel] - 1.0 d = 2.0 * near * far / ((far - near) * d - near - far) output_img[tile, y, x, 0] = -d @wp.kernel def copy_rgb_frame_tile( input_img: wp.array(dtype=wp.uint8), offset_x: int, offset_y: int, screen_width: int, screen_height: int, tile_height: int, # outputs output_img: wp.array(dtype=float, ndim=4), ): tile, x, y = wp.tid() qx = x + offset_x qy = y + offset_y pixel = qy * screen_width + qx # flip vertically (OpenGL coordinates start at bottom) y = tile_height - y - 1 if qx >= screen_width or qy >= screen_height: output_img[tile, y, x, 0] = 0.0 output_img[tile, y, x, 1] = 0.0 output_img[tile, y, x, 2] = 0.0 return # prevent out-of-bounds access pixel *= 3 r = float(input_img[pixel + 0]) g = float(input_img[pixel + 1]) b = float(input_img[pixel + 2]) output_img[tile, y, x, 0] = r / 255.0 output_img[tile, y, x, 1] = g / 255.0 output_img[tile, y, x, 2] = b / 255.0 @wp.kernel def 
copy_rgb_frame_tile_uint8( input_img: wp.array(dtype=wp.uint8), offset_x: int, offset_y: int, screen_width: int, screen_height: int, tile_height: int, # outputs output_img: wp.array(dtype=wp.uint8, ndim=4), ): tile, x, y = wp.tid() qx = x + offset_x qy = y + offset_y pixel = qy * screen_width + qx # flip vertically (OpenGL coordinates start at bottom) y = tile_height - y - 1 if qx >= screen_width or qy >= screen_height: output_img[tile, y, x, 0] = wp.uint8(0) output_img[tile, y, x, 1] = wp.uint8(0) output_img[tile, y, x, 2] = wp.uint8(0) return # prevent out-of-bounds access pixel *= 3 output_img[tile, y, x, 0] = input_img[pixel + 0] output_img[tile, y, x, 1] = input_img[pixel + 1] output_img[tile, y, x, 2] = input_img[pixel + 2] def check_gl_error(): from pyglet import gl error = gl.glGetError() if error != gl.GL_NO_ERROR: print(f"OpenGL error: {error}") class ShapeInstancer: """ Handles instanced rendering for a mesh. Note the vertices must be in the 8-dimensional format: [3D point, 3D normal, UV texture coordinates] """ def __init__(self, shape_shader, device): self.shape_shader = shape_shader self.device = device self.face_count = 0 self.vao = None self.instance_transform_gl_buffer = None self.instance_color1_buffer = None self.instance_color2_buffer = None self.color1 = (1.0, 1.0, 1.0) self.color2 = (0.0, 0.0, 0.0) self.num_instances = 0 self.transforms = None self.scalings = None self._instance_transform_cuda_buffer = None def __del__(self): from pyglet import gl if self.instance_transform_gl_buffer is not None: try: gl.glDeleteBuffers(1, self.instance_transform_gl_buffer) gl.glDeleteBuffers(1, self.instance_color1_buffer) gl.glDeleteBuffers(1, self.instance_color2_buffer) except gl.GLException: pass if self.vao is not None: try: gl.glDeleteVertexArrays(1, self.vao) gl.glDeleteBuffers(1, self.vbo) gl.glDeleteBuffers(1, self.ebo) except gl.GLException: pass def register_shape(self, vertices, indices, color1=(1.0, 1.0, 1.0), color2=(0.0, 0.0, 0.0)): from pyglet import gl if color1 is not None and color2 is None: color2 = np.clip(np.array(color1) + 0.25, 0.0, 1.0) self.color1 = color1 self.color2 = color2 gl.glUseProgram(self.shape_shader.id) # Create VAO, VBO, and EBO self.vao = gl.GLuint() gl.glGenVertexArrays(1, self.vao) gl.glBindVertexArray(self.vao) self.vbo = gl.GLuint() gl.glGenBuffers(1, self.vbo) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo) gl.glBufferData(gl.GL_ARRAY_BUFFER, vertices.nbytes, vertices.ctypes.data, gl.GL_STATIC_DRAW) self.ebo = gl.GLuint() gl.glGenBuffers(1, self.ebo) gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices.ctypes.data, gl.GL_STATIC_DRAW) # Set up vertex attributes vertex_stride = vertices.shape[1] * vertices.itemsize # positions gl.glVertexAttribPointer(0, 3, gl.GL_FLOAT, gl.GL_FALSE, vertex_stride, ctypes.c_void_p(0)) gl.glEnableVertexAttribArray(0) # normals gl.glVertexAttribPointer(1, 3, gl.GL_FLOAT, gl.GL_FALSE, vertex_stride, ctypes.c_void_p(3 * vertices.itemsize)) gl.glEnableVertexAttribArray(1) # uv coordinates gl.glVertexAttribPointer(2, 2, gl.GL_FLOAT, gl.GL_FALSE, vertex_stride, ctypes.c_void_p(6 * vertices.itemsize)) gl.glEnableVertexAttribArray(2) gl.glBindVertexArray(0) self.face_count = len(indices) def update_colors(self, colors1, colors2): from pyglet import gl if colors1 is None: colors1 = np.tile(self.color1, (self.num_instances, 1)) if colors2 is None: colors2 = np.tile(self.color2, (self.num_instances, 1)) if np.shape(colors1) != (self.num_instances, 3): 
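            # a single color was given; broadcast it to all instances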
colors1 = np.tile(colors1, (self.num_instances, 1)) if np.shape(colors2) != (self.num_instances, 3): colors2 = np.tile(colors2, (self.num_instances, 1)) colors1 = np.array(colors1, dtype=np.float32) colors2 = np.array(colors2, dtype=np.float32) gl.glBindVertexArray(self.vao) # create buffer for checkerboard colors if self.instance_color1_buffer is None: self.instance_color1_buffer = gl.GLuint() gl.glGenBuffers(1, self.instance_color1_buffer) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.instance_color1_buffer) gl.glBufferData(gl.GL_ARRAY_BUFFER, colors1.nbytes, colors1.ctypes.data, gl.GL_STATIC_DRAW) if self.instance_color2_buffer is None: self.instance_color2_buffer = gl.GLuint() gl.glGenBuffers(1, self.instance_color2_buffer) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.instance_color2_buffer) gl.glBufferData(gl.GL_ARRAY_BUFFER, colors2.nbytes, colors2.ctypes.data, gl.GL_STATIC_DRAW) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.instance_color1_buffer) gl.glVertexAttribPointer(7, 3, gl.GL_FLOAT, gl.GL_FALSE, colors1[0].nbytes, ctypes.c_void_p(0)) gl.glEnableVertexAttribArray(7) gl.glVertexAttribDivisor(7, 1) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.instance_color2_buffer) gl.glVertexAttribPointer(8, 3, gl.GL_FLOAT, gl.GL_FALSE, colors2[0].nbytes, ctypes.c_void_p(0)) gl.glEnableVertexAttribArray(8) gl.glVertexAttribDivisor(8, 1) def allocate_instances(self, positions, rotations=None, colors1=None, colors2=None, scalings=None): from pyglet import gl gl.glBindVertexArray(self.vao) self.num_instances = len(positions) # Create instance buffer and bind it as an instanced array if self.instance_transform_gl_buffer is None: self.instance_transform_gl_buffer = gl.GLuint() gl.glGenBuffers(1, self.instance_transform_gl_buffer) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.instance_transform_gl_buffer) self.instance_ids = wp.array(np.arange(self.num_instances), dtype=wp.int32, device=self.device) if rotations is None: self.instance_transforms = wp.array( [(*pos, 0.0, 0.0, 0.0, 1.0) for pos in positions], dtype=wp.transform, device=self.device ) else: self.instance_transforms = wp.array( [(*pos, *rot) for pos, rot in zip(positions, rotations)], dtype=wp.transform, device=self.device, ) if scalings is None: self.instance_scalings = wp.array( np.tile((1.0, 1.0, 1.0), (self.num_instances, 1)), dtype=wp.vec3, device=self.device ) else: self.instance_scalings = wp.array(scalings, dtype=wp.vec3, device=self.device) vbo_transforms = wp.zeros(dtype=wp.mat44, shape=(self.num_instances,), device=self.device) wp.launch( update_vbo_transforms, dim=self.num_instances, inputs=[ self.instance_ids, None, self.instance_transforms, self.instance_scalings, None, ], outputs=[ vbo_transforms, ], device=self.device, ) vbo_transforms = vbo_transforms.numpy() gl.glBufferData(gl.GL_ARRAY_BUFFER, vbo_transforms.nbytes, vbo_transforms.ctypes.data, gl.GL_DYNAMIC_DRAW) # Create CUDA buffer for instance transforms self._instance_transform_cuda_buffer = wp.RegisteredGLBuffer( int(self.instance_transform_gl_buffer.value), self.device ) self.update_colors(colors1, colors2) # Set up instance attribute pointers matrix_size = vbo_transforms[0].nbytes gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.instance_transform_gl_buffer) # we can only send vec4s to the shader, so we need to split the instance transforms matrix into its column vectors for i in range(4): gl.glVertexAttribPointer( 3 + i, 4, gl.GL_FLOAT, gl.GL_FALSE, matrix_size, ctypes.c_void_p(i * matrix_size // 4) ) gl.glEnableVertexAttribArray(3 + i) gl.glVertexAttribDivisor(3 + i, 1) 
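        # note: a mat4 attribute spans four consecutive vertex attribute slots (locations 3-6); the divisor
        # of 1 makes OpenGL advance these attributes once per instance instead of once per vertex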
        gl.glBindVertexArray(0)

    def update_instances(self, transforms: wp.array = None, scalings: wp.array = None, colors1=None, colors2=None):
        from pyglet import gl

        if transforms is not None:
            if transforms.device.is_cuda:
                wp_transforms = transforms
            else:
                wp_transforms = transforms.to(self.device)
            self.transforms = wp_transforms
        if scalings is not None:
            if scalings.device.is_cuda:
                wp_scalings = scalings
            else:
                wp_scalings = scalings.to(self.device)
            self.scalings = wp_scalings

        if transforms is not None or scalings is not None:
            gl.glBindVertexArray(self.vao)
            vbo_transforms = self._instance_transform_cuda_buffer.map(dtype=wp.mat44, shape=(self.num_instances,))

            wp.launch(
                update_vbo_transforms,
                dim=self.num_instances,
                inputs=[
                    self.instance_ids,
                    None,
                    self.instance_transforms,
                    self.instance_scalings,
                    None,
                ],
                outputs=[
                    vbo_transforms,
                ],
                device=self.device,
            )

            self._instance_transform_cuda_buffer.unmap()

        if colors1 is not None or colors2 is not None:
            self.update_colors(colors1, colors2)

    def render(self):
        from pyglet import gl

        gl.glUseProgram(self.shape_shader.id)

        gl.glBindVertexArray(self.vao)
        gl.glDrawElementsInstanced(gl.GL_TRIANGLES, self.face_count, gl.GL_UNSIGNED_INT, None, self.num_instances)
        gl.glBindVertexArray(0)

    # scope exposes VBO transforms to be set directly by a warp kernel
    def __enter__(self):
        from pyglet import gl

        gl.glBindVertexArray(self.vao)
        self.vbo_transforms = self._instance_transform_cuda_buffer.map(dtype=wp.mat44, shape=(self.num_instances,))
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._instance_transform_cuda_buffer.unmap()


def str_buffer(string: str):
    return ctypes.c_char_p(string.encode("utf-8"))


def arr_pointer(arr: np.ndarray):
    return arr.astype(np.float32).ctypes.data_as(ctypes.POINTER(ctypes.c_float))


class OpenGLRenderer:
    """
    OpenGLRenderer is a simple OpenGL renderer for rendering 3D shapes and meshes.
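
    A minimal usage sketch (illustrative only; it assumes pyglet >= 2.0 and a CUDA-capable default device,
    and it omits shape registration)::

        renderer = OpenGLRenderer(screen_width=1024, screen_height=768)
        sim_time = 0.0
        while renderer.is_running():
            renderer.begin_frame(sim_time)
            # ... update shape instances here ...
            renderer.end_frame()
            sim_time += 1.0 / 60.0
        renderer.close()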
""" # number of segments to use for rendering spheres, capsules, cones and cylinders default_num_segments = 32 def __init__( self, title="Warp sim", scaling=1.0, fps=60, up_axis="Y", screen_width=1024, screen_height=768, near_plane=1.0, far_plane=100.0, camera_fov=45.0, camera_pos=(0.0, 2.0, 10.0), camera_front=(0.0, 0.0, -1.0), camera_up=(0.0, 1.0, 0.0), background_color=(0.53, 0.8, 0.92), draw_grid=True, draw_sky=True, draw_axis=True, show_info=True, render_wireframe=False, render_depth=False, axis_scale=1.0, vsync=False, headless=False, enable_backface_culling=True, enable_mouse_interaction=True, enable_keyboard_interaction=True, ): try: import pyglet # disable error checking for performance pyglet.options["debug_gl"] = False from pyglet import gl from pyglet.graphics.shader import Shader, ShaderProgram from pyglet.math import Vec3 as PyVec3 except ImportError as e: raise Exception("OpenGLRenderer requires pyglet (version >= 2.0) to be installed.") from e self.camera_near_plane = near_plane self.camera_far_plane = far_plane self.camera_fov = camera_fov self.background_color = background_color self.draw_grid = draw_grid self.draw_sky = draw_sky self.draw_axis = draw_axis self.show_info = show_info self.render_wireframe = render_wireframe self.render_depth = render_depth self.enable_backface_culling = enable_backface_culling self._device = wp.get_cuda_device() self._title = title self.window = pyglet.window.Window( width=screen_width, height=screen_height, caption=title, resizable=True, vsync=vsync, visible=not headless ) self.app = pyglet.app # making window current opengl rendering context self.window.switch_to() self.screen_width, self.screen_height = self.window.get_framebuffer_size() self.enable_mouse_interaction = enable_mouse_interaction self.enable_keyboard_interaction = enable_keyboard_interaction self._camera_speed = 0.04 if isinstance(up_axis, int): self._camera_axis = up_axis else: self._camera_axis = "XYZ".index(up_axis.upper()) self._yaw, self._pitch = -90.0, 0.0 self._last_x, self._last_y = self.screen_width // 2, self.screen_height // 2 self._first_mouse = True self._left_mouse_pressed = False self._keys_pressed = defaultdict(bool) self._input_processors = [] self._key_callbacks = [] self.render_2d_callbacks = [] self.render_3d_callbacks = [] self._camera_pos = PyVec3(0.0, 0.0, 0.0) self._camera_front = PyVec3(0.0, 0.0, -1.0) self._camera_up = PyVec3(0.0, 1.0, 0.0) self._scaling = scaling self._model_matrix = self.compute_model_matrix(self._camera_axis, scaling) self.update_view_matrix(cam_pos=camera_pos, cam_front=camera_front, cam_up=camera_up) self.update_projection_matrix() self._frame_dt = 1.0 / fps self.time = 0.0 self._start_time = time.time() self.clock_time = 0.0 self._paused = False self._frame_speed = 0.0 self.skip_rendering = False self._skip_frame_counter = 0 self._fps_update = 0.0 self._fps_render = 0.0 self._fps_alpha = 0.1 # low pass filter rate to update FPS stats self._body_name = {} self._shapes = [] self._shape_geo_hash = {} self._shape_gl_buffers = {} self._shape_instances = defaultdict(list) self._instances = {} self._instance_custom_ids = {} self._instance_shape = {} self._instance_gl_buffers = {} self._instance_transform_gl_buffer = None self._instance_transform_cuda_buffer = None self._instance_color1_buffer = None self._instance_color2_buffer = None self._instance_count = 0 self._wp_instance_ids = None self._wp_instance_custom_ids = None self._np_instance_visible = None self._instance_ids = None self._inverse_instance_ids = None 
self._wp_instance_transforms = None self._wp_instance_scalings = None self._wp_instance_bodies = None self._update_shape_instances = False self._add_shape_instances = False # additional shape instancer used for points and line rendering self._shape_instancers = {} # instancer for the arrow shapes sof the coordinate system axes self._axis_instancer = None # toggle tiled rendering self._tiled_rendering = False self._tile_instances = None self._tile_ncols = 0 self._tile_nrows = 0 self._tile_width = 0 self._tile_height = 0 self._tile_viewports = None self._tile_view_matrices = None self._tile_projection_matrices = None self._frame_texture = None self._frame_depth_texture = None self._frame_fbo = None self._frame_pbo = None self.window.push_handlers(on_draw=self._draw) self.window.push_handlers(on_resize=self._window_resize_callback) self.window.push_handlers(on_key_press=self._key_press_callback) self._key_handler = pyglet.window.key.KeyStateHandler() self.window.push_handlers(self._key_handler) self.window.on_mouse_scroll = self._scroll_callback self.window.on_mouse_drag = self._mouse_drag_callback gl.glClearColor(*self.background_color, 1) gl.glEnable(gl.GL_DEPTH_TEST) gl.glDepthMask(True) gl.glDepthRange(0.0, 1.0) self._shape_shader = ShaderProgram( Shader(shape_vertex_shader, "vertex"), Shader(shape_fragment_shader, "fragment") ) self._grid_shader = ShaderProgram( Shader(grid_vertex_shader, "vertex"), Shader(grid_fragment_shader, "fragment") ) self._sun_direction = np.array((-0.2, 0.8, 0.3)) self._sun_direction /= np.linalg.norm(self._sun_direction) with self._shape_shader: gl.glUniform3f( gl.glGetUniformLocation(self._shape_shader.id, str_buffer("sunDirection")), *self._sun_direction ) gl.glUniform3f(gl.glGetUniformLocation(self._shape_shader.id, str_buffer("lightColor")), 1, 1, 1) self._loc_shape_model = gl.glGetUniformLocation(self._shape_shader.id, str_buffer("model")) self._loc_shape_view = gl.glGetUniformLocation(self._shape_shader.id, str_buffer("view")) self._loc_shape_projection = gl.glGetUniformLocation(self._shape_shader.id, str_buffer("projection")) self._loc_shape_view_pos = gl.glGetUniformLocation(self._shape_shader.id, str_buffer("viewPos")) gl.glUniform3f(self._loc_shape_view_pos, 0, 0, 10) # create grid data limit = 10.0 ticks = np.linspace(-limit, limit, 21) grid_vertices = [] for i in ticks: if self._camera_axis == 0: grid_vertices.extend([0, -limit, i, 0, limit, i]) grid_vertices.extend([0, i, -limit, 0, i, limit]) elif self._camera_axis == 1: grid_vertices.extend([-limit, 0, i, limit, 0, i]) grid_vertices.extend([i, 0, -limit, i, 0, limit]) elif self._camera_axis == 2: grid_vertices.extend([-limit, i, 0, limit, i, 0]) grid_vertices.extend([i, -limit, 0, i, limit, 0]) grid_vertices = np.array(grid_vertices, dtype=np.float32) self._grid_vertex_count = len(grid_vertices) // 3 with self._grid_shader: self._grid_vao = gl.GLuint() gl.glGenVertexArrays(1, self._grid_vao) gl.glBindVertexArray(self._grid_vao) self._grid_vbo = gl.GLuint() gl.glGenBuffers(1, self._grid_vbo) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self._grid_vbo) gl.glBufferData(gl.GL_ARRAY_BUFFER, grid_vertices.nbytes, grid_vertices.ctypes.data, gl.GL_STATIC_DRAW) self._loc_grid_view = gl.glGetUniformLocation(self._grid_shader.id, str_buffer("view")) self._loc_grid_model = gl.glGetUniformLocation(self._grid_shader.id, str_buffer("model")) self._loc_grid_projection = gl.glGetUniformLocation(self._grid_shader.id, str_buffer("projection")) self._loc_grid_pos_attribute = gl.glGetAttribLocation(self._grid_shader.id, 
str_buffer("position")) gl.glVertexAttribPointer(self._loc_grid_pos_attribute, 3, gl.GL_FLOAT, gl.GL_FALSE, 0, None) gl.glEnableVertexAttribArray(self._loc_grid_pos_attribute) # create sky data self._sky_shader = ShaderProgram(Shader(sky_vertex_shader, "vertex"), Shader(sky_fragment_shader, "fragment")) with self._sky_shader: self._loc_sky_view = gl.glGetUniformLocation(self._sky_shader.id, str_buffer("view")) self._loc_sky_model = gl.glGetUniformLocation(self._sky_shader.id, str_buffer("model")) self._loc_sky_projection = gl.glGetUniformLocation(self._sky_shader.id, str_buffer("projection")) self._loc_sky_color1 = gl.glGetUniformLocation(self._sky_shader.id, str_buffer("color1")) self._loc_sky_color2 = gl.glGetUniformLocation(self._sky_shader.id, str_buffer("color2")) self._loc_sky_far_plane = gl.glGetUniformLocation(self._sky_shader.id, str_buffer("farPlane")) gl.glUniform3f(self._loc_sky_color1, *background_color) # glUniform3f(self._loc_sky_color2, *np.clip(np.array(background_color)+0.5, 0.0, 1.0)) gl.glUniform3f(self._loc_sky_color2, 0.8, 0.4, 0.05) gl.glUniform1f(self._loc_sky_far_plane, self.camera_far_plane) self._loc_sky_view_pos = gl.glGetUniformLocation(self._sky_shader.id, str_buffer("viewPos")) gl.glUniform3f( gl.glGetUniformLocation(self._sky_shader.id, str_buffer("sunDirection")), *self._sun_direction ) # create VAO, VBO, and EBO self._sky_vao = gl.GLuint() gl.glGenVertexArrays(1, self._sky_vao) gl.glBindVertexArray(self._sky_vao) vertices, indices = self._create_sphere_mesh(self.camera_far_plane * 0.9, 32, 32, reverse_winding=True) self._sky_tri_count = len(indices) self._sky_vbo = gl.GLuint() gl.glGenBuffers(1, self._sky_vbo) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self._sky_vbo) gl.glBufferData(gl.GL_ARRAY_BUFFER, vertices.nbytes, vertices.ctypes.data, gl.GL_STATIC_DRAW) self._sky_ebo = gl.GLuint() gl.glGenBuffers(1, self._sky_ebo) gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self._sky_ebo) gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices.ctypes.data, gl.GL_STATIC_DRAW) # set up vertex attributes vertex_stride = vertices.shape[1] * vertices.itemsize # positions gl.glVertexAttribPointer(0, 3, gl.GL_FLOAT, gl.GL_FALSE, vertex_stride, ctypes.c_void_p(0)) gl.glEnableVertexAttribArray(0) # normals gl.glVertexAttribPointer(1, 3, gl.GL_FLOAT, gl.GL_FALSE, vertex_stride, ctypes.c_void_p(3 * vertices.itemsize)) gl.glEnableVertexAttribArray(1) # uv coordinates gl.glVertexAttribPointer(2, 2, gl.GL_FLOAT, gl.GL_FALSE, vertex_stride, ctypes.c_void_p(6 * vertices.itemsize)) gl.glEnableVertexAttribArray(2) gl.glBindVertexArray(0) self._last_time = time.time() self._last_begin_frame_time = self._last_time self._last_end_frame_time = self._last_time # create arrow shapes for the coordinate system axes vertices, indices = self._create_arrow_mesh( base_radius=0.02 * axis_scale, base_height=0.85 * axis_scale, cap_height=0.15 * axis_scale ) self._axis_instancer = ShapeInstancer(self._shape_shader, self._device) self._axis_instancer.register_shape(vertices, indices) sqh = np.sqrt(0.5) self._axis_instancer.allocate_instances( positions=[(0.0, 0.0, 0.0), (0.0, 0.0, 0.0), (0.0, 0.0, 0.0)], rotations=[(0.0, 0.0, 0.0, 1.0), (0.0, 0.0, -sqh, sqh), (sqh, 0.0, 0.0, sqh)], colors1=[(0.0, 1.0, 0.0), (1.0, 0.0, 0.0), (0.0, 0.0, 1.0)], colors2=[(0.0, 1.0, 0.0), (1.0, 0.0, 0.0), (0.0, 0.0, 1.0)], ) # create frame buffer for rendering to a texture self._frame_texture = None self._frame_depth_texture = None self._frame_fbo = None self._setup_framebuffer() # fmt: off # set up VBO for the quad 
that is rendered to the user window with the texture self._frame_vertices = np.array([ # Positions TexCoords -1.0, -1.0, 0.0, 0.0, 1.0, -1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, -1.0, 1.0, 0.0, 1.0 ], dtype=np.float32) # fmt: on self._frame_indices = np.array([0, 1, 2, 2, 3, 0], dtype=np.uint32) self._frame_vao = gl.GLuint() gl.glGenVertexArrays(1, self._frame_vao) gl.glBindVertexArray(self._frame_vao) self._frame_vbo = gl.GLuint() gl.glGenBuffers(1, self._frame_vbo) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self._frame_vbo) gl.glBufferData( gl.GL_ARRAY_BUFFER, self._frame_vertices.nbytes, self._frame_vertices.ctypes.data, gl.GL_STATIC_DRAW ) self._frame_ebo = gl.GLuint() gl.glGenBuffers(1, self._frame_ebo) gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self._frame_ebo) gl.glBufferData( gl.GL_ELEMENT_ARRAY_BUFFER, self._frame_indices.nbytes, self._frame_indices.ctypes.data, gl.GL_STATIC_DRAW ) gl.glVertexAttribPointer(0, 2, gl.GL_FLOAT, gl.GL_FALSE, 4 * self._frame_vertices.itemsize, ctypes.c_void_p(0)) gl.glEnableVertexAttribArray(0) gl.glVertexAttribPointer( 1, 2, gl.GL_FLOAT, gl.GL_FALSE, 4 * self._frame_vertices.itemsize, ctypes.c_void_p(2 * vertices.itemsize) ) gl.glEnableVertexAttribArray(1) self._frame_shader = ShaderProgram( Shader(frame_vertex_shader, "vertex"), Shader(frame_fragment_shader, "fragment") ) gl.glUseProgram(self._frame_shader.id) self._frame_loc_texture = gl.glGetUniformLocation(self._frame_shader.id, str_buffer("textureSampler")) self._frame_depth_shader = ShaderProgram( Shader(frame_vertex_shader, "vertex"), Shader(frame_depth_fragment_shader, "fragment") ) gl.glUseProgram(self._frame_depth_shader.id) self._frame_loc_depth_texture = gl.glGetUniformLocation( self._frame_depth_shader.id, str_buffer("textureSampler") ) # unbind the VBO and VAO gl.glBindBuffer(gl.GL_ARRAY_BUFFER, 0) gl.glBindVertexArray(0) # update model matrix self.scaling = scaling check_gl_error() # create text to render stats on the screen self._info_label = pyglet.text.Label( "", font_name="Arial", font_size=12, color=(255, 255, 255, 255), x=10, y=10, anchor_x="left", anchor_y="top", multiline=True, width=400, ) # set up our own event handling so we can synchronously render frames # by calling update() in a loop from pyglet.window import Window Window._enable_event_queue = False self.window.switch_to() self.window.dispatch_pending_events() platform_event_loop = self.app.platform_event_loop platform_event_loop.start() # start event loop self.app.event_loop.dispatch_event("on_enter") @property def paused(self): return self._paused @paused.setter def paused(self, value): self._paused = value if value: self.window.set_caption(f"{self._title} (paused)") else: self.window.set_caption(self._title) @property def has_exit(self): return self.app.event_loop.has_exit def clear(self): from pyglet import gl self.app.event_loop.dispatch_event("on_exit") self.app.platform_event_loop.stop() if self._instance_transform_gl_buffer is not None: try: gl.glDeleteBuffers(1, self._instance_transform_gl_buffer) gl.glDeleteBuffers(1, self._instance_color1_buffer) gl.glDeleteBuffers(1, self._instance_color2_buffer) except gl.GLException: pass for vao, vbo, ebo, _, _vertex_cuda_buffer in self._shape_gl_buffers.values(): try: gl.glDeleteVertexArrays(1, vao) gl.glDeleteBuffers(1, vbo) gl.glDeleteBuffers(1, ebo) except gl.GLException: pass self._body_name.clear() self._shapes.clear() self._shape_geo_hash.clear() self._shape_gl_buffers.clear() self._shape_instances.clear() self._instances.clear() self._instance_shape.clear() 
        self._instance_gl_buffers.clear()
        self._instance_transform_gl_buffer = None
        self._instance_transform_cuda_buffer = None
        self._instance_color1_buffer = None
        self._instance_color2_buffer = None
        self._wp_instance_ids = None
        self._wp_instance_custom_ids = None
        self._wp_instance_transforms = None
        self._wp_instance_scalings = None
        self._wp_instance_bodies = None
        self._np_instance_visible = None
        self._update_shape_instances = False

    def close(self):
        self.clear()
        self.window.close()

    @property
    def tiled_rendering(self):
        return self._tiled_rendering

    @tiled_rendering.setter
    def tiled_rendering(self, value):
        if value:
            assert self._tile_instances is not None, "Tiled rendering is not set up. Call setup_tiled_rendering first."
        self._tiled_rendering = value

    def setup_tiled_rendering(
        self,
        instances: List[List[int]],
        rescale_window: bool = False,
        tile_width: Optional[int] = None,
        tile_height: Optional[int] = None,
        tile_ncols: Optional[int] = None,
        tile_nrows: Optional[int] = None,
        tile_positions: Optional[List[Tuple[int]]] = None,
        tile_sizes: Optional[List[Tuple[int]]] = None,
        projection_matrices: Optional[List[Mat44]] = None,
        view_matrices: Optional[List[Mat44]] = None,
    ):
        """
        Set up tiled rendering where the render buffer is split into multiple tiles that can visualize
        different shape instances of the scene with different view and projection matrices.
        See :meth:`get_pixels`, which allows retrieving the pixels of each tile.

        :param instances: A list of lists of shape instance ids. Each list of shape instance ids will be
            rendered into a separate tile.
        :param rescale_window: If True, the window will be resized to fit the tiles.
        :param tile_width: The width of each tile in pixels (optional).
        :param tile_height: The height of each tile in pixels (optional).
        :param tile_ncols: The number of tiles rendered horizontally (optional). Will be considered if
            `tile_width` is set to compute the tile positions, unless `tile_positions` is defined.
        :param tile_nrows: The number of tiles rendered vertically (optional). Will be considered if
            `tile_height` is set to compute the tile positions, unless `tile_positions` is defined.
        :param tile_positions: A list of (x, y) tuples specifying the position of each tile in pixels.
            If None, the tiles will be arranged in a square grid, or, if `tile_ncols` and `tile_nrows`
            are set, in a grid with the specified number of columns and rows.
        :param tile_sizes: A list of (width, height) tuples specifying the size of each tile in pixels.
            If None, the tiles will have the same size as specified by `tile_width` and `tile_height`.
        :param projection_matrices: A list of projection matrices for each tile (each projection matrix is
            either a flattened 16-dimensional array or a 4x4 matrix). If the entire list is None, or if
            individual entries are None, the projection matrices for all tiles, or for those tiles,
            respectively, will be derived from the current render settings.
        :param view_matrices: A list of view matrices for each tile (each view matrix is either a flattened
            16-dimensional array or a 4x4 matrix). If the entire list is None, or if individual entries are
            None, the view matrices for all tiles, or for those tiles, respectively, will be derived from
            the current camera settings and be updated when the camera is moved.
        """
        assert len(instances) > 0 and all(isinstance(i, list) for i in instances), "Invalid tile instances."
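        # Illustrative example (not executed here): render shape instance 0 into a left tile and
        # shape instance 1 into a right tile of equal size:
        #   renderer.setup_tiled_rendering(instances=[[0], [1]], tile_ncols=2, tile_nrows=1)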
self._tile_instances = instances n = len(self._tile_instances) if tile_positions is None or tile_sizes is None: if tile_ncols is None or tile_nrows is None: # try to fit the tiles into a square self._tile_ncols = int(np.ceil(np.sqrt(n))) self._tile_nrows = int(np.ceil(n / float(self._tile_ncols))) else: self._tile_ncols = tile_ncols self._tile_nrows = tile_nrows self._tile_width = tile_width or max(32, self.screen_width // self._tile_ncols) self._tile_height = tile_height or max(32, self.screen_height // self._tile_nrows) self._tile_viewports = [ (i * self._tile_width, j * self._tile_height, self._tile_width, self._tile_height) for i in range(self._tile_ncols) for j in range(self._tile_nrows) ] if rescale_window: self.window.set_size(self._tile_width * self._tile_ncols, self._tile_height * self._tile_nrows) else: assert ( len(tile_positions) == n and len(tile_sizes) == n ), "Number of tiles does not match number of instances." self._tile_ncols = None self._tile_nrows = None self._tile_width = None self._tile_height = None if all(tile_sizes[i][0] == tile_sizes[0][0] for i in range(n)): # tiles all have the same width self._tile_width = tile_sizes[0][0] if all(tile_sizes[i][1] == tile_sizes[0][1] for i in range(n)): # tiles all have the same height self._tile_height = tile_sizes[0][1] self._tile_viewports = [(x, y, w, h) for (x, y), (w, h) in zip(tile_positions, tile_sizes)] if projection_matrices is None: projection_matrices = [None] * n self._tile_projection_matrices = [] for i, p in enumerate(projection_matrices): if p is None: w, h = self._tile_viewports[i][2:] self._tile_projection_matrices.append( self.compute_projection_matrix( self.camera_fov, w / h, self.camera_near_plane, self.camera_far_plane ) ) else: self._tile_projection_matrices.append(np.array(p).flatten()) if view_matrices is None: self._tile_view_matrices = [None] * n else: self._tile_view_matrices = [np.array(m).flatten() for m in view_matrices] self._tiled_rendering = True def update_tile( self, tile_id, instances: Optional[List[int]] = None, projection_matrix: Optional[Mat44] = None, view_matrix: Optional[Mat44] = None, tile_size: Optional[Tuple[int]] = None, tile_position: Optional[Tuple[int]] = None, ): """ Update the shape instances, projection matrix, view matrix, tile size, or tile position for a given tile given its index. :param tile_id: The index of the tile to update. :param instances: A list of shape instance ids (optional). :param projection_matrix: A projection matrix (optional). :param view_matrix: A view matrix (optional). :param tile_size: A (width, height) tuple specifying the size of the tile in pixels (optional). :param tile_position: A (x, y) tuple specifying the position of the tile in pixels (optional). """ assert self._tile_instances is not None, "Tiled rendering is not set up. Call setup_tiled_rendering first." assert tile_id < len(self._tile_instances), "Invalid tile id." 
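        # only the arguments that are provided are updated; omitted arguments keep their previous values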
if instances is not None: self._tile_instances[tile_id] = instances if projection_matrix is not None: self._tile_projection_matrices[tile_id] = np.array(projection_matrix).flatten() if view_matrix is not None: self._tile_view_matrices[tile_id] = np.array(view_matrix).flatten() (x, y, w, h) = self._tile_viewports[tile_id] if tile_size is not None: w, h = tile_size if tile_position is not None: x, y = tile_position self._tile_viewports[tile_id] = (x, y, w, h) def _setup_framebuffer(self): from pyglet import gl if self._frame_texture is None: self._frame_texture = gl.GLuint() gl.glGenTextures(1, self._frame_texture) if self._frame_depth_texture is None: self._frame_depth_texture = gl.GLuint() gl.glGenTextures(1, self._frame_depth_texture) # set up RGB texture gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) gl.glBindBuffer(gl.GL_PIXEL_UNPACK_BUFFER, 0) gl.glBindTexture(gl.GL_TEXTURE_2D, self._frame_texture) gl.glTexImage2D( gl.GL_TEXTURE_2D, 0, gl.GL_RGB, self.screen_width, self.screen_height, 0, gl.GL_RGB, gl.GL_UNSIGNED_BYTE, None, ) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR) # set up depth texture gl.glBindTexture(gl.GL_TEXTURE_2D, self._frame_depth_texture) gl.glTexImage2D( gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT32, self.screen_width, self.screen_height, 0, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT, None, ) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR) gl.glBindTexture(gl.GL_TEXTURE_2D, 0) # create a framebuffer object (FBO) if self._frame_fbo is None: self._frame_fbo = gl.GLuint() gl.glGenFramebuffers(1, self._frame_fbo) gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self._frame_fbo) # attach the texture to the FBO as its color attachment gl.glFramebufferTexture2D( gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, self._frame_texture, 0 ) # attach the depth texture to the FBO as its depth attachment gl.glFramebufferTexture2D( gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, self._frame_depth_texture, 0 ) if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE: print("Framebuffer is not complete!") gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) sys.exit(1) # unbind the FBO (switch back to the default framebuffer) gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) if self._frame_pbo is None: self._frame_pbo = gl.GLuint() gl.glGenBuffers(1, self._frame_pbo) # generate 1 buffer reference gl.glBindBuffer(gl.GL_PIXEL_PACK_BUFFER, self._frame_pbo) # binding to this buffer # allocate memory for PBO rgb_bytes_per_pixel = 3 depth_bytes_per_pixel = 4 pixels = np.zeros( (self.screen_height, self.screen_width, rgb_bytes_per_pixel + depth_bytes_per_pixel), dtype=np.uint8 ) gl.glBufferData(gl.GL_PIXEL_PACK_BUFFER, pixels.nbytes, pixels.ctypes.data, gl.GL_DYNAMIC_DRAW) gl.glBindBuffer(gl.GL_PIXEL_PACK_BUFFER, 0) @staticmethod def compute_projection_matrix( fov: float, aspect_ratio: float, near_plane: float, far_plane: float, ) -> Mat44: """ Compute a projection matrix given the field of view, aspect ratio, near plane, and far plane. :param fov: The field of view in degrees. :param aspect_ratio: The aspect ratio (width / height). :param near_plane: The near plane. :param far_plane: The far plane. :return: A projection matrix. 
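
        Example (illustrative): ``OpenGLRenderer.compute_projection_matrix(45.0, 16.0 / 9.0, 0.01, 100.0)``
        builds a perspective matrix that can be passed to :meth:`setup_tiled_rendering` or :meth:`update_tile`.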
""" from pyglet.math import Mat4 as PyMat4 return np.array(PyMat4.perspective_projection(aspect_ratio, near_plane, far_plane, fov)) def update_projection_matrix(self): if self.screen_height == 0: return aspect_ratio = self.screen_width / self.screen_height self._projection_matrix = self.compute_projection_matrix( self.camera_fov, aspect_ratio, self.camera_near_plane, self.camera_far_plane ) @property def camera_pos(self): return self._camera_pos @camera_pos.setter def camera_pos(self, value): self.update_view_matrix(cam_pos=value) @property def camera_front(self): return self._camera_front @camera_front.setter def camera_front(self, value): self.update_view_matrix(cam_front=value) @property def camera_up(self): return self._camera_up @camera_up.setter def camera_up(self, value): self.update_view_matrix(cam_up=value) def compute_view_matrix(self, cam_pos, cam_front, cam_up): from pyglet.math import Mat4, Vec3 model = np.array(self._model_matrix).reshape((4, 4)) cp = model @ np.array([*cam_pos / self._scaling, 1.0]) cf = model @ np.array([*cam_front / self._scaling, 1.0]) up = model @ np.array([*cam_up / self._scaling, 0.0]) cp = Vec3(*cp[:3]) cf = Vec3(*cf[:3]) up = Vec3(*up[:3]) return np.array(Mat4.look_at(cp, cp + cf, up), dtype=np.float32) def update_view_matrix(self, cam_pos=None, cam_front=None, cam_up=None, stiffness=1.0): from pyglet.math import Vec3 if cam_pos is not None: self._camera_pos = self._camera_pos * (1.0 - stiffness) + Vec3(*cam_pos) * stiffness if cam_front is not None: self._camera_front = self._camera_front * (1.0 - stiffness) + Vec3(*cam_front) * stiffness if cam_up is not None: self._camera_up = self._camera_up * (1.0 - stiffness) + Vec3(*cam_up) * stiffness self._view_matrix = self.compute_view_matrix(self._camera_pos, self._camera_front, self._camera_up) @staticmethod def compute_model_matrix(camera_axis: int, scaling: float): if camera_axis == 0: return np.array((0, 0, scaling, 0, scaling, 0, 0, 0, 0, scaling, 0, 0, 0, 0, 0, 1), dtype=np.float32) elif camera_axis == 2: return np.array((-scaling, 0, 0, 0, 0, 0, scaling, 0, 0, scaling, 0, 0, 0, 0, 0, 1), dtype=np.float32) return np.array((scaling, 0, 0, 0, 0, scaling, 0, 0, 0, 0, scaling, 0, 0, 0, 0, 1), dtype=np.float32) def update_model_matrix(self, model_matrix: Optional[Mat44] = None): from pyglet import gl # fmt: off if model_matrix is None: self._model_matrix = self.compute_model_matrix(self._camera_axis, self._scaling) else: self._model_matrix = np.array(model_matrix).flatten() # fmt: on ptr = arr_pointer(self._model_matrix) gl.glUseProgram(self._shape_shader.id) gl.glUniformMatrix4fv(self._loc_shape_model, 1, gl.GL_FALSE, ptr) gl.glUseProgram(self._grid_shader.id) gl.glUniformMatrix4fv(self._loc_grid_model, 1, gl.GL_FALSE, ptr) gl.glUseProgram(self._sky_shader.id) gl.glUniformMatrix4fv(self._loc_sky_model, 1, gl.GL_FALSE, ptr) @property def num_tiles(self): return len(self._tile_instances) @property def tile_width(self): return self._tile_width @property def tile_height(self): return self._tile_height @property def num_shapes(self): return len(self._shapes) @property def num_instances(self): return self._instance_count @property def scaling(self): return self._scaling @scaling.setter def scaling(self, scaling): self._scaling = scaling self.update_model_matrix() def begin_frame(self, t: float = None): self._last_begin_frame_time = time.time() self.time = t or self.clock_time def end_frame(self): self._last_end_frame_time = time.time() if self._add_shape_instances: self.allocate_shape_instances() if 
self._update_shape_instances: self.update_shape_instances() self.update() while self.paused and self.is_running(): self.update() def update(self): self.clock_time = time.time() - self._start_time update_duration = self.clock_time - self._last_time frame_duration = self._last_end_frame_time - self._last_begin_frame_time self._last_time = self.clock_time self._frame_speed = update_duration * 100.0 # self.app.event_loop.idle() self.app.platform_event_loop.step(self._frame_dt * 1e-3) if not self.skip_rendering: self._skip_frame_counter += 1 if self._skip_frame_counter > 100: self._skip_frame_counter = 0 if frame_duration > 0.0: if self._fps_update is None: self._fps_update = 1.0 / frame_duration else: update = 1.0 / frame_duration self._fps_update = (1.0 - self._fps_alpha) * self._fps_update + self._fps_alpha * update if update_duration > 0.0: if self._fps_render is None: self._fps_render = 1.0 / update_duration else: update = 1.0 / update_duration self._fps_render = (1.0 - self._fps_alpha) * self._fps_render + self._fps_alpha * update self.app.event_loop._redraw_windows(self._frame_dt * 1e-3) def _draw(self): from pyglet import gl # catch key hold events self._process_inputs() if self.enable_backface_culling: gl.glEnable(gl.GL_CULL_FACE) else: gl.glDisable(gl.GL_CULL_FACE) if self._frame_fbo is not None: gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self._frame_fbo) gl.glClearColor(*self.background_color, 1) gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) gl.glBindVertexArray(0) if not self._tiled_rendering: if self.draw_grid: self._draw_grid() if self.draw_sky: self._draw_sky() view_mat_ptr = arr_pointer(self._view_matrix) projection_mat_ptr = arr_pointer(self._projection_matrix) gl.glUseProgram(self._shape_shader.id) gl.glUniformMatrix4fv(self._loc_shape_view, 1, gl.GL_FALSE, view_mat_ptr) gl.glUniform3f(self._loc_shape_view_pos, *self._camera_pos) gl.glUniformMatrix4fv(self._loc_shape_view, 1, gl.GL_FALSE, view_mat_ptr) gl.glUniformMatrix4fv(self._loc_shape_projection, 1, gl.GL_FALSE, projection_mat_ptr) if self.render_wireframe: gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE) if self._tiled_rendering: self._render_scene_tiled() else: self._render_scene() for cb in self.render_3d_callbacks: cb() gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL) gl.glBindBuffer(gl.GL_PIXEL_UNPACK_BUFFER, 0) gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) gl.glViewport(0, 0, self.screen_width, self.screen_height) # render frame buffer texture to screen if self._frame_fbo is not None: if self.render_depth: with self._frame_depth_shader: gl.glActiveTexture(gl.GL_TEXTURE0) gl.glBindTexture(gl.GL_TEXTURE_2D, self._frame_depth_texture) gl.glUniform1i(self._frame_loc_depth_texture, 0) gl.glBindVertexArray(self._frame_vao) gl.glDrawElements(gl.GL_TRIANGLES, len(self._frame_indices), gl.GL_UNSIGNED_INT, None) gl.glBindVertexArray(0) gl.glBindTexture(gl.GL_TEXTURE_2D, 0) else: with self._frame_shader: gl.glActiveTexture(gl.GL_TEXTURE0) gl.glBindTexture(gl.GL_TEXTURE_2D, self._frame_texture) gl.glUniform1i(self._frame_loc_texture, 0) gl.glBindVertexArray(self._frame_vao) gl.glDrawElements(gl.GL_TRIANGLES, len(self._frame_indices), gl.GL_UNSIGNED_INT, None) gl.glBindVertexArray(0) gl.glBindTexture(gl.GL_TEXTURE_2D, 0) # check for OpenGL errors # check_gl_error() if self.show_info: gl.glClear(gl.GL_DEPTH_BUFFER_BIT) gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA) gl.glEnable(gl.GL_BLEND) text = f"""Sim Time: {self.time:.1f} Update FPS: 
{self._fps_update:.1f} Render FPS: {self._fps_render:.1f} Shapes: {len(self._shapes)} Instances: {len(self._instances)}""" if self.paused: text += "\nPaused (press space to resume)" self._info_label.text = text self._info_label.y = self.screen_height - 5 self._info_label.draw() for cb in self.render_2d_callbacks: cb() def _draw_grid(self, is_tiled=False): from pyglet import gl if not is_tiled: gl.glUseProgram(self._grid_shader.id) gl.glUniformMatrix4fv(self._loc_grid_view, 1, gl.GL_FALSE, arr_pointer(self._view_matrix)) gl.glUniformMatrix4fv(self._loc_grid_projection, 1, gl.GL_FALSE, arr_pointer(self._projection_matrix)) gl.glBindVertexArray(self._grid_vao) gl.glDrawArrays(gl.GL_LINES, 0, self._grid_vertex_count) gl.glBindVertexArray(0) def _draw_sky(self, is_tiled=False): from pyglet import gl if not is_tiled: gl.glUseProgram(self._sky_shader.id) gl.glUniformMatrix4fv(self._loc_sky_view, 1, gl.GL_FALSE, arr_pointer(self._view_matrix)) gl.glUniformMatrix4fv(self._loc_sky_projection, 1, gl.GL_FALSE, arr_pointer(self._projection_matrix)) gl.glUniform3f(self._loc_sky_view_pos, *self._camera_pos) gl.glBindVertexArray(self._sky_vao) gl.glDrawElements(gl.GL_TRIANGLES, self._sky_tri_count, gl.GL_UNSIGNED_INT, None) gl.glBindVertexArray(0) def _render_scene(self): from pyglet import gl start_instance_idx = 0 for shape, (vao, _, _, tri_count, _) in self._shape_gl_buffers.items(): num_instances = len(self._shape_instances[shape]) gl.glBindVertexArray(vao) gl.glDrawElementsInstancedBaseInstance( gl.GL_TRIANGLES, tri_count, gl.GL_UNSIGNED_INT, None, num_instances, start_instance_idx ) start_instance_idx += num_instances if self.draw_axis: self._axis_instancer.render() for instancer in self._shape_instancers.values(): instancer.render() gl.glBindVertexArray(0) def _render_scene_tiled(self): from pyglet import gl for i, viewport in enumerate(self._tile_viewports): projection_matrix_ptr = arr_pointer(self._tile_projection_matrices[i]) view_matrix_ptr = arr_pointer( self._tile_view_matrices[i] if self._tile_view_matrices[i] is not None else self._view_matrix ) gl.glViewport(*viewport) if self.draw_grid: gl.glUseProgram(self._grid_shader.id) gl.glUniformMatrix4fv(self._loc_grid_projection, 1, gl.GL_FALSE, projection_matrix_ptr) gl.glUniformMatrix4fv(self._loc_grid_view, 1, gl.GL_FALSE, view_matrix_ptr) self._draw_grid(is_tiled=True) if self.draw_sky: gl.glUseProgram(self._sky_shader.id) gl.glUniformMatrix4fv(self._loc_sky_projection, 1, gl.GL_FALSE, projection_matrix_ptr) gl.glUniformMatrix4fv(self._loc_sky_view, 1, gl.GL_FALSE, view_matrix_ptr) self._draw_sky(is_tiled=True) gl.glUseProgram(self._shape_shader.id) gl.glUniformMatrix4fv(self._loc_shape_projection, 1, gl.GL_FALSE, projection_matrix_ptr) gl.glUniformMatrix4fv(self._loc_shape_view, 1, gl.GL_FALSE, view_matrix_ptr) instances = self._tile_instances[i] for instance in instances: shape = self._instance_shape[instance] vao, _, _, tri_count, _ = self._shape_gl_buffers[shape] start_instance_idx = self._inverse_instance_ids[instance] gl.glBindVertexArray(vao) gl.glDrawElementsInstancedBaseInstance( gl.GL_TRIANGLES, tri_count, gl.GL_UNSIGNED_INT, None, 1, start_instance_idx ) if self.draw_axis: self._axis_instancer.render() for instancer in self._shape_instancers.values(): instancer.render() gl.glBindVertexArray(0) def _mouse_drag_callback(self, x, y, dx, dy, buttons, modifiers): if not self.enable_mouse_interaction: return import pyglet if buttons & pyglet.window.mouse.LEFT: sensitivity = 0.1 dx *= sensitivity dy *= sensitivity self._yaw += dx 
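            # accumulate yaw/pitch from the mouse deltas; the pitch is clamped below to avoid flipping the camera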
self._pitch += dy self._pitch = max(min(self._pitch, 89.0), -89.0) self._camera_front.x = np.cos(np.deg2rad(self._yaw)) * np.cos(np.deg2rad(self._pitch)) self._camera_front.y = np.sin(np.deg2rad(self._pitch)) self._camera_front.z = np.sin(np.deg2rad(self._yaw)) * np.cos(np.deg2rad(self._pitch)) self._camera_front = self._camera_front.normalize() self.update_view_matrix() def _scroll_callback(self, x, y, scroll_x, scroll_y): if not self.enable_mouse_interaction: return self.camera_fov -= scroll_y self.camera_fov = max(min(self.camera_fov, 90.0), 15.0) self.update_projection_matrix() def _process_inputs(self): import pyglet from pyglet.math import Vec3 as PyVec3 for cb in self._input_processors: if cb(self._key_handler) == pyglet.event.EVENT_HANDLED: return if self._key_handler[pyglet.window.key.W] or self._key_handler[pyglet.window.key.UP]: self._camera_pos += self._camera_front * (self._camera_speed * self._frame_speed) self.update_view_matrix() if self._key_handler[pyglet.window.key.S] or self._key_handler[pyglet.window.key.DOWN]: self._camera_pos -= self._camera_front * (self._camera_speed * self._frame_speed) self.update_view_matrix() if self._key_handler[pyglet.window.key.A] or self._key_handler[pyglet.window.key.LEFT]: camera_side = PyVec3.cross(self._camera_front, self._camera_up).normalize() self._camera_pos -= camera_side * (self._camera_speed * self._frame_speed) self.update_view_matrix() if self._key_handler[pyglet.window.key.D] or self._key_handler[pyglet.window.key.RIGHT]: camera_side = PyVec3.cross(self._camera_front, self._camera_up).normalize() self._camera_pos += camera_side * (self._camera_speed * self._frame_speed) self.update_view_matrix() def register_input_processor(self, callback): self._input_processors.append(callback) def _key_press_callback(self, symbol, modifiers): import pyglet if not self.enable_keyboard_interaction: return for cb in self._key_callbacks: if cb(symbol, modifiers) == pyglet.event.EVENT_HANDLED: return pyglet.event.EVENT_HANDLED if symbol == pyglet.window.key.ESCAPE: self.close() if symbol == pyglet.window.key.SPACE: self.paused = not self.paused if symbol == pyglet.window.key.TAB: self.skip_rendering = not self.skip_rendering if symbol == pyglet.window.key.C: self.draw_axis = not self.draw_axis if symbol == pyglet.window.key.G: self.draw_grid = not self.draw_grid if symbol == pyglet.window.key.I: self.show_info = not self.show_info if symbol == pyglet.window.key.X: self.render_wireframe = not self.render_wireframe if symbol == pyglet.window.key.T: self.render_depth = not self.render_depth if symbol == pyglet.window.key.B: self.enable_backface_culling = not self.enable_backface_culling def register_key_press_callback(self, callback): self._key_callbacks.append(callback) def _window_resize_callback(self, width, height): self._first_mouse = True self.screen_width, self.screen_height = self.window.get_framebuffer_size() self.update_projection_matrix() self._setup_framebuffer() def register_shape(self, geo_hash, vertices, indices, color1=None, color2=None): from pyglet import gl shape = len(self._shapes) if color1 is None: color1 = tab10_color_map(len(self._shape_geo_hash)) if color2 is None: color2 = np.clip(np.array(color1) + 0.25, 0.0, 1.0) # TODO check if we actually need to store the shape data self._shapes.append((vertices, indices, color1, color2, geo_hash)) self._shape_geo_hash[geo_hash] = shape gl.glUseProgram(self._shape_shader.id) # Create VAO, VBO, and EBO vao = gl.GLuint() gl.glGenVertexArrays(1, vao) gl.glBindVertexArray(vao) vbo = 
gl.GLuint() gl.glGenBuffers(1, vbo) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, vbo) gl.glBufferData(gl.GL_ARRAY_BUFFER, vertices.nbytes, vertices.ctypes.data, gl.GL_STATIC_DRAW) vertex_cuda_buffer = wp.RegisteredGLBuffer(int(vbo.value), self._device) ebo = gl.GLuint() gl.glGenBuffers(1, ebo) gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, ebo) gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices.ctypes.data, gl.GL_STATIC_DRAW) # Set up vertex attributes vertex_stride = vertices.shape[1] * vertices.itemsize # positions gl.glVertexAttribPointer(0, 3, gl.GL_FLOAT, gl.GL_FALSE, vertex_stride, ctypes.c_void_p(0)) gl.glEnableVertexAttribArray(0) # normals gl.glVertexAttribPointer(1, 3, gl.GL_FLOAT, gl.GL_FALSE, vertex_stride, ctypes.c_void_p(3 * vertices.itemsize)) gl.glEnableVertexAttribArray(1) # uv coordinates gl.glVertexAttribPointer(2, 2, gl.GL_FLOAT, gl.GL_FALSE, vertex_stride, ctypes.c_void_p(6 * vertices.itemsize)) gl.glEnableVertexAttribArray(2) gl.glBindVertexArray(0) self._shape_gl_buffers[shape] = (vao, vbo, ebo, len(indices), vertex_cuda_buffer) return shape def add_shape_instance( self, name: str, shape: int, body, pos, rot, scale=(1.0, 1.0, 1.0), color1=None, color2=None, custom_index: int = -1, visible: bool = True, ): if color1 is None: color1 = self._shapes[shape][2] if color2 is None: color2 = self._shapes[shape][3] instance = len(self._instances) self._shape_instances[shape].append(instance) body = self._resolve_body_id(body) self._instances[name] = (instance, body, shape, [*pos, *rot], scale, color1, color2, visible) self._instance_shape[instance] = shape self._instance_custom_ids[instance] = custom_index self._add_shape_instances = True self._instance_count = len(self._instances) return instance def update_instance_colors(self): from pyglet import gl colors1, colors2 = [], [] all_instances = list(self._instances.values()) for _shape, instances in self._shape_instances.items(): for i in instances: if i >= len(all_instances): continue instance = all_instances[i] colors1.append(instance[5]) colors2.append(instance[6]) colors1 = np.array(colors1, dtype=np.float32) colors2 = np.array(colors2, dtype=np.float32) # create buffer for checkerboard colors if self._instance_color1_buffer is None: self._instance_color1_buffer = gl.GLuint() gl.glGenBuffers(1, self._instance_color1_buffer) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self._instance_color1_buffer) gl.glBufferData(gl.GL_ARRAY_BUFFER, colors1.nbytes, colors1.ctypes.data, gl.GL_STATIC_DRAW) if self._instance_color2_buffer is None: self._instance_color2_buffer = gl.GLuint() gl.glGenBuffers(1, self._instance_color2_buffer) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self._instance_color2_buffer) gl.glBufferData(gl.GL_ARRAY_BUFFER, colors2.nbytes, colors2.ctypes.data, gl.GL_STATIC_DRAW) def allocate_shape_instances(self): from pyglet import gl self._add_shape_instances = False self._wp_instance_transforms = wp.array( [instance[3] for instance in self._instances.values()], dtype=wp.transform, device=self._device ) self._wp_instance_scalings = wp.array( [instance[4] for instance in self._instances.values()], dtype=wp.vec3, device=self._device ) self._wp_instance_bodies = wp.array( [instance[1] for instance in self._instances.values()], dtype=wp.int32, device=self._device ) gl.glUseProgram(self._shape_shader.id) if self._instance_transform_gl_buffer is not None: gl.glDeleteBuffers(1, self._instance_transform_gl_buffer) gl.glDeleteBuffers(1, self._instance_color1_buffer) gl.glDeleteBuffers(1, self._instance_color2_buffer) # create instance 
buffer and bind it as an instanced array self._instance_transform_gl_buffer = gl.GLuint() gl.glGenBuffers(1, self._instance_transform_gl_buffer) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self._instance_transform_gl_buffer) transforms = np.tile(np.diag(np.ones(4, dtype=np.float32)), (len(self._instances), 1, 1)) gl.glBufferData(gl.GL_ARRAY_BUFFER, transforms.nbytes, transforms.ctypes.data, gl.GL_DYNAMIC_DRAW) # create CUDA buffer for instance transforms self._instance_transform_cuda_buffer = wp.RegisteredGLBuffer( int(self._instance_transform_gl_buffer.value), self._device ) self.update_instance_colors() # set up instance attribute pointers matrix_size = transforms[0].nbytes instance_ids = [] instance_custom_ids = [] instance_visible = [] instances = list(self._instances.values()) inverse_instance_ids = {} instance_count = 0 colors_size = np.zeros(3, dtype=np.float32).nbytes for shape, (vao, _vbo, _ebo, _tri_count, _vertex_cuda_buffer) in self._shape_gl_buffers.items(): gl.glBindVertexArray(vao) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self._instance_transform_gl_buffer) # we can only send vec4s to the shader, so we need to split the instance transforms matrix into its column vectors for i in range(4): gl.glVertexAttribPointer( 3 + i, 4, gl.GL_FLOAT, gl.GL_FALSE, matrix_size, ctypes.c_void_p(i * matrix_size // 4) ) gl.glEnableVertexAttribArray(3 + i) gl.glVertexAttribDivisor(3 + i, 1) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self._instance_color1_buffer) gl.glVertexAttribPointer(7, 3, gl.GL_FLOAT, gl.GL_FALSE, colors_size, ctypes.c_void_p(0)) gl.glEnableVertexAttribArray(7) gl.glVertexAttribDivisor(7, 1) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self._instance_color2_buffer) gl.glVertexAttribPointer(8, 3, gl.GL_FLOAT, gl.GL_FALSE, colors_size, ctypes.c_void_p(0)) gl.glEnableVertexAttribArray(8) gl.glVertexAttribDivisor(8, 1) instance_ids.extend(self._shape_instances[shape]) for i in self._shape_instances[shape]: inverse_instance_ids[i] = instance_count instance_count += 1 instance_custom_ids.append(self._instance_custom_ids[i]) instance_visible.append(instances[i][7]) # trigger update to the instance transforms self._update_shape_instances = True self._wp_instance_ids = wp.array(instance_ids, dtype=wp.int32, device=self._device) self._wp_instance_custom_ids = wp.array(instance_custom_ids, dtype=wp.int32, device=self._device) self._np_instance_visible = np.array(instance_visible) self._instance_ids = instance_ids self._inverse_instance_ids = inverse_instance_ids gl.glBindVertexArray(0) def update_shape_instance(self, name, pos=None, rot=None, color1=None, color2=None, visible=None): """Update the instance transform of the shape Args: name: The name of the shape pos: The position of the shape rot: The rotation of the shape color1: The first color of the checker pattern color2: The second color of the checker pattern visible: Whether the shape is visible """ from pyglet import gl if name in self._instances: i, body, shape, tf, scale, old_color1, old_color2, v = self._instances[name] if visible is None: visible = v new_tf = np.copy(tf) if pos is not None: new_tf[:3] = pos if rot is not None: new_tf[3:] = rot self._instances[name] = ( i, body, shape, new_tf, scale, color1 or old_color1, color2 or old_color2, visible, ) self._update_shape_instances = True if color1 is not None or color2 is not None: vao, vbo, ebo, tri_count, vertex_cuda_buffer = self._shape_gl_buffers[shape] gl.glBindVertexArray(vao) self.update_instance_colors() gl.glBindVertexArray(0) return True return False def update_shape_instances(self): with 
self._shape_shader: self._update_shape_instances = False self._wp_instance_transforms = wp.array( [instance[3] for instance in self._instances.values()], dtype=wp.transform, device=self._device ) self.update_body_transforms(None) def update_body_transforms(self, body_tf: wp.array): if self._instance_transform_cuda_buffer is None: return body_q = None if body_tf is not None: if body_tf.device.is_cuda: body_q = body_tf else: body_q = body_tf.to(self._device) vbo_transforms = self._instance_transform_cuda_buffer.map(dtype=wp.mat44, shape=(self._instance_count,)) wp.launch( update_vbo_transforms, dim=self._instance_count, inputs=[ self._wp_instance_ids, self._wp_instance_bodies, self._wp_instance_transforms, self._wp_instance_scalings, body_q, ], outputs=[ vbo_transforms, ], device=self._device, ) self._instance_transform_cuda_buffer.unmap() def register_body(self, name): # register body name and return its ID if name not in self._body_name: self._body_name[name] = len(self._body_name) return self._body_name[name] def _resolve_body_id(self, body): if body is None: return -1 if isinstance(body, int): return body return self._body_name[body] def is_running(self): return not self.app.event_loop.has_exit def save(self): # save just keeps the window open to allow the user to interact with the scene while not self.app.event_loop.has_exit: self.update() if self.app.event_loop.has_exit: self.clear() self.app.event_loop.exit() def get_pixels(self, target_image: wp.array, split_up_tiles=True, mode="rgb", use_uint8=False): """ Read the pixels from the frame buffer (RGB or depth are supported) into the given array. If `split_up_tiles` is False, array must be of shape (screen_height, screen_width, 3) for RGB mode or (screen_height, screen_width, 1) for depth mode. If `split_up_tiles` is True, the pixels will be split up into tiles (see :attr:`tile_width` and :attr:`tile_height` for dimensions): array must be of shape (num_tiles, tile_height, tile_width, 3) for RGB mode or (num_tiles, tile_height, tile_width, 1) for depth mode. Args: target_image (array): The array to read the pixels into. Must have float32 as dtype and be on a CUDA device. split_up_tiles (bool): Whether to split up the viewport into tiles, see :meth:`setup_tiled_rendering`. mode (str): can be either "rgb" or "depth" use_uint8 (bool): Whether to use uint8 as dtype in RGB mode for the target_image array and return values in the range [0, 255]. Otherwise, float32 is assumed as dtype with values in the range [0, 1]. Returns: bool: Whether the pixels were successfully read. 
""" from pyglet import gl channels = 3 if mode == "rgb" else 1 if split_up_tiles: assert ( self._tile_width is not None and self._tile_height is not None ), "Tile width and height are not set, tiles must all have the same size" assert all( vp[2] == self._tile_width for vp in self._tile_viewports ), "Tile widths do not all equal global tile_width, use `get_tile_pixels` instead to retrieve pixels for a single tile" assert all( vp[3] == self._tile_height for vp in self._tile_viewports ), "Tile heights do not all equal global tile_height, use `get_tile_pixels` instead to retrieve pixels for a single tile" assert ( target_image.shape == ( self.num_tiles, self._tile_height, self._tile_width, channels, ) ), f"Shape of `target_image` array does not match {self.num_tiles} x {self._tile_height} x {self._tile_width} x {channels}" else: assert target_image.shape == ( self.screen_height, self.screen_width, channels, ), f"Shape of `target_image` array does not match {self.screen_height} x {self.screen_width} x {channels}" gl.glBindBuffer(gl.GL_PIXEL_PACK_BUFFER, self._frame_pbo) if mode == "rgb": gl.glBindTexture(gl.GL_TEXTURE_2D, self._frame_texture) if mode == "depth": gl.glBindTexture(gl.GL_TEXTURE_2D, self._frame_depth_texture) try: # read screen texture into PBO if mode == "rgb": gl.glGetTexImage(gl.GL_TEXTURE_2D, 0, gl.GL_RGB, gl.GL_UNSIGNED_BYTE, ctypes.c_void_p(0)) elif mode == "depth": gl.glGetTexImage(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT, ctypes.c_void_p(0)) except gl.GLException: # this can happen if the window is closed/being moved to a different display gl.glBindTexture(gl.GL_TEXTURE_2D, 0) gl.glBindBuffer(gl.GL_PIXEL_PACK_BUFFER, 0) return False gl.glBindTexture(gl.GL_TEXTURE_2D, 0) gl.glBindBuffer(gl.GL_PIXEL_PACK_BUFFER, 0) pbo_buffer = wp.RegisteredGLBuffer( int(self._frame_pbo.value), self._device, wp.RegisteredGLBuffer.WRITE_DISCARD ) screen_size = self.screen_height * self.screen_width if mode == "rgb": img = pbo_buffer.map(dtype=wp.uint8, shape=(screen_size * channels)) elif mode == "depth": img = pbo_buffer.map(dtype=wp.float32, shape=(screen_size * channels)) img = img.to(target_image.device) if split_up_tiles: positions = wp.array(self._tile_viewports, ndim=2, dtype=wp.int32, device=target_image.device) if mode == "rgb": wp.launch( copy_rgb_frame_tiles_uint8 if use_uint8 else copy_rgb_frame_tiles, dim=(self.num_tiles, self._tile_width, self._tile_height), inputs=[img, positions, self.screen_width, self.screen_height, self._tile_height], outputs=[target_image], device=target_image.device, ) elif mode == "depth": wp.launch( copy_depth_frame_tiles, dim=(self.num_tiles, self._tile_width, self._tile_height), inputs=[ img, positions, self.screen_width, self.screen_height, self._tile_height, self.camera_near_plane, self.camera_far_plane, ], outputs=[target_image], device=target_image.device, ) else: if mode == "rgb": wp.launch( copy_rgb_frame_uint8 if use_uint8 else copy_rgb_frame, dim=(self.screen_width, self.screen_height), inputs=[img, self.screen_width, self.screen_height], outputs=[target_image], device=target_image.device, ) elif mode == "depth": wp.launch( copy_depth_frame, dim=(self.screen_width, self.screen_height), inputs=[img, self.screen_width, self.screen_height, self.camera_near_plane, self.camera_far_plane], outputs=[target_image], device=target_image.device, ) pbo_buffer.unmap() return True # def create_image_texture(self, file_path): # from PIL import Image # img = Image.open(file_path) # img_data = np.array(list(img.getdata()), np.uint8) # texture = 
glGenTextures(1) # glBindTexture(GL_TEXTURE_2D, texture) # glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img.width, img.height, 0, GL_RGB, GL_UNSIGNED_BYTE, img_data) # glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT) # glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT) # glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR) # glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR) # return texture # def create_check_texture(self, color1=(0, 0.5, 1.0), color2=None, width=default_texture_size, height=default_texture_size): # if width == 1 and height == 1: # pixels = np.array([np.array(color1)*255], dtype=np.uint8) # else: # pixels = np.zeros((width, height, 3), dtype=np.uint8) # half_w = width // 2 # half_h = height // 2 # color1 = np.array(np.array(color1)*255, dtype=np.uint8) # pixels[0:half_w, 0:half_h] = color1 # pixels[half_w:width, half_h:height] = color1 # if color2 is None: # color2 = np.array(np.clip(np.array(color1, dtype=np.float32) + 50, 0, 255), dtype=np.uint8) # else: # color2 = np.array(np.array(color2)*255, dtype=np.uint8) # pixels[half_w:width, 0:half_h] = color2 # pixels[0:half_w, half_h:height] = color2 # texture = glGenTextures(1) # glBindTexture(GL_TEXTURE_2D, texture) # glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, pixels.flatten()) # glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT) # glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT) # glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR) # glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR) # return texture def render_plane( self, name: str, pos: tuple, rot: tuple, width: float, length: float, color: tuple = (1.0, 1.0, 1.0), color2=None, parent_body: str = None, is_template: bool = False, u_scaling=1.0, v_scaling=1.0, ): """Add a plane for visualization Args: name: The name of the plane pos: The position of the plane rot: The rotation of the plane width: The width of the plane length: The length of the plane color: The color of the plane texture: The texture of the plane (optional) """ geo_hash = hash(("plane", width, length)) if geo_hash in self._shape_geo_hash: shape = self._shape_geo_hash[geo_hash] if self.update_shape_instance(name, pos, rot): return shape else: faces = np.array([0, 1, 2, 2, 3, 0], dtype=np.uint32) normal = (0.0, 1.0, 0.0) width = width if width > 0.0 else 100.0 length = length if length > 0.0 else 100.0 aspect = width / length u = width * aspect * u_scaling v = length * v_scaling gfx_vertices = np.array( [ [-width, 0.0, -length, *normal, 0.0, 0.0], [-width, 0.0, length, *normal, 0.0, v], [width, 0.0, length, *normal, u, v], [width, 0.0, -length, *normal, u, 0.0], ], dtype=np.float32, ) shape = self.register_shape(geo_hash, gfx_vertices, faces, color1=color, color2=color2) if not is_template: body = self._resolve_body_id(parent_body) self.add_shape_instance(name, shape, body, pos, rot) return shape def render_ground(self, size: float = 1000.0, plane=None): """Add a ground plane for visualization Args: size: The size of the ground plane """ color1 = (200 / 255, 200 / 255, 200 / 255) color2 = (150 / 255, 150 / 255, 150 / 255) sqh = np.sqrt(0.5) if self._camera_axis == 0: q = (0.0, 0.0, -sqh, sqh) elif self._camera_axis == 1: q = (0.0, 0.0, 0.0, 1.0) elif self._camera_axis == 2: q = (sqh, 0.0, 0.0, sqh) pos = (0.0, 0.0, 0.0) if plane is not None: normal = np.array(plane[:3]) normal /= np.linalg.norm(normal) pos = plane[3] * normal if np.allclose(normal, (0.0, 1.0, 0.0)): # no 
rotation necessary q = (0.0, 0.0, 0.0, 1.0) else: c = np.cross(normal, (0.0, 1.0, 0.0)) angle = np.arcsin(np.linalg.norm(c)) axis = np.abs(c) / np.linalg.norm(c) q = wp.quat_from_axis_angle(axis, angle) return self.render_plane( "ground", pos, q, size, size, color1, color2=color2, u_scaling=1.0, v_scaling=1.0, ) def render_sphere( self, name: str, pos: tuple, rot: tuple, radius: float, parent_body: str = None, is_template: bool = False, color=None, ): """Add a sphere for visualization Args: pos: The position of the sphere radius: The radius of the sphere name: A name for the USD prim on the stage color: The color of the sphere """ geo_hash = hash(("sphere", radius)) if geo_hash in self._shape_geo_hash: shape = self._shape_geo_hash[geo_hash] if self.update_shape_instance(name, pos, rot, color1=color, color2=color): return shape else: vertices, indices = self._create_sphere_mesh(radius) shape = self.register_shape(geo_hash, vertices, indices, color1=color, color2=color) if not is_template: body = self._resolve_body_id(parent_body) self.add_shape_instance(name, shape, body, pos, rot, color1=color, color2=color) return shape def render_capsule( self, name: str, pos: tuple, rot: tuple, radius: float, half_height: float, parent_body: str = None, is_template: bool = False, up_axis: int = 1, color: tuple = None, ): """Add a capsule for visualization Args: pos: The position of the capsule radius: The radius of the capsule half_height: The half height of the capsule name: A name for the USD prim on the stage up_axis: The axis of the capsule that points up (0: x, 1: y, 2: z) color: The color of the capsule """ geo_hash = hash(("capsule", radius, half_height)) if geo_hash in self._shape_geo_hash: shape = self._shape_geo_hash[geo_hash] if self.update_shape_instance(name, pos, rot): return shape else: vertices, indices = self._create_capsule_mesh(radius, half_height, up_axis=up_axis) shape = self.register_shape(geo_hash, vertices, indices, color1=color, color2=color) if not is_template: body = self._resolve_body_id(parent_body) self.add_shape_instance(name, shape, body, pos, rot) return shape def render_cylinder( self, name: str, pos: tuple, rot: tuple, radius: float, half_height: float, parent_body: str = None, is_template: bool = False, up_axis: int = 1, color: tuple = None, ): """Add a cylinder for visualization Args: pos: The position of the cylinder radius: The radius of the cylinder half_height: The half height of the cylinder name: A name for the USD prim on the stage up_axis: The axis of the cylinder that points up (0: x, 1: y, 2: z) color: The color of the capsule """ geo_hash = hash(("cylinder", radius, half_height)) if geo_hash in self._shape_geo_hash: shape = self._shape_geo_hash[geo_hash] if self.update_shape_instance(name, pos, rot): return shape else: vertices, indices = self._create_cylinder_mesh(radius, half_height, up_axis=up_axis) shape = self.register_shape(geo_hash, vertices, indices, color1=color, color2=color) if not is_template: body = self._resolve_body_id(parent_body) self.add_shape_instance(name, shape, body, pos, rot) return shape def render_cone( self, name: str, pos: tuple, rot: tuple, radius: float, half_height: float, parent_body: str = None, is_template: bool = False, up_axis: int = 1, color: tuple = None, ): """Add a cone for visualization Args: pos: The position of the cone radius: The radius of the cone half_height: The half height of the cone name: A name for the USD prim on the stage up_axis: The axis of the cone that points up (0: x, 1: y, 2: z) color: The color of 
the cone """ geo_hash = hash(("cone", radius, half_height)) if geo_hash in self._shape_geo_hash: shape = self._shape_geo_hash[geo_hash] if self.update_shape_instance(name, pos, rot): return shape else: vertices, indices = self._create_cone_mesh(radius, half_height, up_axis=up_axis) shape = self.register_shape(geo_hash, vertices, indices, color1=color, color2=color) if not is_template: body = self._resolve_body_id(parent_body) self.add_shape_instance(name, shape, body, pos, rot) return shape def render_box( self, name: str, pos: tuple, rot: tuple, extents: tuple, parent_body: str = None, is_template: bool = False, color: tuple = None, ): """Add a box for visualization Args: pos: The position of the box extents: The extents of the box name: A name for the USD prim on the stage color: The color of the box """ geo_hash = hash(("box", tuple(extents))) if geo_hash in self._shape_geo_hash: shape = self._shape_geo_hash[geo_hash] if self.update_shape_instance(name, pos, rot): return shape else: vertices, indices = self._create_box_mesh(extents) shape = self.register_shape(geo_hash, vertices, indices, color1=color, color2=color) if not is_template: body = self._resolve_body_id(parent_body) self.add_shape_instance(name, shape, body, pos, rot) return shape def render_mesh( self, name: str, points, indices, colors=None, pos=(0.0, 0.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0), scale=(1.0, 1.0, 1.0), update_topology=False, parent_body: str = None, is_template: bool = False, smooth_shading: bool = True, ): """Add a mesh for visualization Args: points: The points of the mesh indices: The indices of the mesh colors: The colors of the mesh pos: The position of the mesh rot: The rotation of the mesh scale: The scale of the mesh name: A name for the USD prim on the stage smooth_shading: Whether to average face normals at each vertex or introduce additional vertices for each face """ if colors is None: colors = np.ones((len(points), 3), dtype=np.float32) else: colors = np.array(colors, dtype=np.float32) points = np.array(points, dtype=np.float32) * np.array(scale, dtype=np.float32) indices = np.array(indices, dtype=np.int32).reshape((-1, 3)) if name in self._instances: self.update_shape_instance(name, pos, rot) shape = self._instances[name][2] self.update_shape_vertices(shape, points) return geo_hash = hash((points.tobytes(), indices.tobytes(), colors.tobytes())) if geo_hash in self._shape_geo_hash: shape = self._shape_geo_hash[geo_hash] if self.update_shape_instance(name, pos, rot): return shape else: if smooth_shading: normals = wp.zeros(len(points), dtype=wp.vec3) vertices = wp.array(points, dtype=wp.vec3) faces_per_vertex = wp.zeros(len(points), dtype=int) wp.launch( compute_average_normals, dim=len(indices), inputs=[wp.array(indices, dtype=int), vertices], outputs=[normals, faces_per_vertex], ) gfx_vertices = wp.zeros((len(points), 8), dtype=float) wp.launch( assemble_gfx_vertices, dim=len(points), inputs=[vertices, normals, faces_per_vertex], outputs=[gfx_vertices], ) gfx_vertices = gfx_vertices.numpy() gfx_indices = indices.flatten() else: gfx_vertices = wp.zeros((len(indices) * 3, 8), dtype=float) wp.launch( compute_gfx_vertices, dim=len(indices), inputs=[wp.array(indices, dtype=int), wp.array(points, dtype=wp.vec3)], outputs=[gfx_vertices], ) gfx_vertices = gfx_vertices.numpy() gfx_indices = np.arange(len(indices) * 3) shape = self.register_shape(geo_hash, gfx_vertices, gfx_indices) if not is_template: body = self._resolve_body_id(parent_body) self.add_shape_instance(name, shape, body, pos, rot) return shape def 
render_arrow( self, name: str, pos: tuple, rot: tuple, base_radius: float, base_height: float, cap_radius: float = None, cap_height: float = None, parent_body: str = None, is_template: bool = False, up_axis: int = 1, color: Tuple[float, float, float] = None, ): """Add a arrow for visualization Args: pos: The position of the arrow base_radius: The radius of the cylindrical base of the arrow base_height: The height of the cylindrical base of the arrow cap_radius: The radius of the conical cap of the arrow cap_height: The height of the conical cap of the arrow name: A name for the USD prim on the stage up_axis: The axis of the arrow that points up (0: x, 1: y, 2: z) """ geo_hash = hash(("arrow", base_radius, base_height, cap_radius, cap_height)) if geo_hash in self._shape_geo_hash: shape = self._shape_geo_hash[geo_hash] if self.update_shape_instance(name, pos, rot): return shape else: vertices, indices = self._create_arrow_mesh( base_radius, base_height, cap_radius, cap_height, up_axis=up_axis ) shape = self.register_shape(geo_hash, vertices, indices) if not is_template: body = self._resolve_body_id(parent_body) self.add_shape_instance(name, shape, body, pos, rot, color1=color, color2=color) return shape def render_ref(self, name: str, path: str, pos: tuple, rot: tuple, scale: tuple, color: tuple = None): """ Create a reference (instance) with the given name to the given path. """ if path in self._instances: _, body, shape, _, original_scale, color1, color2 = self._instances[path] if color is not None: color1 = color2 = color self.add_shape_instance(name, shape, body, pos, rot, scale or original_scale, color1, color2) return raise Exception("Cannot create reference to path: " + path) def render_points(self, name: str, points, radius, colors=None): """Add a set of points Args: points: The points to render radius: The radius of the points (scalar or list) colors: The colors of the points name: A name for the USD prim on the stage """ if len(points) == 0: return if isinstance(points, wp.array): wp_points = points else: wp_points = wp.array(points, dtype=wp.vec3, device=self._device) if name not in self._shape_instancers: np_points = points.numpy() if isinstance(points, wp.array) else points instancer = ShapeInstancer(self._shape_shader, self._device) radius_is_scalar = np.isscalar(radius) if radius_is_scalar: vertices, indices = self._create_sphere_mesh(radius) else: vertices, indices = self._create_sphere_mesh(1.0) if colors is None: color = tab10_color_map(len(self._shape_geo_hash)) elif len(colors) == 3: color = colors else: color = colors[0] instancer.register_shape(vertices, indices, color, color) scalings = None if radius_is_scalar else np.tile(radius, (3, 1)).T instancer.allocate_instances(np_points, colors1=colors, colors2=colors, scalings=scalings) self._shape_instancers[name] = instancer else: instancer = self._shape_instancers[name] if len(points) != instancer.num_instances: np_points = points.numpy() if isinstance(points, wp.array) else points instancer.allocate_instances(np_points) with instancer: wp.launch( update_points_positions, dim=len(points), inputs=[wp_points, instancer.instance_scalings], outputs=[instancer.vbo_transforms], device=self._device, ) def _render_lines(self, name: str, lines, color: tuple, radius: float = 0.01): if len(lines) == 0: return if name not in self._shape_instancers: instancer = ShapeInstancer(self._shape_shader, self._device) vertices, indices = self._create_capsule_mesh(radius, 0.5) if color is None or isinstance(color, list) and len(color) > 0 and 
isinstance(color[0], list): color = tab10_color_map(len(self._shape_geo_hash)) instancer.register_shape(vertices, indices, color, color) instancer.allocate_instances(np.zeros((len(lines), 3))) self._shape_instancers[name] = instancer else: instancer = self._shape_instancers[name] if len(lines) != instancer.num_instances: instancer.allocate_instances(np.zeros((len(lines), 3))) instancer.update_colors(color, color) lines_wp = wp.array(lines, dtype=wp.vec3, ndim=2, device=self._device) with instancer: wp.launch( update_line_transforms, dim=len(lines), inputs=[lines_wp], outputs=[instancer.vbo_transforms], device=self._device, ) def render_line_list(self, name: str, vertices, indices, color: tuple = None, radius: float = 0.01): """Add a line list as a set of capsules Args: vertices: The vertices of the line-list indices: The indices of the line-list color: The color of the line radius: The radius of the line """ lines = [] for i in range(len(indices) // 2): lines.append((vertices[indices[2 * i]], vertices[indices[2 * i + 1]])) lines = np.array(lines) self._render_lines(name, lines, color, radius) def render_line_strip(self, name: str, vertices, color: tuple = None, radius: float = 0.01): """Add a line strip as a set of capsules Args: vertices: The vertices of the line-strip color: The color of the line radius: The radius of the line """ lines = [] for i in range(len(vertices) - 1): lines.append((vertices[i], vertices[i + 1])) lines = np.array(lines) self._render_lines(name, lines, color, radius) def update_shape_vertices(self, shape, points): if isinstance(points, wp.array): wp_points = points.to(self._device) else: wp_points = wp.array(points, dtype=wp.vec3, device=self._device) cuda_buffer = self._shape_gl_buffers[shape][4] vertices_shape = self._shapes[shape][0].shape vbo_vertices = cuda_buffer.map(dtype=wp.float32, shape=vertices_shape) wp.launch( update_vbo_vertices, dim=vertices_shape[0], inputs=[wp_points], outputs=[vbo_vertices], device=self._device, ) cuda_buffer.unmap() @staticmethod def _create_sphere_mesh( radius=1.0, num_latitudes=default_num_segments, num_longitudes=default_num_segments, reverse_winding=False, ): vertices = [] indices = [] for i in range(num_latitudes + 1): theta = i * np.pi / num_latitudes sin_theta = np.sin(theta) cos_theta = np.cos(theta) for j in range(num_longitudes + 1): phi = j * 2 * np.pi / num_longitudes sin_phi = np.sin(phi) cos_phi = np.cos(phi) x = cos_phi * sin_theta y = cos_theta z = sin_phi * sin_theta u = float(j) / num_longitudes v = float(i) / num_latitudes vertices.append([x * radius, y * radius, z * radius, x, y, z, u, v]) for i in range(num_latitudes): for j in range(num_longitudes): first = i * (num_longitudes + 1) + j second = first + num_longitudes + 1 if reverse_winding: indices.extend([first, second, first + 1, second, second + 1, first + 1]) else: indices.extend([first, first + 1, second, second, first + 1, second + 1]) return np.array(vertices, dtype=np.float32), np.array(indices, dtype=np.uint32) @staticmethod def _create_capsule_mesh(radius, half_height, up_axis=1, segments=default_num_segments): vertices = [] indices = [] x_dir, y_dir, z_dir = ((1, 2, 0), (2, 0, 1), (0, 1, 2))[up_axis] up_vector = np.zeros(3) up_vector[up_axis] = half_height for i in range(segments + 1): theta = i * np.pi / segments sin_theta = np.sin(theta) cos_theta = np.cos(theta) for j in range(segments + 1): phi = j * 2 * np.pi / segments sin_phi = np.sin(phi) cos_phi = np.cos(phi) z = cos_phi * sin_theta y = cos_theta x = sin_phi * sin_theta u = cos_theta * 
0.5 + 0.5 v = cos_phi * sin_theta * 0.5 + 0.5 xyz = x, y, z x, y, z = xyz[x_dir], xyz[y_dir], xyz[z_dir] xyz = np.array((x, y, z), dtype=np.float32) * radius if j < segments // 2: xyz += up_vector else: xyz -= up_vector vertices.append([*xyz, x, y, z, u, v]) nv = len(vertices) for i in range(segments + 1): for j in range(segments + 1): first = (i * (segments + 1) + j) % nv second = (first + segments + 1) % nv indices.extend([first, second, (first + 1) % nv, second, (second + 1) % nv, (first + 1) % nv]) vertex_data = np.array(vertices, dtype=np.float32) index_data = np.array(indices, dtype=np.uint32) return vertex_data, index_data @staticmethod def _create_cone_mesh(radius, half_height, up_axis=1, segments=default_num_segments): # render it as a cylinder with zero top radius so we get correct normals on the sides return OpenGLRenderer._create_cylinder_mesh(radius, half_height, up_axis, segments, 0.0) @staticmethod def _create_cylinder_mesh(radius, half_height, up_axis=1, segments=default_num_segments, top_radius=None): if up_axis not in (0, 1, 2): raise ValueError("up_axis must be between 0 and 2") x_dir, y_dir, z_dir = ( (1, 2, 0), (0, 1, 2), (2, 0, 1), )[up_axis] indices = [] cap_vertices = [] side_vertices = [] # create center cap vertices position = np.array([0, -half_height, 0])[[x_dir, y_dir, z_dir]] normal = np.array([0, -1, 0])[[x_dir, y_dir, z_dir]] cap_vertices.append([*position, *normal, 0.5, 0.5]) cap_vertices.append([*-position, *-normal, 0.5, 0.5]) if top_radius is None: top_radius = radius side_slope = -np.arctan2(top_radius - radius, 2 * half_height) # create the cylinder base and top vertices for j in (-1, 1): center_index = max(j, 0) if j == 1: radius = top_radius for i in range(segments): theta = 2 * np.pi * i / segments cos_theta = np.cos(theta) sin_theta = np.sin(theta) x = cos_theta y = j * half_height z = sin_theta position = np.array([radius * x, y, radius * z]) normal = np.array([x, side_slope, z]) normal = normal / np.linalg.norm(normal) uv = (i / (segments - 1), (j + 1) / 2) vertex = np.hstack([position[[x_dir, y_dir, z_dir]], normal[[x_dir, y_dir, z_dir]], uv]) side_vertices.append(vertex) normal = np.array([0, j, 0]) uv = (cos_theta * 0.5 + 0.5, sin_theta * 0.5 + 0.5) vertex = np.hstack([position[[x_dir, y_dir, z_dir]], normal[[x_dir, y_dir, z_dir]], uv]) cap_vertices.append(vertex) cs = center_index * segments indices.extend([center_index, i + cs + 2, (i + 1) % segments + cs + 2][::-j]) # create the cylinder side indices for i in range(segments): index1 = len(cap_vertices) + i + segments index2 = len(cap_vertices) + ((i + 1) % segments) + segments index3 = len(cap_vertices) + i index4 = len(cap_vertices) + ((i + 1) % segments) indices.extend([index1, index2, index3, index2, index4, index3]) vertex_data = np.array(np.vstack((cap_vertices, side_vertices)), dtype=np.float32) index_data = np.array(indices, dtype=np.uint32) return vertex_data, index_data @staticmethod def _create_arrow_mesh( base_radius, base_height, cap_radius=None, cap_height=None, up_axis=1, segments=default_num_segments ): if up_axis not in (0, 1, 2): raise ValueError("up_axis must be between 0 and 2") if cap_radius is None: cap_radius = base_radius * 1.8 if cap_height is None: cap_height = base_height * 0.18 up_vector = np.array([0, 0, 0]) up_vector[up_axis] = 1 base_vertices, base_indices = OpenGLRenderer._create_cylinder_mesh( base_radius, base_height / 2, up_axis, segments ) cap_vertices, cap_indices = OpenGLRenderer._create_cone_mesh(cap_radius, cap_height / 2, up_axis, segments) 
base_vertices[:, :3] += base_height / 2 * up_vector # move cap slightly lower to avoid z-fighting cap_vertices[:, :3] += (base_height + cap_height / 2 - 1e-3 * base_height) * up_vector vertex_data = np.vstack((base_vertices, cap_vertices)) index_data = np.hstack((base_indices, cap_indices + len(base_vertices))) return vertex_data, index_data @staticmethod def _create_box_mesh(extents): x_extent, y_extent, z_extent = extents vertices = [ # Position Normal UV [-x_extent, -y_extent, -z_extent, -1, 0, 0, 0, 0], [-x_extent, -y_extent, z_extent, -1, 0, 0, 1, 0], [-x_extent, y_extent, z_extent, -1, 0, 0, 1, 1], [-x_extent, y_extent, -z_extent, -1, 0, 0, 0, 1], [x_extent, -y_extent, -z_extent, 1, 0, 0, 0, 0], [x_extent, -y_extent, z_extent, 1, 0, 0, 1, 0], [x_extent, y_extent, z_extent, 1, 0, 0, 1, 1], [x_extent, y_extent, -z_extent, 1, 0, 0, 0, 1], [-x_extent, -y_extent, -z_extent, 0, -1, 0, 0, 0], [-x_extent, -y_extent, z_extent, 0, -1, 0, 1, 0], [x_extent, -y_extent, z_extent, 0, -1, 0, 1, 1], [x_extent, -y_extent, -z_extent, 0, -1, 0, 0, 1], [-x_extent, y_extent, -z_extent, 0, 1, 0, 0, 0], [-x_extent, y_extent, z_extent, 0, 1, 0, 1, 0], [x_extent, y_extent, z_extent, 0, 1, 0, 1, 1], [x_extent, y_extent, -z_extent, 0, 1, 0, 0, 1], [-x_extent, -y_extent, -z_extent, 0, 0, -1, 0, 0], [-x_extent, y_extent, -z_extent, 0, 0, -1, 1, 0], [x_extent, y_extent, -z_extent, 0, 0, -1, 1, 1], [x_extent, -y_extent, -z_extent, 0, 0, -1, 0, 1], [-x_extent, -y_extent, z_extent, 0, 0, 1, 0, 0], [-x_extent, y_extent, z_extent, 0, 0, 1, 1, 0], [x_extent, y_extent, z_extent, 0, 0, 1, 1, 1], [x_extent, -y_extent, z_extent, 0, 0, 1, 0, 1], ] # fmt: off indices = [ 0, 1, 2, 0, 2, 3, 4, 6, 5, 4, 7, 6, 8, 10, 9, 8, 11, 10, 12, 13, 14, 12, 14, 15, 16, 17, 18, 16, 18, 19, 20, 22, 21, 20, 23, 22, ] # fmt: on return np.array(vertices, dtype=np.float32), np.array(indices, dtype=np.uint32) if __name__ == "__main__": renderer = OpenGLRenderer()
122962
Python
36.261515
211
0.575048
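
The OpenGLRenderer content above follows a retained-mode pattern: geometry is registered once (register_shape, render_plane, render_sphere, and the other render_* helpers hash their inputs and reuse existing meshes), per-frame updates only touch instance transforms via update_shape_instance or update_body_transforms, and get_pixels copies the frame buffer back into a Warp array through a registered GL buffer. The sketch below is a minimal, hypothetical usage example of those methods; it assumes a CUDA device, a display, and pyglet are available, and begin_frame/end_frame are assumed from the wider warp.render renderer API rather than shown in the excerpt above.

import numpy as np
import warp as wp
import warp.render

wp.init()

# opening the renderer creates an interactive pyglet window (defaults used here)
renderer = wp.render.OpenGLRenderer()

# geometry is hashed and uploaded once; later calls with the same parameters reuse it
renderer.render_ground(size=100.0)
renderer.render_sphere("ball", pos=(0.0, 1.0, 0.0), rot=(0.0, 0.0, 0.0, 1.0), radius=0.5)

# target for get_pixels(): float32, shape (screen_height, screen_width, 3), on a CUDA device
pixels = wp.zeros((renderer.screen_height, renderer.screen_width, 3), dtype=wp.float32, device="cuda")

t = 0.0
while renderer.is_running():
    t += 1.0 / 60.0

    # after registration, only the instance transform is updated each frame
    renderer.update_shape_instance("ball", pos=(0.0, 1.0 + 0.25 * np.sin(t), 0.0))

    renderer.begin_frame(t)  # assumed frame bracket from the base renderer API
    renderer.end_frame()

    # read the rendered RGB frame back into the Warp array (see get_pixels above)
    renderer.get_pixels(pixels, split_up_tiles=False, mode="rgb")

renderer.clear()
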
NVIDIA/warp/warp/native/spatial.h
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once namespace wp { //--------------------------------------------------------------------------------- // Represents a twist in se(3) template <typename Type> using spatial_vector_t = vec_t<6,Type>; template<typename Type> CUDA_CALLABLE inline Type spatial_dot(const spatial_vector_t<Type>& a, const spatial_vector_t<Type>& b) { return dot(a, b); } template<typename Type> CUDA_CALLABLE inline vec_t<3,Type> &w_vec( spatial_vector_t<Type>& a ) { return *(vec_t<3,Type>*)(&a); } template<typename Type> CUDA_CALLABLE inline vec_t<3,Type> &v_vec( spatial_vector_t<Type>& a ) { return *(vec_t<3,Type>*)(&a.c[3]); } template<typename Type> CUDA_CALLABLE inline const vec_t<3,Type> &w_vec( const spatial_vector_t<Type>& a ) { spatial_vector_t<Type> &non_const_vec = *(spatial_vector_t<Type>*)(const_cast<Type*>(&a.c[0])); return w_vec(non_const_vec); } template<typename Type> CUDA_CALLABLE inline const vec_t<3,Type> &v_vec( const spatial_vector_t<Type>& a ) { spatial_vector_t<Type> &non_const_vec = *(spatial_vector_t<Type>*)(const_cast<Type*>(&a.c[0])); return v_vec(non_const_vec); } template<typename Type> CUDA_CALLABLE inline spatial_vector_t<Type> spatial_cross(const spatial_vector_t<Type>& a, const spatial_vector_t<Type>& b) { vec_t<3,Type> w = cross(w_vec(a), w_vec(b)); vec_t<3,Type> v = cross(v_vec(a), w_vec(b)) + cross(w_vec(a), v_vec(b)); return spatial_vector_t<Type>({w[0], w[1], w[2], v[0], v[1], v[2]}); } template<typename Type> CUDA_CALLABLE inline spatial_vector_t<Type> spatial_cross_dual(const spatial_vector_t<Type>& a, const spatial_vector_t<Type>& b) { vec_t<3,Type> w = cross(w_vec(a), w_vec(b)) + cross(v_vec(a), v_vec(b)); vec_t<3,Type> v = cross(w_vec(a), v_vec(b)); return spatial_vector_t<Type>({w[0], w[1], w[2], v[0], v[1], v[2]}); } template<typename Type> CUDA_CALLABLE inline vec_t<3,Type> spatial_top(const spatial_vector_t<Type>& a) { return w_vec(a); } template<typename Type> CUDA_CALLABLE inline vec_t<3,Type> spatial_bottom(const spatial_vector_t<Type>& a) { return v_vec(a); } template<typename Type> CUDA_CALLABLE inline void adj_spatial_dot(const spatial_vector_t<Type>& a, const spatial_vector_t<Type>& b, spatial_vector_t<Type>& adj_a, spatial_vector_t<Type>& adj_b, const Type& adj_ret) { adj_dot(a, b, adj_a, adj_b, adj_ret); } template<typename Type> CUDA_CALLABLE inline void adj_spatial_cross(const spatial_vector_t<Type>& a, const spatial_vector_t<Type>& b, spatial_vector_t<Type>& adj_a, spatial_vector_t<Type>& adj_b, const spatial_vector_t<Type>& adj_ret) { adj_cross(w_vec(a), w_vec(b), w_vec(adj_a), w_vec(adj_b), w_vec(adj_ret)); adj_cross(v_vec(a), w_vec(b), v_vec(adj_a), w_vec(adj_b), v_vec(adj_ret)); adj_cross(w_vec(a), v_vec(b), w_vec(adj_a), v_vec(adj_b), v_vec(adj_ret)); } template<typename Type> CUDA_CALLABLE inline void adj_spatial_cross_dual(const spatial_vector_t<Type>& a, const spatial_vector_t<Type>& b, spatial_vector_t<Type>& adj_a, spatial_vector_t<Type>& adj_b, const spatial_vector_t<Type>& adj_ret) { adj_cross(w_vec(a), w_vec(b), w_vec(adj_a), w_vec(adj_b), w_vec(adj_ret)); adj_cross(v_vec(a), v_vec(b), v_vec(adj_a), v_vec(adj_b), 
w_vec(adj_ret)); adj_cross(w_vec(a), v_vec(b), w_vec(adj_a), v_vec(adj_b), v_vec(adj_ret)); } template<typename Type> CUDA_CALLABLE inline void adj_spatial_top(const spatial_vector_t<Type>& a, spatial_vector_t<Type>& adj_a, const vec_t<3,Type>& adj_ret) { w_vec(adj_a) += adj_ret; } template<typename Type> CUDA_CALLABLE inline void adj_spatial_bottom(const spatial_vector_t<Type>& a, spatial_vector_t<Type>& adj_a, const vec_t<3,Type>& adj_ret) { v_vec(adj_a) += adj_ret; } //--------------------------------------------------------------------------------- // Represents a rigid body transform<Type>ation template<typename Type> struct transform_t { vec_t<3,Type> p; quat_t<Type> q; CUDA_CALLABLE inline transform_t(vec_t<3,Type> p=vec_t<3,Type>(), quat_t<Type> q=quat_t<Type>()) : p(p), q(q) {} CUDA_CALLABLE inline transform_t(Type) {} // helps uniform initialization CUDA_CALLABLE inline Type operator[](int index) const { assert(index < 7); return p.c[index]; } CUDA_CALLABLE inline Type& operator[](int index) { assert(index < 7); return p.c[index]; } }; template<typename Type=float32> CUDA_CALLABLE inline transform_t<Type> transform_identity() { return transform_t<Type>(vec_t<3,Type>(), quat_identity<Type>()); } template<typename Type> inline CUDA_CALLABLE bool operator==(const transform_t<Type>& a, const transform_t<Type>& b) { return a.p == b.p && a.q == b.q; } template<typename Type> inline bool CUDA_CALLABLE isfinite(const transform_t<Type>& t) { return isfinite(t.p) && isfinite(t.q); } template<typename Type> CUDA_CALLABLE inline vec_t<3,Type> transform_get_translation(const transform_t<Type>& t) { return t.p; } template<typename Type> CUDA_CALLABLE inline quat_t<Type> transform_get_rotation(const transform_t<Type>& t) { return t.q; } template<typename Type> CUDA_CALLABLE inline transform_t<Type> transform_multiply(const transform_t<Type>& a, const transform_t<Type>& b) { return { quat_rotate(a.q, b.p) + a.p, mul(a.q, b.q) }; } template<typename Type> CUDA_CALLABLE inline void adj_transform_multiply(const transform_t<Type>& a, const transform_t<Type>& b, transform_t<Type>& adj_a, transform_t<Type>& adj_b, const transform_t<Type>& adj_ret) { // translational part adj_quat_rotate(a.q, b.p, adj_a.q, adj_b.p, adj_ret.p); adj_a.p += adj_ret.p; // rotational part adj_mul(a.q, b.q, adj_a.q, adj_b.q, adj_ret.q); } template<typename Type> CUDA_CALLABLE inline transform_t<Type> transform_inverse(const transform_t<Type>& t) { quat_t<Type> q_inv = quat_inverse(t.q); return transform_t<Type>(-quat_rotate(q_inv, t.p), q_inv); } template<typename Type> CUDA_CALLABLE inline vec_t<3,Type> transform_vector(const transform_t<Type>& t, const vec_t<3,Type>& x) { return quat_rotate(t.q, x); } template<typename Type> CUDA_CALLABLE inline vec_t<3,Type> transform_point(const transform_t<Type>& t, const vec_t<3,Type>& x) { return t.p + quat_rotate(t.q, x); } // not totally sure why you'd want to do this seeing as adding/subtracting two rotation // quats doesn't seem to do anything meaningful template<typename Type> CUDA_CALLABLE inline transform_t<Type> add(const transform_t<Type>& a, const transform_t<Type>& b) { return { a.p + b.p, a.q + b.q }; } template<typename Type> CUDA_CALLABLE inline transform_t<Type> sub(const transform_t<Type>& a, const transform_t<Type>& b) { return { a.p - b.p, a.q - b.q }; } // also not sure why you'd want to do this seeing as the quat would end up unnormalized template<typename Type> CUDA_CALLABLE inline transform_t<Type> mul(const transform_t<Type>& a, Type s) { return { a.p*s, a.q*s 
}; } template<typename Type> CUDA_CALLABLE inline transform_t<Type> mul(Type s, const transform_t<Type>& a) { return mul(a, s); } template<typename Type> CUDA_CALLABLE inline transform_t<Type> mul(const transform_t<Type>& a, const transform_t<Type>& b) { return transform_multiply(a, b); } template<typename Type> CUDA_CALLABLE inline transform_t<Type> operator*(const transform_t<Type>& a, Type s) { return mul(a, s); } template<typename Type> CUDA_CALLABLE inline transform_t<Type> operator*(Type s, const transform_t<Type>& a) { return mul(a, s); } template<typename Type> inline CUDA_CALLABLE Type tensordot(const transform_t<Type>& a, const transform_t<Type>& b) { // corresponds to `np.tensordot()` with all axes being contracted return tensordot(a.p, b.p) + tensordot(a.q, b.q); } template<typename Type> inline CUDA_CALLABLE Type extract(const transform_t<Type>& t, int i) { return t[i]; } template<typename Type> inline void CUDA_CALLABLE adj_extract(const transform_t<Type>& t, int i, transform_t<Type>& adj_t, int& adj_i, Type adj_ret) { adj_t[i] += adj_ret; } // adjoint methods template<typename Type> CUDA_CALLABLE inline void adj_add(const transform_t<Type>& a, const transform_t<Type>& b, transform_t<Type>& adj_a, transform_t<Type>& adj_b, const transform_t<Type>& adj_ret) { adj_add(a.p, b.p, adj_a.p, adj_b.p, adj_ret.p); adj_add(a.q, b.q, adj_a.q, adj_b.q, adj_ret.q); } template<typename Type> CUDA_CALLABLE inline void adj_sub(const transform_t<Type>& a, const transform_t<Type>& b, transform_t<Type>& adj_a, transform_t<Type>& adj_b, const transform_t<Type>& adj_ret) { adj_sub(a.p, b.p, adj_a.p, adj_b.p, adj_ret.p); adj_sub(a.q, b.q, adj_a.q, adj_b.q, adj_ret.q); } template<typename Type> CUDA_CALLABLE inline void adj_mul(const transform_t<Type>& a, Type s, transform_t<Type>& adj_a, Type& adj_s, const transform_t<Type>& adj_ret) { adj_mul(a.p, s, adj_a.p, adj_s, adj_ret.p); adj_mul(a.q, s, adj_a.q, adj_s, adj_ret.q); } template<typename Type> CUDA_CALLABLE inline void adj_mul(Type s, const transform_t<Type>& a, Type& adj_s, transform_t<Type>& adj_a, const transform_t<Type>& adj_ret) { adj_mul(a, s, adj_a, adj_s, adj_ret); } template<typename Type> CUDA_CALLABLE inline void adj_mul(const transform_t<Type>& a, const transform_t<Type>& b, transform_t<Type>& adj_a, transform_t<Type>& adj_b, const transform_t<Type>& adj_ret) { adj_transform_multiply(a, b, adj_a, adj_b, adj_ret); } template<typename Type> inline CUDA_CALLABLE transform_t<Type> atomic_add(transform_t<Type>* addr, const transform_t<Type>& value) { vec_t<3,Type> p = atomic_add(&addr->p, value.p); quat_t<Type> q = atomic_add(&addr->q, value.q); return transform_t<Type>(p, q); } template<typename Type> CUDA_CALLABLE inline void adj_transform_t(const vec_t<3,Type>& p, const quat_t<Type>& q, vec_t<3,Type>& adj_p, quat_t<Type>& adj_q, const transform_t<Type>& adj_ret) { adj_p += adj_ret.p; adj_q += adj_ret.q; } template<typename Type> CUDA_CALLABLE inline void adj_transform_get_translation(const transform_t<Type>& t, transform_t<Type>& adj_t, const vec_t<3,Type>& adj_ret) { adj_t.p += adj_ret; } template<typename Type> CUDA_CALLABLE inline void adj_transform_get_rotation(const transform_t<Type>& t, transform_t<Type>& adj_t, const quat_t<Type>& adj_ret) { adj_t.q += adj_ret; } template<typename Type> CUDA_CALLABLE inline void adj_transform_inverse(const transform_t<Type>& t, transform_t<Type>& adj_t, const transform_t<Type>& adj_ret) { // forward quat_t<Type> q_inv = quat_inverse(t.q); vec_t<3,Type> p = quat_rotate(q_inv, t.p); 
vec_t<3,Type> np = -p; // transform<Type> t = transform<Type>(np, q_inv) // backward quat_t<Type> adj_q_inv(0.0f); quat_t<Type> adj_q(0.0f); vec_t<3,Type> adj_p(0.0f); vec_t<3,Type> adj_np(0.0f); adj_transform_t(np, q_inv, adj_np, adj_q_inv, adj_ret); adj_p = -adj_np; adj_quat_rotate(q_inv, t.p, adj_q_inv, adj_t.p, adj_p); adj_quat_inverse(t.q, adj_t.q, adj_q_inv); } template<typename Type> CUDA_CALLABLE inline void adj_transform_vector(const transform_t<Type>& t, const vec_t<3,Type>& x, transform_t<Type>& adj_t, vec_t<3,Type>& adj_x, const vec_t<3,Type>& adj_ret) { adj_quat_rotate(t.q, x, adj_t.q, adj_x, adj_ret); } template<typename Type> CUDA_CALLABLE inline void adj_transform_point(const transform_t<Type>& t, const vec_t<3,Type>& x, transform_t<Type>& adj_t, vec_t<3,Type>& adj_x, const vec_t<3,Type>& adj_ret) { adj_quat_rotate(t.q, x, adj_t.q, adj_x, adj_ret); adj_t.p += adj_ret; } template<typename Type> CUDA_CALLABLE void print(transform_t<Type> t); template<typename Type> CUDA_CALLABLE inline transform_t<Type> lerp(const transform_t<Type>& a, const transform_t<Type>& b, Type t) { return a*(Type(1)-t) + b*t; } template<typename Type> CUDA_CALLABLE inline void adj_lerp(const transform_t<Type>& a, const transform_t<Type>& b, Type t, transform_t<Type>& adj_a, transform_t<Type>& adj_b, Type& adj_t, const transform_t<Type>& adj_ret) { adj_a += adj_ret*(Type(1)-t); adj_b += adj_ret*t; adj_t += tensordot(b, adj_ret) - tensordot(a, adj_ret); } template<typename Type> using spatial_matrix_t = mat_t<6,6,Type>; template<typename Type> inline CUDA_CALLABLE spatial_matrix_t<Type> spatial_adjoint(const mat_t<3,3,Type>& R, const mat_t<3,3,Type>& S) { spatial_matrix_t<Type> adT; // T = [Rah, 0] // [S R] // diagonal blocks for (int i=0; i < 3; ++i) { for (int j=0; j < 3; ++j) { adT.data[i][j] = R.data[i][j]; adT.data[i+3][j+3] = R.data[i][j]; } } // lower off diagonal for (int i=0; i < 3; ++i) { for (int j=0; j < 3; ++j) { adT.data[i+3][j] = S.data[i][j]; } } return adT; } template<typename Type> inline CUDA_CALLABLE void adj_spatial_adjoint(const mat_t<3,3,Type>& R, const mat_t<3,3,Type>& S, mat_t<3,3,Type>& adj_R, mat_t<3,3,Type>& adj_S, const spatial_matrix_t<Type>& adj_ret) { // diagonal blocks for (int i=0; i < 3; ++i) { for (int j=0; j < 3; ++j) { adj_R.data[i][j] += adj_ret.data[i][j]; adj_R.data[i][j] += adj_ret.data[i+3][j+3]; } } // lower off diagonal for (int i=0; i < 3; ++i) { for (int j=0; j < 3; ++j) { adj_S.data[i][j] += adj_ret.data[i+3][j]; } } } CUDA_CALLABLE inline int row_index(int stride, int i, int j) { return i*stride + j; } // builds spatial Jacobian J which is an (joint_count*6)x(dof_count) matrix template<typename Type> CUDA_CALLABLE inline void spatial_jacobian( const spatial_vector_t<Type>* S, const int* joint_parents, const int* joint_qd_start, int joint_start, // offset of the first joint for the articulation int joint_count, int J_start, Type* J) { const int articulation_dof_start = joint_qd_start[joint_start]; const int articulation_dof_end = joint_qd_start[joint_start + joint_count]; const int articulation_dof_count = articulation_dof_end-articulation_dof_start; // shift output pointers const int S_start = articulation_dof_start; S += S_start; J += J_start; for (int i=0; i < joint_count; ++i) { const int row_start = i * 6; int j = joint_start + i; while (j != -1) { const int joint_dof_start = joint_qd_start[j]; const int joint_dof_end = joint_qd_start[j+1]; const int joint_dof_count = joint_dof_end-joint_dof_start; // fill out each row of the Jacobian walking up the 
tree //for (int col=dof_start; col < dof_end; ++col) for (int dof=0; dof < joint_dof_count; ++dof) { const int col = (joint_dof_start-articulation_dof_start) + dof; J[row_index(articulation_dof_count, row_start+0, col)] = S[col].w[0]; J[row_index(articulation_dof_count, row_start+1, col)] = S[col].w[1]; J[row_index(articulation_dof_count, row_start+2, col)] = S[col].w[2]; J[row_index(articulation_dof_count, row_start+3, col)] = S[col].v[0]; J[row_index(articulation_dof_count, row_start+4, col)] = S[col].v[1]; J[row_index(articulation_dof_count, row_start+5, col)] = S[col].v[2]; } j = joint_parents[j]; } } } template<typename Type> CUDA_CALLABLE inline void adj_spatial_jacobian( const spatial_vector_t<Type>* S, const int* joint_parents, const int* joint_qd_start, const int joint_start, const int joint_count, const int J_start, const Type* J, // adjs spatial_vector_t<Type>* adj_S, int* adj_joint_parents, int* adj_joint_qd_start, int& adj_joint_start, int& adj_joint_count, int& adj_J_start, const Type* adj_J) { const int articulation_dof_start = joint_qd_start[joint_start]; const int articulation_dof_end = joint_qd_start[joint_start + joint_count]; const int articulation_dof_count = articulation_dof_end-articulation_dof_start; // shift output pointers const int S_start = articulation_dof_start; S += S_start; J += J_start; adj_S += S_start; adj_J += J_start; for (int i=0; i < joint_count; ++i) { const int row_start = i * 6; int j = joint_start + i; while (j != -1) { const int joint_dof_start = joint_qd_start[j]; const int joint_dof_end = joint_qd_start[j+1]; const int joint_dof_count = joint_dof_end-joint_dof_start; // fill out each row of the Jacobian walking up the tree //for (int col=dof_start; col < dof_end; ++col) for (int dof=0; dof < joint_dof_count; ++dof) { const int col = (joint_dof_start-articulation_dof_start) + dof; adj_S[col].w[0] += adj_J[row_index(articulation_dof_count, row_start+0, col)]; adj_S[col].w[1] += adj_J[row_index(articulation_dof_count, row_start+1, col)]; adj_S[col].w[2] += adj_J[row_index(articulation_dof_count, row_start+2, col)]; adj_S[col].v[0] += adj_J[row_index(articulation_dof_count, row_start+3, col)]; adj_S[col].v[1] += adj_J[row_index(articulation_dof_count, row_start+4, col)]; adj_S[col].v[2] += adj_J[row_index(articulation_dof_count, row_start+5, col)]; } j = joint_parents[j]; } } } template<typename Type> CUDA_CALLABLE inline void spatial_mass(const spatial_matrix_t<Type>* I_s, int joint_start, int joint_count, int M_start, Type* M) { const int stride = joint_count*6; for (int l=0; l < joint_count; ++l) { for (int i=0; i < 6; ++i) { for (int j=0; j < 6; ++j) { M[M_start + row_index(stride, l*6 + i, l*6 + j)] = I_s[joint_start + l].data[i][j]; } } } } template<typename Type> CUDA_CALLABLE inline void adj_spatial_mass( const spatial_matrix_t<Type>* I_s, const int joint_start, const int joint_count, const int M_start, const Type* M, spatial_matrix_t<Type>* adj_I_s, int& adj_joint_start, int& adj_joint_count, int& adj_M_start, const Type* adj_M) { const int stride = joint_count*6; for (int l=0; l < joint_count; ++l) { for (int i=0; i < 6; ++i) { for (int j=0; j < 6; ++j) { adj_I_s[joint_start + l].data[i][j] += adj_M[M_start + row_index(stride, l*6 + i, l*6 + j)]; } } } } using transform = transform_t<float>; using transformh = transform_t<half>; using transformf = transform_t<float>; using transformd = transform_t<double>; using spatial_vector = spatial_vector_t<float>; using spatial_vectorh = spatial_vector_t<half>; using spatial_vectorf = 
spatial_vector_t<float>; using spatial_vectord = spatial_vector_t<double>; using spatial_matrix = spatial_matrix_t<float>; using spatial_matrixh = spatial_matrix_t<half>; using spatial_matrixf = spatial_matrix_t<float>; using spatial_matrixd = spatial_matrix_t<double>; } // namespace wp
19793
C
30.369255
216
0.625827
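
The spatial.h header above backs Warp's spatial-algebra built-ins: spatial_vector_t keeps the angular (w) part in the first three components and the linear (v) part in the last three, transform_t pairs a translation with a quaternion rotation, and the adj_* functions supply the reverse-mode derivatives used by Warp's autodiff. The kernel below is a small, hypothetical Python-side sketch exercising a few of the corresponding built-ins (wp.transform_point, wp.transform_inverse, wp.spatial_top, wp.spatial_bottom, wp.spatial_dot, wp.spatial_cross); the kernel, array names, and test values are illustrative only.

import warp as wp

wp.init()

@wp.kernel
def spatial_demo(
    xforms: wp.array(dtype=wp.transform),
    twists: wp.array(dtype=wp.spatial_vector),
    out_points: wp.array(dtype=wp.vec3),
    out_vals: wp.array(dtype=float),
):
    tid = wp.tid()

    # SE(3): map a body-frame point to world space, then back with the inverse transform
    p_world = wp.transform_point(xforms[tid], wp.vec3(1.0, 0.0, 0.0))
    p_local = wp.transform_point(wp.transform_inverse(xforms[tid]), p_world)
    out_points[tid] = p_local  # recovers (1, 0, 0) up to round-off

    # se(3): spatial_top() is the angular block, spatial_bottom() the linear block
    v = twists[tid]
    w = wp.spatial_top(v)
    u = wp.spatial_bottom(v)
    # spatial_dot is the plain 6-D dot product, so the first difference is identically zero,
    # and the spatial cross product of a twist with itself vanishes as well
    out_vals[tid] = wp.spatial_dot(v, v) - wp.dot(w, w) - wp.dot(u, u) + wp.spatial_dot(v, wp.spatial_cross(v, v))

n = 4
xforms = wp.array([wp.transform(wp.vec3(0.0, 1.0, 0.0), wp.quat_identity())] * n, dtype=wp.transform)
twists = wp.array([wp.spatial_vector(0.0, 0.0, 1.0, 0.5, 0.0, 0.0)] * n, dtype=wp.spatial_vector)
out_points = wp.zeros(n, dtype=wp.vec3)
out_vals = wp.zeros(n, dtype=float)

wp.launch(spatial_demo, dim=n, inputs=[xforms, twists, out_points, out_vals])
print(out_points.numpy(), out_vals.numpy())  # expect rows of (1, 0, 0) and zeros
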
NVIDIA/warp/warp/native/noise.h
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once #ifndef M_PI_F #define M_PI_F 3.14159265358979323846f #endif namespace wp { inline CUDA_CALLABLE float smootherstep(float t) { return t * t * t * (t * (t * 6.f - 15.f) + 10.f); } inline CUDA_CALLABLE float smootherstep_gradient(float t) { return 30.f * t * t * (t * (t - 2.f) + 1.f); } inline CUDA_CALLABLE float smoothstep(float t) { return t * t * (3.f - t * 2.f); } inline CUDA_CALLABLE float smoothstep_gradient(float t) { return 6.f * t * (1.f - t); } inline CUDA_CALLABLE float interpolate(float a0, float a1, float t) { return (a1 - a0) * smootherstep(t) + a0; // return (a1 - a0) * smoothstep(t) + a0; // return (a1 - a0) * t + a0; } inline CUDA_CALLABLE float interpolate_gradient(float a0, float a1, float t, float d_a0, float d_a1, float d_t) { return (d_a1 - d_a0) * smootherstep(t) + (a1 - a0) * smootherstep_gradient(t) * d_t + d_a0; // return (d_a1 - d_a0) * smoothstep(t) + (a1 - a0) * smoothstep_gradient(t) * d_t + d_a0; // return (d_a1 - d_a0) * t + (a1 - a0) * d_t + d_a0; } inline CUDA_CALLABLE vec2 interpolate_gradient_2d(float a0, float a1, float t, vec2& d_a0, vec2& d_a1, vec2& d_t) { return (d_a1 - d_a0) * smootherstep(t) + (a1 - a0) * smootherstep_gradient(t) * d_t + d_a0; } inline CUDA_CALLABLE vec3 interpolate_gradient_3d(float a0, float a1, float t, vec3& d_a0, vec3& d_a1, vec3& d_t) { return (d_a1 - d_a0) * smootherstep(t) + (a1 - a0) * smootherstep_gradient(t) * d_t + d_a0; } inline CUDA_CALLABLE vec4 interpolate_gradient_4d(float a0, float a1, float t, vec4& d_a0, vec4& d_a1, vec4& d_t) { return (d_a1 - d_a0) * smootherstep(t) + (a1 - a0) * smootherstep_gradient(t) * d_t + d_a0; } inline CUDA_CALLABLE float random_gradient_1d(uint32 state, int ix) { const uint32 p1 = 73856093; uint32 idx = ix*p1 + state; return randf(idx, -1.f, 1.f); } inline CUDA_CALLABLE vec2 random_gradient_2d(uint32 state, int ix, int iy) { const uint32 p1 = 73856093; const uint32 p2 = 19349663; uint32 idx = ix*p1 ^ iy*p2 + state; return normalize(sample_unit_square(idx)); } inline CUDA_CALLABLE vec3 random_gradient_3d(uint32 state, int ix, int iy, int iz) { const uint32 p1 = 73856093; const uint32 p2 = 19349663; const uint32 p3 = 53471161; uint32 idx = ix*p1 ^ iy*p2 ^ iz*p3 + state; return normalize(sample_unit_cube(idx)); } inline CUDA_CALLABLE vec4 random_gradient_4d(uint32 state, int ix, int iy, int iz, int it) { const uint32 p1 = 73856093; const uint32 p2 = 19349663; const uint32 p3 = 53471161; const uint32 p4 = 10000019; uint32 idx = ix*p1 ^ iy*p2 ^ iz*p3 ^ it*p4 + state; return normalize(sample_unit_hypercube(idx)); } inline CUDA_CALLABLE float dot_grid_gradient_1d(uint32 state, int ix, float dx) { float gradient = random_gradient_1d(state, ix); return dx*gradient; } inline CUDA_CALLABLE float dot_grid_gradient_2d(uint32 state, int ix, int iy, float dx, float dy) { vec2 gradient = random_gradient_2d(state, ix, iy); return (dx*gradient[0] + dy*gradient[1]); } inline CUDA_CALLABLE float dot_grid_gradient_3d(uint32 state, int ix, int iy, int iz, float dx, float dy, float dz) { vec3 gradient = random_gradient_3d(state, ix, iy, iz); return (dx*gradient[0] + 
dy*gradient[1] + dz*gradient[2]); } inline CUDA_CALLABLE float dot_grid_gradient_4d(uint32 state, int ix, int iy, int iz, int it, float dx, float dy, float dz, float dt) { vec4 gradient = random_gradient_4d(state, ix, iy, iz, it); return (dx*gradient[0] + dy*gradient[1] + dz*gradient[2] + dt*gradient[3]); } inline CUDA_CALLABLE float noise_1d(uint32 state, int x0, int x1, float dx) { //vX float v0 = dot_grid_gradient_1d(state, x0, dx); float v1 = dot_grid_gradient_1d(state, x1, dx-1.f); return interpolate(v0, v1, dx); } inline CUDA_CALLABLE float noise_1d_gradient(uint32 state, int x0, int x1, float dx) { float gradient_x0 = random_gradient_1d(state, x0); float v0 = dx * gradient_x0; float gradient_x1 = random_gradient_1d(state, x1); float v1 = (dx-1.f) * gradient_x1; return interpolate_gradient(v0, v1, dx, gradient_x0, gradient_x1, 1.f); } inline CUDA_CALLABLE float noise_2d(uint32 state, int x0, int y0, int x1, int y1, float dx, float dy) { //vXY float v00 = dot_grid_gradient_2d(state, x0, y0, dx, dy); float v10 = dot_grid_gradient_2d(state, x1, y0, dx-1.f, dy); float xi0 = interpolate(v00, v10, dx); float v01 = dot_grid_gradient_2d(state, x0, y1, dx, dy-1.f); float v11 = dot_grid_gradient_2d(state, x1, y1, dx-1.f, dy-1.f); float xi1 = interpolate(v01, v11, dx); return interpolate(xi0, xi1, dy); } inline CUDA_CALLABLE vec2 noise_2d_gradient(uint32 state, int x0, int y0, int x1, int y1, float dx, float dy) { vec2 d00 = vec2(dx, dy); vec2 gradient_v00 = random_gradient_2d(state, x0, y0); float v00 = dot(d00, gradient_v00); vec2 d10 = vec2(dx-1.f, dy); vec2 gradient_v10 = random_gradient_2d(state, x1, y0); float v10 = dot(d10, gradient_v10); vec2 d01 = vec2(dx, dy-1.f); vec2 gradient_v01 = random_gradient_2d(state, x0, y1); float v01 = dot(d01, gradient_v01); vec2 d11 = vec2(dx-1.f, dy-1.f); vec2 gradient_v11 = random_gradient_2d(state, x1, y1); float v11 = dot(d11, gradient_v11); vec2 dx_dt = vec2(1.f, 0.f); float xi0 = interpolate(v00, v10, dx); vec2 gradient_xi0 = interpolate_gradient_2d(v00, v10, dx, gradient_v00, gradient_v10, dx_dt); float xi1 = interpolate(v01, v11, dx); vec2 gradient_xi1 = interpolate_gradient_2d(v01, v11, dx, gradient_v01, gradient_v11, dx_dt); vec2 dy_dt = vec2(0.f, 1.f); vec2 gradient = interpolate_gradient_2d(xi0, xi1, dy, gradient_xi0, gradient_xi1, dy_dt); return gradient; } inline CUDA_CALLABLE float noise_3d(uint32 state, int x0, int y0, int z0, int x1, int y1, int z1, float dx, float dy, float dz) { //vXYZ float v000 = dot_grid_gradient_3d(state, x0, y0, z0, dx, dy, dz); float v100 = dot_grid_gradient_3d(state, x1, y0, z0, dx-1.f, dy, dz); float xi00 = interpolate(v000, v100, dx); float v010 = dot_grid_gradient_3d(state, x0, y1, z0, dx, dy-1.f, dz); float v110 = dot_grid_gradient_3d(state, x1, y1, z0, dx-1.f, dy-1.f, dz); float xi10 = interpolate(v010, v110, dx); float yi0 = interpolate(xi00, xi10, dy); float v001 = dot_grid_gradient_3d(state, x0, y0, z1, dx, dy, dz-1.f); float v101 = dot_grid_gradient_3d(state, x1, y0, z1, dx-1.f, dy, dz-1.f); float xi01 = interpolate(v001, v101, dx); float v011 = dot_grid_gradient_3d(state, x0, y1, z1, dx, dy-1.f, dz-1.f); float v111 = dot_grid_gradient_3d(state, x1, y1, z1, dx-1.f, dy-1.f, dz-1.f); float xi11 = interpolate(v011, v111, dx); float yi1 = interpolate(xi01, xi11, dy); return interpolate(yi0, yi1, dz); } inline CUDA_CALLABLE vec3 noise_3d_gradient(uint32 state, int x0, int y0, int z0, int x1, int y1, int z1, float dx, float dy, float dz) { vec3 d000 = vec3(dx, dy, dz); vec3 gradient_v000 = 
random_gradient_3d(state, x0, y0, z0); float v000 = dot(d000, gradient_v000); vec3 d100 = vec3(dx-1.f, dy, dz); vec3 gradient_v100 = random_gradient_3d(state, x1, y0, z0); float v100 = dot(d100, gradient_v100); vec3 d010 = vec3(dx, dy-1.f, dz); vec3 gradient_v010 = random_gradient_3d(state, x0, y1, z0); float v010 = dot(d010, gradient_v010); vec3 d110 = vec3(dx-1.f, dy-1.f, dz); vec3 gradient_v110 = random_gradient_3d(state, x1, y1, z0); float v110 = dot(d110, gradient_v110); vec3 d001 = vec3(dx, dy, dz-1.f); vec3 gradient_v001 = random_gradient_3d(state, x0, y0, z1); float v001 = dot(d001, gradient_v001); vec3 d101 = vec3(dx-1.f, dy, dz-1.f); vec3 gradient_v101 = random_gradient_3d(state, x1, y0, z1); float v101 = dot(d101, gradient_v101); vec3 d011 = vec3(dx, dy-1.f, dz-1.f); vec3 gradient_v011 = random_gradient_3d(state, x0, y1, z1); float v011 = dot(d011, gradient_v011); vec3 d111 = vec3(dx-1.f, dy-1.f, dz-1.f); vec3 gradient_v111 = random_gradient_3d(state, x1, y1, z1); float v111 = dot(d111, gradient_v111); vec3 dx_dt = vec3(1.f, 0.f, 0.f); float xi00 = interpolate(v000, v100, dx); vec3 gradient_xi00 = interpolate_gradient_3d(v000, v100, dx, gradient_v000, gradient_v100, dx_dt); float xi10 = interpolate(v010, v110, dx); vec3 gradient_xi10 = interpolate_gradient_3d(v010, v110, dx, gradient_v010, gradient_v110, dx_dt); float xi01 = interpolate(v001, v101, dx); vec3 gradient_xi01 = interpolate_gradient_3d(v001, v101, dx, gradient_v001, gradient_v101, dx_dt); float xi11 = interpolate(v011, v111, dx); vec3 gradient_xi11 = interpolate_gradient_3d(v011, v111, dx, gradient_v011, gradient_v111, dx_dt); vec3 dy_dt = vec3(0.f, 1.f, 0.f); float yi0 = interpolate(xi00, xi10, dy); vec3 gradient_yi0 = interpolate_gradient_3d(xi00, xi10, dy, gradient_xi00, gradient_xi10, dy_dt); float yi1 = interpolate(xi01, xi11, dy); vec3 gradient_yi1 = interpolate_gradient_3d(xi01, xi11, dy, gradient_xi01, gradient_xi11, dy_dt); vec3 dz_dt = vec3(0.f, 0.f, 1.f); vec3 gradient = interpolate_gradient_3d(yi0, yi1, dz, gradient_yi0, gradient_yi1, dz_dt); return gradient; } inline CUDA_CALLABLE float noise_4d(uint32 state, int x0, int y0, int z0, int t0, int x1, int y1, int z1, int t1, float dx, float dy, float dz, float dt) { //vXYZT float v0000 = dot_grid_gradient_4d(state, x0, y0, z0, t0, dx, dy, dz, dt); float v1000 = dot_grid_gradient_4d(state, x1, y0, z0, t0, dx-1.f, dy, dz, dt); float xi000 = interpolate(v0000, v1000, dx); float v0100 = dot_grid_gradient_4d(state, x0, y1, z0, t0, dx, dy-1.f, dz, dt); float v1100 = dot_grid_gradient_4d(state, x1, y1, z0, t0, dx-1.f, dy-1.f, dz, dt); float xi100 = interpolate(v0100, v1100, dx); float yi00 = interpolate(xi000, xi100, dy); float v0010 = dot_grid_gradient_4d(state, x0, y0, z1, t0, dx, dy, dz-1.f, dt); float v1010 = dot_grid_gradient_4d(state, x1, y0, z1, t0, dx-1.f, dy, dz-1.f, dt); float xi010 = interpolate(v0010, v1010, dx); float v0110 = dot_grid_gradient_4d(state, x0, y1, z1, t0, dx, dy-1.f, dz-1.f, dt); float v1110 = dot_grid_gradient_4d(state, x1, y1, z1, t0, dx-1.f, dy-1.f, dz-1.f, dt); float xi110 = interpolate(v0110, v1110, dx); float yi10 = interpolate(xi010, xi110, dy); float zi0 = interpolate(yi00, yi10, dz); float v0001 = dot_grid_gradient_4d(state, x0, y0, z0, t1, dx, dy, dz, dt-1.f); float v1001 = dot_grid_gradient_4d(state, x1, y0, z0, t1, dx-1.f, dy, dz, dt-1.f); float xi001 = interpolate(v0001, v1001, dx); float v0101 = dot_grid_gradient_4d(state, x0, y1, z0, t1, dx, dy-1.f, dz, dt-1.f); float v1101 = dot_grid_gradient_4d(state, x1, y1, z0, t1, 
dx-1.f, dy-1.f, dz, dt-1.f); float xi101 = interpolate(v0101, v1101, dx); float yi01 = interpolate(xi001, xi101, dy); float v0011 = dot_grid_gradient_4d(state, x0, y0, z1, t1, dx, dy, dz-1.f, dt-1.f); float v1011 = dot_grid_gradient_4d(state, x1, y0, z1, t1, dx-1.f, dy, dz-1.f, dt-1.f); float xi011 = interpolate(v0011, v1011, dx); float v0111 = dot_grid_gradient_4d(state, x0, y1, z1, t1, dx, dy-1.f, dz-1.f, dt-1.f); float v1111 = dot_grid_gradient_4d(state, x1, y1, z1, t1, dx-1.f, dy-1.f, dz-1.f, dt-1.f); float xi111 = interpolate(v0111, v1111, dx); float yi11 = interpolate(xi011, xi111, dy); float zi1 = interpolate(yi01, yi11, dz); return interpolate(zi0, zi1, dt); } inline CUDA_CALLABLE vec4 noise_4d_gradient(uint32 state, int x0, int y0, int z0, int t0, int x1, int y1, int z1, int t1, float dx, float dy, float dz, float dt) { vec4 d0000 = vec4(dx, dy, dz, dt); vec4 gradient_v0000 = random_gradient_4d(state, x0, y0, z0, t0); float v0000 = dot(d0000, gradient_v0000); vec4 d1000 = vec4(dx-1.f, dy, dz, dt); vec4 gradient_v1000 = random_gradient_4d(state, x1, y0, z0, t0); float v1000 = dot(d1000, gradient_v1000); vec4 d0100 = vec4(dx, dy-1.f, dz, dt); vec4 gradient_v0100 = random_gradient_4d(state, x0, y1, z0, t0); float v0100 = dot(d0100, gradient_v0100); vec4 d1100 = vec4(dx-1.f, dy-1.f, dz, dt); vec4 gradient_v1100 = random_gradient_4d(state, x1, y1, z0, t0); float v1100 = dot(d1100, gradient_v1100); vec4 d0010 = vec4(dx, dy, dz-1.f, dt); vec4 gradient_v0010 = random_gradient_4d(state, x0, y0, z1, t0); float v0010 = dot(d0010, gradient_v0010); vec4 d1010 = vec4(dx-1.f, dy, dz-1.f, dt); vec4 gradient_v1010 = random_gradient_4d(state, x1, y0, z1, t0); float v1010 = dot(d1010, gradient_v1010); vec4 d0110 = vec4(dx, dy-1.f, dz-1.f, dt); vec4 gradient_v0110 = random_gradient_4d(state, x0, y1, z1, t0); float v0110 = dot(d0110, gradient_v0110); vec4 d1110 = vec4(dx-1.f, dy-1.f, dz-1.f, dt); vec4 gradient_v1110 = random_gradient_4d(state, x1, y1, z1, t0); float v1110 = dot(d1110, gradient_v1110); vec4 d0001 = vec4(dx, dy, dz, dt-1.f); vec4 gradient_v0001 = random_gradient_4d(state, x0, y0, z0, t1); float v0001 = dot(d0001, gradient_v0001); vec4 d1001 = vec4(dx-1.f, dy, dz, dt-1.f); vec4 gradient_v1001 = random_gradient_4d(state, x1, y0, z0, t1); float v1001 = dot(d1001, gradient_v1001); vec4 d0101 = vec4(dx, dy-1.f, dz, dt-1.f); vec4 gradient_v0101 = random_gradient_4d(state, x0, y1, z0, t1); float v0101 = dot(d0101, gradient_v0101); vec4 d1101 = vec4(dx-1.f, dy-1.f, dz, dt-1.f); vec4 gradient_v1101 = random_gradient_4d(state, x1, y1, z0, t1); float v1101 = dot(d1101, gradient_v1101); vec4 d0011 = vec4(dx, dy, dz-1.f, dt-1.f); vec4 gradient_v0011 = random_gradient_4d(state, x0, y0, z1, t1); float v0011 = dot(d0011, gradient_v0011); vec4 d1011 = vec4(dx-1.f, dy, dz-1.f, dt-1.f); vec4 gradient_v1011 = random_gradient_4d(state, x1, y0, z1, t1); float v1011 = dot(d1011, gradient_v1011); vec4 d0111 = vec4(dx, dy-1.f, dz-1.f, dt-1.f); vec4 gradient_v0111 = random_gradient_4d(state, x0, y1, z1, t1); float v0111 = dot(d0111, gradient_v0111); vec4 d1111 = vec4(dx-1.f, dy-1.f, dz-1.f, dt-1.f); vec4 gradient_v1111 = random_gradient_4d(state, x1, y1, z1, t1); float v1111 = dot(d1111, gradient_v1111); vec4 dx_dt = vec4(1.f, 0.f, 0.f, 0.f); float xi000 = interpolate(v0000, v1000, dx); vec4 gradient_xi000 = interpolate_gradient_4d(v0000, v1000, dx, gradient_v0000, gradient_v1000, dx_dt); float xi100 = interpolate(v0100, v1100, dx); vec4 gradient_xi100 = interpolate_gradient_4d(v0100, v1100, dx, gradient_v0100, 
gradient_v1100, dx_dt); float xi010 = interpolate(v0010, v1010, dx); vec4 gradient_xi010 = interpolate_gradient_4d(v0010, v1010, dx, gradient_v0010, gradient_v1010, dx_dt); float xi110 = interpolate(v0110, v1110, dx); vec4 gradient_xi110 = interpolate_gradient_4d(v0110, v1110, dx, gradient_v0110, gradient_v1110, dx_dt); float xi001 = interpolate(v0001, v1001, dx); vec4 gradient_xi001 = interpolate_gradient_4d(v0001, v1001, dx, gradient_v0001, gradient_v1001, dx_dt); float xi101 = interpolate(v0101, v1101, dx); vec4 gradient_xi101 = interpolate_gradient_4d(v0101, v1101, dx, gradient_v0101, gradient_v1101, dx_dt); float xi011 = interpolate(v0011, v1011, dx); vec4 gradient_xi011 = interpolate_gradient_4d(v0011, v1011, dx, gradient_v0011, gradient_v1011, dx_dt); float xi111 = interpolate(v0111, v1111, dx); vec4 gradient_xi111 = interpolate_gradient_4d(v0111, v1111, dx, gradient_v0111, gradient_v1111, dx_dt); vec4 dy_dt = vec4(0.f, 1.f, 0.f, 0.f); float yi00 = interpolate(xi000, xi100, dy); vec4 gradient_yi00 = interpolate_gradient_4d(xi000, xi100, dy, gradient_xi000, gradient_xi100, dy_dt); float yi10 = interpolate(xi010, xi110, dy); vec4 gradient_yi10 = interpolate_gradient_4d(xi010, xi110, dy, gradient_xi010, gradient_xi110, dy_dt); float yi01 = interpolate(xi001, xi101, dy); vec4 gradient_yi01 = interpolate_gradient_4d(xi001, xi101, dy, gradient_xi001, gradient_xi101, dy_dt); float yi11 = interpolate(xi011, xi111, dy); vec4 gradient_yi11 = interpolate_gradient_4d(xi011, xi111, dy, gradient_xi011, gradient_xi111, dy_dt); vec4 dz_dt = vec4(0.f, 0.f, 1.f, 0.f); float zi0 = interpolate(yi00, yi10, dz); vec4 gradient_zi0 = interpolate_gradient_4d(yi00, yi10, dz, gradient_yi00, gradient_yi10, dz_dt); float zi1 = interpolate(yi01, yi11, dz); vec4 gradient_zi1 = interpolate_gradient_4d(yi01, yi11, dz, gradient_yi01, gradient_yi11, dz_dt); vec4 dt_dt = vec4(0.f, 0.f, 0.f, 1.f); vec4 gradient = interpolate_gradient_4d(zi0, zi1, dt, gradient_zi0, gradient_zi1, dt_dt); return gradient; } // non-periodic Perlin noise inline CUDA_CALLABLE float noise(uint32 state, float x) { float dx = x - floor(x); int x0 = (int)floor(x); int x1 = x0 + 1; return noise_1d(state, x0, x1, dx); } inline CUDA_CALLABLE void adj_noise(uint32 state, float x, uint32& adj_state, float& adj_x, const float adj_ret) { float dx = x - floor(x); int x0 = (int)floor(x); int x1 = x0 + 1; float gradient = noise_1d_gradient(state, x0, x1, dx); adj_x += gradient * adj_ret; } inline CUDA_CALLABLE float noise(uint32 state, const vec2& xy) { float dx = xy[0] - floor(xy[0]); float dy = xy[1] - floor(xy[1]); int x0 = (int)floor(xy[0]); int y0 = (int)floor(xy[1]); int x1 = x0 + 1; int y1 = y0 + 1; return noise_2d(state, x0, y0, x1, y1, dx, dy); } inline CUDA_CALLABLE void adj_noise(uint32 state, const vec2& xy, uint32& adj_state, vec2& adj_xy, const float adj_ret) { float dx = xy[0] - floor(xy[0]); float dy = xy[1] - floor(xy[1]); int x0 = (int)floor(xy[0]); int y0 = (int)floor(xy[1]); int x1 = x0 + 1; int y1 = y0 + 1; vec2 gradient = noise_2d_gradient(state, x0, y0, x1, y1, dx, dy); adj_xy[0] += gradient[0] * adj_ret; adj_xy[1] += gradient[1] * adj_ret; } inline CUDA_CALLABLE float noise(uint32 state, const vec3& xyz) { float dx = xyz[0] - floor(xyz[0]); float dy = xyz[1] - floor(xyz[1]); float dz = xyz[2] - floor(xyz[2]); int x0 = (int)floor(xyz[0]); int y0 = (int)floor(xyz[1]); int z0 = (int)floor(xyz[2]); int x1 = x0 + 1; int y1 = y0 + 1; int z1 = z0 + 1; return noise_3d(state, x0, y0, z0, x1, y1, z1, dx, dy, dz); } inline CUDA_CALLABLE void 
adj_noise(uint32 state, const vec3& xyz, uint32& adj_state, vec3& adj_xyz, const float adj_ret) { float dx = xyz[0] - floor(xyz[0]); float dy = xyz[1] - floor(xyz[1]); float dz = xyz[2] - floor(xyz[2]); int x0 = (int)floor(xyz[0]); int y0 = (int)floor(xyz[1]); int z0 = (int)floor(xyz[2]); int x1 = x0 + 1; int y1 = y0 + 1; int z1 = z0 + 1; vec3 gradient = noise_3d_gradient(state, x0, y0, z0, x1, y1, z1, dx, dy, dz); adj_xyz[0] += gradient[0] * adj_ret; adj_xyz[1] += gradient[1] * adj_ret; adj_xyz[2] += gradient[2] * adj_ret; } inline CUDA_CALLABLE float noise(uint32 state, const vec4& xyzt) { float dx = xyzt[0] - floor(xyzt[0]); float dy = xyzt[1] - floor(xyzt[1]); float dz = xyzt[2] - floor(xyzt[2]); float dt = xyzt[3] - floor(xyzt[3]); int x0 = (int)floor(xyzt[0]); int y0 = (int)floor(xyzt[1]); int z0 = (int)floor(xyzt[2]); int t0 = (int)floor(xyzt[3]); int x1 = x0 + 1; int y1 = y0 + 1; int z1 = z0 + 1; int t1 = t0 + 1; return noise_4d(state, x0, y0, z0, t0, x1, y1, z1, t1, dx, dy, dz, dt); } inline CUDA_CALLABLE void adj_noise(uint32 state, const vec4& xyzt, uint32& adj_state, vec4& adj_xyzt, const float adj_ret) { float dx = xyzt[0] - floor(xyzt[0]); float dy = xyzt[1] - floor(xyzt[1]); float dz = xyzt[2] - floor(xyzt[2]); float dt = xyzt[3] - floor(xyzt[3]); int x0 = (int)floor(xyzt[0]); int y0 = (int)floor(xyzt[1]); int z0 = (int)floor(xyzt[2]); int t0 = (int)floor(xyzt[3]); int x1 = x0 + 1; int y1 = y0 + 1; int z1 = z0 + 1; int t1 = t0 + 1; vec4 gradient = noise_4d_gradient(state, x0, y0, z0, t0, x1, y1, z1, t1, dx, dy, dz, dt); adj_xyzt[0] += gradient[0] * adj_ret; adj_xyzt[1] += gradient[1] * adj_ret; adj_xyzt[2] += gradient[2] * adj_ret; adj_xyzt[3] += gradient[3] * adj_ret; } // periodic Perlin noise inline CUDA_CALLABLE float pnoise(uint32 state, float x, int px) { float dx = x - floor(x); int x0 = mod(((int)floor(x)), px); int x1 = mod((x0 + 1), px); return noise_1d(state, x0, x1, dx); } inline CUDA_CALLABLE void adj_pnoise(uint32 state, float x, int px, uint32& adj_state, float& adj_x, int& adj_px, const float adj_ret) { float dx = x - floor(x); int x0 = mod(((int)floor(x)), px); int x1 = mod((x0 + 1), px); float gradient = noise_1d_gradient(state, x0, x1, dx); adj_x += gradient * adj_ret; } inline CUDA_CALLABLE float pnoise(uint32 state, const vec2& xy, int px, int py) { float dx = xy[0] - floor(xy[0]); float dy = xy[1] - floor(xy[1]); int x0 = mod(((int)floor(xy[0])), px); int y0 = mod(((int)floor(xy[1])), py); int x1 = mod((x0 + 1), px); int y1 = mod((y0 + 1), py); return noise_2d(state, x0, y0, x1, y1, dx, dy); } inline CUDA_CALLABLE void adj_pnoise(uint32 state, const vec2& xy, int px, int py, uint32& adj_state, vec2& adj_xy, int& adj_px, int& adj_py, const float adj_ret) { float dx = xy[0] - floor(xy[0]); float dy = xy[1] - floor(xy[1]); int x0 = mod(((int)floor(xy[0])), px); int y0 = mod(((int)floor(xy[1])), py); int x1 = mod((x0 + 1), px); int y1 = mod((y0 + 1), py); vec2 gradient = noise_2d_gradient(state, x0, y0, x1, y1, dx, dy); adj_xy[0] += gradient[0] * adj_ret; adj_xy[1] += gradient[1] * adj_ret; } inline CUDA_CALLABLE float pnoise(uint32 state, const vec3& xyz, int px, int py, int pz) { float dx = xyz[0] - floor(xyz[0]); float dy = xyz[1] - floor(xyz[1]); float dz = xyz[2] - floor(xyz[2]); int x0 = mod(((int)floor(xyz[0])), px); int y0 = mod(((int)floor(xyz[1])), py); int z0 = mod(((int)floor(xyz[2])), pz); int x1 = mod((x0 + 1), px); int y1 = mod((y0 + 1), py); int z1 = mod((z0 + 1), pz); return noise_3d(state, x0, y0, z0, x1, y1, z1, dx, dy, dz); } inline 
CUDA_CALLABLE void adj_pnoise(uint32 state, const vec3& xyz, int px, int py, int pz, uint32& adj_state, vec3& adj_xyz, int& adj_px, int& adj_py, int& adj_pz, const float adj_ret) { float dx = xyz[0] - floor(xyz[0]); float dy = xyz[1] - floor(xyz[1]); float dz = xyz[2] - floor(xyz[2]); int x0 = mod(((int)floor(xyz[0])), px); int y0 = mod(((int)floor(xyz[1])), py); int z0 = mod(((int)floor(xyz[2])), pz); int x1 = mod((x0 + 1), px); int y1 = mod((y0 + 1), py); int z1 = mod((z0 + 1), pz); vec3 gradient = noise_3d_gradient(state, x0, y0, z0, x1, y1, z1, dx, dy, dz); adj_xyz[0] += gradient[0] * adj_ret; adj_xyz[1] += gradient[1] * adj_ret; adj_xyz[2] += gradient[2] * adj_ret; } inline CUDA_CALLABLE float pnoise(uint32 state, const vec4& xyzt, int px, int py, int pz, int pt) { float dx = xyzt[0] - floor(xyzt[0]); float dy = xyzt[1] - floor(xyzt[1]); float dz = xyzt[2] - floor(xyzt[2]); float dt = xyzt[3] - floor(xyzt[3]); int x0 = mod(((int)floor(xyzt[0])), px); int y0 = mod(((int)floor(xyzt[1])), py); int z0 = mod(((int)floor(xyzt[2])), pz); int t0 = mod(((int)floor(xyzt[3])), pt); int x1 = mod((x0 + 1), px); int y1 = mod((y0 + 1), py); int z1 = mod((z0 + 1), pz); int t1 = mod((t0 + 1), pt); return noise_4d(state, x0, y0, z0, t0, x1, y1, z1, t1, dx, dy, dz, dt); } inline CUDA_CALLABLE void adj_pnoise(uint32 state, const vec4& xyzt, int px, int py, int pz, int pt, uint32& adj_state, vec4& adj_xyzt, int& adj_px, int& adj_py, int& adj_pz, int& adj_pt, const float adj_ret) { float dx = xyzt[0] - floor(xyzt[0]); float dy = xyzt[1] - floor(xyzt[1]); float dz = xyzt[2] - floor(xyzt[2]); float dt = xyzt[3] - floor(xyzt[3]); int x0 = mod(((int)floor(xyzt[0])), px); int y0 = mod(((int)floor(xyzt[1])), py); int z0 = mod(((int)floor(xyzt[2])), pz); int t0 = mod(((int)floor(xyzt[3])), pt); int x1 = mod((x0 + 1), px); int y1 = mod((y0 + 1), py); int z1 = mod((z0 + 1), pz); int t1 = mod((t0 + 1), pt); vec4 gradient = noise_4d_gradient(state, x0, y0, z0, t0, x1, y1, z1, t1, dx, dy, dz, dt); adj_xyzt[0] += gradient[0] * adj_ret; adj_xyzt[1] += gradient[1] * adj_ret; adj_xyzt[2] += gradient[2] * adj_ret; adj_xyzt[3] += gradient[3] * adj_ret; } // curl noise inline CUDA_CALLABLE vec2 curlnoise(uint32 state, const vec2& xy, const uint32 octaves, const float lacunarity, const float gain) { vec2 curl_sum = vec2(0.f); float freq = 1.f; float amplitude = 1.f; for (int i = 0; i < octaves; i++) { vec2 pt = freq * xy; float dx = pt[0] - floor(pt[0]); float dy = pt[1] - floor(pt[1]); int x0 = (int)floor(pt[0]); int y0 = (int)floor(pt[1]); int x1 = x0 + 1; int y1 = y0 + 1; vec2 grad_field = noise_2d_gradient(state, x0, y0, x1, y1, dx, dy); curl_sum += amplitude * grad_field; amplitude *= gain; freq *= lacunarity; } return vec2(-curl_sum[1], curl_sum[0]); } inline CUDA_CALLABLE void adj_curlnoise(uint32 state, const vec2& xy, const uint32 octaves, const float lacunarity, const float gain, uint32& adj_state, vec2& adj_xy, const uint32& adj_octaves, const float& adj_lacunarity, const float& adj_gain, const vec2& adj_ret) {} inline CUDA_CALLABLE vec3 curlnoise(uint32 state, const vec3& xyz, const uint32 octaves, const float lacunarity, const float gain) { vec3 curl_sum_1 = vec3(0.f); vec3 curl_sum_2 = vec3(0.f); vec3 curl_sum_3 = vec3(0.f); float freq = 1.f; float amplitude = 1.f; for(int i = 0; i < octaves; i++) { vec3 pt = freq * xyz; float dx = pt[0] - floor(pt[0]); float dy = pt[1] - floor(pt[1]); float dz = pt[2] - floor(pt[2]); int x0 = (int)floor(pt[0]); int y0 = (int)floor(pt[1]); int z0 = (int)floor(pt[2]); int x1 = x0 
+ 1; int y1 = y0 + 1; int z1 = z0 + 1; vec3 grad_field_1 = noise_3d_gradient(state, x0, y0, z0, x1, y1, z1, dx, dy, dz); state = rand_init(state, 10019689); vec3 grad_field_2 = noise_3d_gradient(state, x0, y0, z0, x1, y1, z1, dx, dy, dz); state = rand_init(state, 13112221); vec3 grad_field_3 = noise_3d_gradient(state, x0, y0, z0, x1, y1, z1, dx, dy, dz); curl_sum_1 += amplitude * grad_field_1; curl_sum_2 += amplitude * grad_field_2; curl_sum_3 += amplitude * grad_field_3; amplitude *= gain; freq *= lacunarity; } return vec3( curl_sum_3[1] - curl_sum_2[2], curl_sum_1[2] - curl_sum_3[0], curl_sum_2[0] - curl_sum_1[1]); } inline CUDA_CALLABLE void adj_curlnoise(uint32 state, const vec3& xyz, const uint32 octaves, const float lacunarity, const float gain, uint32& adj_state, vec3& adj_xyz, const uint32& adj_octaves, const float& adj_lacunarity, const float& adj_gain, vec3& adj_ret) {} inline CUDA_CALLABLE vec3 curlnoise(uint32 state, const vec4& xyzt, const uint32 octaves, const float lacunarity, const float gain) { vec4 curl_sum_1 = vec4(0.f); vec4 curl_sum_2 = vec4(0.f); vec4 curl_sum_3 = vec4(0.f); float freq = 1.f; float amplitude = 1.f; for(int i = 0; i < octaves; i++) { vec4 pt = freq * xyzt; float dx = pt[0] - floor(pt[0]); float dy = pt[1] - floor(pt[1]); float dz = pt[2] - floor(pt[2]); float dt = pt[3] - floor(pt[3]); int x0 = (int)floor(pt[0]); int y0 = (int)floor(pt[1]); int z0 = (int)floor(pt[2]); int t0 = (int)floor(pt[3]); int x1 = x0 + 1; int y1 = y0 + 1; int z1 = z0 + 1; int t1 = t0 + 1; vec4 grad_field_1 = noise_4d_gradient(state, x0, y0, z0, t0, x1, y1, z1, t1, dx, dy, dz, dt); state = rand_init(state, 10019689); vec4 grad_field_2 = noise_4d_gradient(state, x0, y0, z0, t0, x1, y1, z1, t1, dx, dy, dz, dt); state = rand_init(state, 13112221); vec4 grad_field_3 = noise_4d_gradient(state, x0, y0, z0, t0, x1, y1, z1, t1, dx, dy, dz, dt); curl_sum_1 += amplitude * grad_field_1; curl_sum_2 += amplitude * grad_field_2; curl_sum_3 += amplitude * grad_field_3; amplitude *= gain; freq *= lacunarity; } return vec3( curl_sum_3[1] - curl_sum_2[2], curl_sum_1[2] - curl_sum_3[0], curl_sum_2[0] - curl_sum_1[1]); } inline CUDA_CALLABLE void adj_curlnoise(uint32 state, const vec4& xyzt, const uint32 octaves, const float lacunarity, const float gain, uint32& adj_state, vec4& adj_xyzt, const uint32& adj_octaves, const float& adj_lacunarity, const float& adj_gain, const vec3& adj_ret) {} } // namespace wp
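The functions above back Warp's Python-level noise builtins (wp.noise, wp.pnoise, wp.curlnoise). As a minimal usage sketch, assuming a Warp build whose Python signatures match this header (the kernel name and parameter values below are made up for illustration, not taken from the repository):

import warp as wp

wp.init()

@wp.kernel
def sample_noise(values: wp.array(dtype=float), frequency: float):
    tid = wp.tid()
    state = wp.rand_init(1234)
    p = wp.vec2(float(tid) * frequency, 0.5)
    # non-periodic Perlin noise; wp.pnoise(state, p, 16, 16) would instead give a
    # field that tiles with period 16 along each axis
    values[tid] = wp.noise(state, p)

values = wp.zeros(1024, dtype=float)
wp.launch(sample_noise, dim=1024, inputs=[values, 0.05])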
29,762
C
33.974148
273
0.611518
NVIDIA/warp/warp/native/vec.h
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once #include "initializer_array.h" namespace wp { template<unsigned Length, typename Type> struct vec_t { Type c[Length]; inline CUDA_CALLABLE vec_t() : c() {} inline CUDA_CALLABLE vec_t(Type s) { for( unsigned i=0; i < Length; ++i ) { c[i] = s; } } template <typename OtherType> inline explicit CUDA_CALLABLE vec_t(const vec_t<Length, OtherType>& other) { for( unsigned i=0; i < Length; ++i ) { c[i] = static_cast<Type>(other[i]); } } inline CUDA_CALLABLE vec_t(Type x, Type y) { assert(Length == 2); c[0]=x; c[1]=y; } inline CUDA_CALLABLE vec_t(Type x, Type y, Type z) { assert(Length == 3); c[0]=x; c[1]=y; c[2]=z; } inline CUDA_CALLABLE vec_t(Type x, Type y, Type z, Type w) { assert(Length == 4); c[0]=x; c[1]=y; c[2]=z; c[3]=w; } inline CUDA_CALLABLE vec_t(const initializer_array<Length, Type> &l) { for( unsigned i=0; i < Length; ++i ) { c[i] = l[i]; } } // special screw vector constructor for spatial_vectors: inline CUDA_CALLABLE vec_t(vec_t<3,Type> w, vec_t<3,Type> v) { c[0] = w[0]; c[1] = w[1]; c[2] = w[2]; c[3] = v[0]; c[4] = v[1]; c[5] = v[2]; } inline CUDA_CALLABLE Type operator[](int index) const { assert(index < Length); return c[index]; } inline CUDA_CALLABLE Type& operator[](int index) { assert(index < Length); return c[index]; } }; using vec2b = vec_t<2,int8>; using vec3b = vec_t<3,int8>; using vec4b = vec_t<4,int8>; using vec2ub = vec_t<2,uint8>; using vec3ub = vec_t<3,uint8>; using vec4ub = vec_t<4,uint8>; using vec2s = vec_t<2,int16>; using vec3s = vec_t<3,int16>; using vec4s = vec_t<4,int16>; using vec2us = vec_t<2,uint16>; using vec3us = vec_t<3,uint16>; using vec4us = vec_t<4,uint16>; using vec2i = vec_t<2,int32>; using vec3i = vec_t<3,int32>; using vec4i = vec_t<4,int32>; using vec2ui = vec_t<2,uint32>; using vec3ui = vec_t<3,uint32>; using vec4ui = vec_t<4,uint32>; using vec2l = vec_t<2,int64>; using vec3l = vec_t<3,int64>; using vec4l = vec_t<4,int64>; using vec2ul = vec_t<2,uint64>; using vec3ul = vec_t<3,uint64>; using vec4ul = vec_t<4,uint64>; using vec2h = vec_t<2,half>; using vec3h = vec_t<3,half>; using vec4h = vec_t<4,half>; using vec2 = vec_t<2,float>; using vec3 = vec_t<3,float>; using vec4 = vec_t<4,float>; using vec2f = vec_t<2,float>; using vec3f = vec_t<3,float>; using vec4f = vec_t<4,float>; using vec2d = vec_t<2,double>; using vec3d = vec_t<3,double>; using vec4d = vec_t<4,double>; //-------------- // vec<Length, Type> methods // Should these accept const references as arguments? It's all // inlined so maybe it doesn't matter? Even if it does, it // probably depends on the Length of the vector... // negation: template<unsigned Length, typename Type> inline CUDA_CALLABLE vec_t<Length, Type> operator - (vec_t<Length, Type> a) { // NB: this constructor will initialize all ret's components to 0, which is // unnecessary... vec_t<Length, Type> ret; for( unsigned i=0; i < Length; ++i ) { ret[i] = -a[i]; } // Wonder if this does a load of copying when it returns... hopefully not as it's inlined? 
return ret; } template<unsigned Length, typename Type> CUDA_CALLABLE inline vec_t<Length, Type> pos(const vec_t<Length, Type>& x) { return x; } template<unsigned Length, typename Type> CUDA_CALLABLE inline vec_t<Length, Type> neg(const vec_t<Length, Type>& x) { return -x; } template<typename Type> CUDA_CALLABLE inline vec_t<3, Type> neg(const vec_t<3, Type>& x) { return vec_t<3, Type>(-x.c[0], -x.c[1], -x.c[2]); } template<typename Type> CUDA_CALLABLE inline vec_t<2, Type> neg(const vec_t<2, Type>& x) { return vec_t<2, Type>(-x.c[0], -x.c[1]); } template<unsigned Length, typename Type> CUDA_CALLABLE inline void adj_neg(const vec_t<Length, Type>& x, vec_t<Length, Type>& adj_x, const vec_t<Length, Type>& adj_ret) { adj_x -= adj_ret; } // equality: template<unsigned Length, typename Type> inline CUDA_CALLABLE bool operator ==(const vec_t<Length, Type>& a, const vec_t<Length, Type>& b) { for( unsigned i=0; i < Length; ++i ) { if(a[i] != b[i]) { return false; } } return true; } // scalar multiplication: template<unsigned Length, typename Type> inline CUDA_CALLABLE vec_t<Length, Type> mul(vec_t<Length, Type> a, Type s) { vec_t<Length, Type> ret; for( unsigned i=0; i < Length; ++i ) { ret[i] = a[i] * s; } return ret; } template<typename Type> inline CUDA_CALLABLE vec_t<3, Type> mul(vec_t<3, Type> a, Type s) { return vec_t<3, Type>(a.c[0]*s,a.c[1]*s,a.c[2]*s); } template<typename Type> inline CUDA_CALLABLE vec_t<2, Type> mul(vec_t<2, Type> a, Type s) { return vec_t<2, Type>(a.c[0]*s,a.c[1]*s); } template<unsigned Length, typename Type> inline CUDA_CALLABLE vec_t<Length, Type> mul(Type s, vec_t<Length, Type> a) { return mul(a, s); } template<unsigned Length, typename Type> inline CUDA_CALLABLE vec_t<Length, Type> operator*(Type s, vec_t<Length, Type> a) { return mul(a, s); } template<unsigned Length, typename Type> inline CUDA_CALLABLE vec_t<Length, Type> operator*(vec_t<Length, Type> a, Type s) { return mul(a, s); } // component wise multiplication: template<unsigned Length, typename Type> inline CUDA_CALLABLE vec_t<Length, Type> cw_mul(vec_t<Length, Type> a, vec_t<Length, Type> b) { vec_t<Length, Type> ret; for( unsigned i=0; i < Length; ++i ) { ret[i] = a[i] * b[i]; } return ret; } // division template<unsigned Length, typename Type> inline CUDA_CALLABLE vec_t<Length, Type> div(vec_t<Length, Type> a, Type s) { vec_t<Length, Type> ret; for( unsigned i=0; i < Length; ++i ) { ret[i] = a[i] / s; } return ret; } template<typename Type> inline CUDA_CALLABLE vec_t<3, Type> div(vec_t<3, Type> a, Type s) { return vec_t<3, Type>(a.c[0]/s,a.c[1]/s,a.c[2]/s); } template<typename Type> inline CUDA_CALLABLE vec_t<2, Type> div(vec_t<2, Type> a, Type s) { return vec_t<2, Type>(a.c[0]/s,a.c[1]/s); } template<unsigned Length, typename Type> inline CUDA_CALLABLE vec_t<Length, Type> div(Type s, vec_t<Length, Type> a) { vec_t<Length, Type> ret; for (unsigned i=0; i < Length; ++i) { ret[i] = s / a[i]; } return ret; } template<typename Type> inline CUDA_CALLABLE vec_t<3, Type> div(Type s, vec_t<3, Type> a) { return vec_t<3, Type>(s/a.c[0],s/a.c[1],s/a.c[2]); } template<typename Type> inline CUDA_CALLABLE vec_t<2, Type> div(Type s, vec_t<2, Type> a) { return vec_t<2, Type>(s/a.c[0],s/a.c[1]); } template<unsigned Length, typename Type> inline CUDA_CALLABLE vec_t<Length, Type> operator / (vec_t<Length, Type> a, Type s) { return div(a,s); } template<unsigned Length, typename Type> inline CUDA_CALLABLE vec_t<Length, Type> operator / (Type s, vec_t<Length, Type> a) { return div(s, a); } // component wise division 
template<unsigned Length, typename Type> inline CUDA_CALLABLE vec_t<Length, Type> cw_div(vec_t<Length, Type> a, vec_t<Length, Type> b) { vec_t<Length, Type> ret; for( unsigned i=0; i < Length; ++i ) { ret[i] = a[i] / b[i]; } return ret; } // addition template<unsigned Length, typename Type> inline CUDA_CALLABLE vec_t<Length, Type> add(vec_t<Length, Type> a, vec_t<Length, Type> b) { vec_t<Length, Type> ret; for( unsigned i=0; i < Length; ++i ) { ret[i] = a[i] + b[i]; } return ret; } template<typename Type> inline CUDA_CALLABLE vec_t<2, Type> add(vec_t<2, Type> a, vec_t<2, Type> b) { return vec_t<2, Type>( a.c[0] + b.c[0], a.c[1] + b.c[1]); } template<typename Type> inline CUDA_CALLABLE vec_t<3, Type> add(vec_t<3, Type> a, vec_t<3, Type> b) { return vec_t<3, Type>( a.c[0] + b.c[0], a.c[1] + b.c[1], a.c[2] + b.c[2]); } // subtraction template<unsigned Length, typename Type> inline CUDA_CALLABLE vec_t<Length, Type> sub(vec_t<Length, Type> a, vec_t<Length, Type> b) { vec_t<Length, Type> ret; for( unsigned i=0; i < Length; ++i ) { ret[i] = Type(a[i] - b[i]); } return ret; } template<typename Type> inline CUDA_CALLABLE vec_t<2, Type> sub(vec_t<2, Type> a, vec_t<2, Type> b) { return vec_t<2, Type>( a.c[0] - b.c[0], a.c[1] - b.c[1]); } template<typename Type> inline CUDA_CALLABLE vec_t<3, Type> sub(vec_t<3, Type> a, vec_t<3, Type> b) { return vec_t<3, Type>( a.c[0] - b.c[0], a.c[1] - b.c[1], a.c[2] - b.c[2]); } // dot product: template<unsigned Length, typename Type> inline CUDA_CALLABLE Type dot(vec_t<Length, Type> a, vec_t<Length, Type> b) { Type ret(0); for( unsigned i=0; i < Length; ++i ) { ret += a[i] * b[i]; } return ret; } template<typename Type> inline CUDA_CALLABLE Type dot(vec_t<2, Type> a, vec_t<2, Type> b) { return a.c[0] * b.c[0] + a.c[1] * b.c[1]; } template<typename Type> inline CUDA_CALLABLE Type dot(vec_t<3, Type> a, vec_t<3, Type> b) { return a.c[0] * b.c[0] + a.c[1] * b.c[1] + a.c[2] * b.c[2]; } template<unsigned Length, typename Type> inline CUDA_CALLABLE Type tensordot(vec_t<Length, Type> a, vec_t<Length, Type> b) { // corresponds to `np.tensordot()` with all axes being contracted return dot(a, b); } template<unsigned Length, typename Type> inline CUDA_CALLABLE Type extract(const vec_t<Length, Type> & a, int idx) { #ifndef NDEBUG if (idx < 0 || idx >= Length) { printf("vec index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__); assert(0); } #endif return a[idx]; } template<unsigned Length, typename Type> inline CUDA_CALLABLE Type* index(vec_t<Length, Type>& v, int idx) { #ifndef NDEBUG if (idx < 0 || idx >= Length) { printf("vec index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__); assert(0); } #endif return &v[idx]; } template<unsigned Length, typename Type> inline CUDA_CALLABLE Type* indexref(vec_t<Length, Type>* v, int idx) { #ifndef NDEBUG if (idx < 0 || idx >= Length) { printf("vec store %d out of bounds at %s %d\n", idx, __FILE__, __LINE__); assert(0); } #endif return &((*v)[idx]); } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_index(vec_t<Length, Type>& v, int idx, vec_t<Length, Type>& adj_v, int adj_idx, const Type& adj_value) { // nop } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_indexref(vec_t<Length, Type>* v, int idx, vec_t<Length, Type>& adj_v, int adj_idx, const Type& adj_value) { // nop } template<unsigned Length, typename Type> inline CUDA_CALLABLE Type length(vec_t<Length, Type> a) { return sqrt(dot(a, a)); } template<unsigned Length, typename Type> inline CUDA_CALLABLE Type 
length_sq(vec_t<Length, Type> a) { return dot(a, a); } template<typename Type> inline CUDA_CALLABLE Type length(vec_t<2, Type> a) { return sqrt(a.c[0] * a.c[0] + a.c[1] * a.c[1]); } template<typename Type> inline CUDA_CALLABLE Type length(vec_t<3, Type> a) { return sqrt(a.c[0] * a.c[0] + a.c[1] * a.c[1] + a.c[2] * a.c[2]); } template<unsigned Length, typename Type> inline CUDA_CALLABLE vec_t<Length, Type> normalize(vec_t<Length, Type> a) { Type l = length(a); if (l > Type(kEps)) return div(a,l); else return vec_t<Length, Type>(); } template<typename Type> inline CUDA_CALLABLE vec_t<2, Type> normalize(vec_t<2, Type> a) { Type l = sqrt(a.c[0] * a.c[0] + a.c[1] * a.c[1]); if (l > Type(kEps)) return vec_t<2, Type>(a.c[0]/l,a.c[1]/l); else return vec_t<2, Type>(); } template<typename Type> inline CUDA_CALLABLE vec_t<3, Type> normalize(vec_t<3, Type> a) { Type l = sqrt(a.c[0] * a.c[0] + a.c[1] * a.c[1] + a.c[2] * a.c[2]); if (l > Type(kEps)) return vec_t<3, Type>(a.c[0]/l,a.c[1]/l,a.c[2]/l); else return vec_t<3, Type>(); } template<typename Type> inline CUDA_CALLABLE vec_t<3,Type> cross(vec_t<3,Type> a, vec_t<3,Type> b) { return { Type(a[1]*b[2] - a[2]*b[1]), Type(a[2]*b[0] - a[0]*b[2]), Type(a[0]*b[1] - a[1]*b[0]) }; } template<unsigned Length, typename Type> inline bool CUDA_CALLABLE isfinite(vec_t<Length, Type> x) { for( unsigned i=0; i < Length; ++i ) { if(!isfinite(x[i])) { return false; } } return true; } template<unsigned Length, typename Type> inline bool CUDA_CALLABLE isnan(vec_t<Length, Type> x) { for( unsigned i=0; i < Length; ++i ) { if(isnan(x[i])) { return true; } } return false; } template<unsigned Length, typename Type> inline bool CUDA_CALLABLE isinf(vec_t<Length, Type> x) { for( unsigned i=0; i < Length; ++i ) { if(isinf(x[i])) { return true; } } return false; } // These two functions seem to compile very slowly template<unsigned Length, typename Type> inline CUDA_CALLABLE vec_t<Length,Type> min(vec_t<Length,Type> a, vec_t<Length,Type> b) { vec_t<Length,Type> ret; for( unsigned i=0; i < Length; ++i ) { ret[i] = a[i] < b[i] ? a[i] : b[i]; } return ret; } template<unsigned Length, typename Type> inline CUDA_CALLABLE vec_t<Length,Type> max(vec_t<Length,Type> a, vec_t<Length,Type> b) { vec_t<Length,Type> ret; for( unsigned i=0; i < Length; ++i ) { ret[i] = a[i] > b[i] ? 
a[i] : b[i]; } return ret; } template<unsigned Length, typename Type> inline CUDA_CALLABLE Type min(vec_t<Length,Type> v) { Type ret = v[0]; for( unsigned i=1; i < Length; ++i ) { if (v[i] < ret) ret = v[i]; } return ret; } template<unsigned Length, typename Type> inline CUDA_CALLABLE Type max(vec_t<Length,Type> v) { Type ret = v[0]; for( unsigned i=1; i < Length; ++i ) { if (v[i] > ret) ret = v[i]; } return ret; } template<unsigned Length, typename Type> inline CUDA_CALLABLE unsigned argmin(vec_t<Length,Type> v) { unsigned ret = 0; for( unsigned i=1; i < Length; ++i ) { if (v[i] < v[ret]) ret = i; } return ret; } template<unsigned Length, typename Type> inline CUDA_CALLABLE unsigned argmax(vec_t<Length,Type> v) { unsigned ret = 0; for( unsigned i=1; i < Length; ++i ) { if (v[i] > v[ret]) ret = i; } return ret; } template<unsigned Length, typename Type> inline CUDA_CALLABLE void expect_near(const vec_t<Length, Type>& actual, const vec_t<Length, Type>& expected, const Type& tolerance) { const Type diff(0); for(size_t i=0; i<Length; ++i) { diff = max(diff,abs(actual[i] - expected[i])); } if (diff > tolerance) { printf("Error, expect_near() failed with tolerance "); print(tolerance); printf("\t Expected: "); print(expected); printf("\t Actual: "); print(actual); } } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_expect_near(const vec_t<Length, Type>& actual, const vec_t<Length, Type>& expected, Type tolerance, vec_t<Length, Type>& adj_actual, vec_t<Length, Type>& adj_expected, Type adj_tolerance) { // nop } // adjoint for the initializer_array constructor: template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_vec_t(const initializer_array<Length, Type> &cmps, const initializer_array<Length, Type*> &adj_cmps, const vec_t<Length, Type>& adj_ret) { for(unsigned i=0; i < Length; ++i) { *(adj_cmps[i]) += adj_ret[i]; } } // adjoint for the component constructors: template<typename Type> inline CUDA_CALLABLE void adj_vec_t(Type cmpx, Type cmpy, Type &adj_cmpx, Type &adj_cmpy, const vec_t<2, Type>& adj_ret) { adj_cmpx += adj_ret.c[0]; adj_cmpy += adj_ret.c[1]; } template<typename Type> inline CUDA_CALLABLE void adj_vec_t(Type cmpx, Type cmpy, Type cmpz, Type &adj_cmpx, Type &adj_cmpy, Type &adj_cmpz, const vec_t<3, Type>& adj_ret) { adj_cmpx += adj_ret.c[0]; adj_cmpy += adj_ret.c[1]; adj_cmpz += adj_ret.c[2]; } template<typename Type> inline CUDA_CALLABLE void adj_vec_t(Type cmpx, Type cmpy, Type cmpz, Type cmpw, Type &adj_cmpx, Type &adj_cmpy, Type &adj_cmpz, Type &adj_cmpw, const vec_t<4, Type>& adj_ret) { adj_cmpx += adj_ret.c[0]; adj_cmpy += adj_ret.c[1]; adj_cmpz += adj_ret.c[2]; adj_cmpw += adj_ret.c[3]; } // adjoint for the constant constructor: template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_vec_t(Type s, Type& adj_s, const vec_t<Length, Type>& adj_ret) { for( unsigned i=0; i < Length; ++i ) { adj_s += adj_ret[i]; } } // adjoint for the casting constructor template<unsigned Length, typename Type, typename OtherType> inline CUDA_CALLABLE void adj_vec_t(const vec_t<Length, OtherType>& other, vec_t<Length, OtherType>& adj_other, const vec_t<Length, Type>& adj_ret) { for( unsigned i=0; i < Length; ++i ) { adj_other[i] += static_cast<OtherType>(adj_ret[i]); } } template<typename Type> CUDA_CALLABLE inline void adj_vec_t(const vec_t<3,Type>& w, const vec_t<3,Type>& v, vec_t<3,Type>& adj_w, vec_t<3,Type>& adj_v, const vec_t<6,Type>& adj_ret) { adj_w[0] += adj_ret[0]; adj_w[1] += adj_ret[1]; adj_w[2] += adj_ret[2]; adj_v[0] += 
adj_ret[3]; adj_v[1] += adj_ret[4]; adj_v[2] += adj_ret[5]; } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_mul(vec_t<Length, Type> a, Type s, vec_t<Length, Type>& adj_a, Type& adj_s, const vec_t<Length, Type>& adj_ret) { for( unsigned i=0; i < Length; ++i ) { adj_a[i] += s*adj_ret[i]; } adj_s += dot(a, adj_ret); #if FP_CHECK if (!isfinite(a) || !isfinite(s) || !isfinite(adj_a) || !isfinite(adj_s) || !isfinite(adj_ret)) { // \TODO: How shall we implement this error message? //printf("adj_mul((%f %f %f %f), %f, (%f %f %f %f), %f, (%f %f %f %f)\n", a.x, a.y, a.z, a.w, s, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_s, adj_ret.x, adj_ret.y, adj_ret.z, adj_ret.w); assert(0); } #endif } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_mul(Type s, vec_t<Length, Type> a, Type& adj_s, vec_t<Length, Type>& adj_a, const vec_t<Length, Type>& adj_ret) { adj_mul(a, s, adj_a, adj_s, adj_ret); } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_cw_mul(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const vec_t<Length, Type>& adj_ret) { adj_a += cw_mul(b, adj_ret); adj_b += cw_mul(a, adj_ret); } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_div(vec_t<Length, Type> a, Type s, vec_t<Length, Type>& adj_a, Type& adj_s, const vec_t<Length, Type>& adj_ret) { adj_s -= dot(a , adj_ret)/ (s * s); // - a / s^2 for( unsigned i=0; i < Length; ++i ) { adj_a[i] += adj_ret[i] / s; } #if FP_CHECK if (!isfinite(a) || !isfinite(s) || !isfinite(adj_a) || !isfinite(adj_s) || !isfinite(adj_ret)) { // \TODO: How shall we implement this error message? // printf("adj_div((%f %f %f %f), %f, (%f %f %f %f), %f, (%f %f %f %f)\n", a.x, a.y, a.z, a.w, s, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_s, adj_ret.x, adj_ret.y, adj_ret.z, adj_ret.w); assert(0); } #endif } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_div(Type s, vec_t<Length, Type> a, Type& adj_s, vec_t<Length, Type>& adj_a, const vec_t<Length, Type>& adj_ret) { adj_s -= dot(a , adj_ret)/ (s * s); // - a / s^2 for( unsigned i=0; i < Length; ++i ) { adj_a[i] += s / adj_ret[i]; } #if FP_CHECK if (!isfinite(a) || !isfinite(s) || !isfinite(adj_a) || !isfinite(adj_s) || !isfinite(adj_ret)) { // \TODO: How shall we implement this error message? 
// printf("adj_div((%f %f %f %f), %f, (%f %f %f %f), %f, (%f %f %f %f)\n", a.x, a.y, a.z, a.w, s, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_s, adj_ret.x, adj_ret.y, adj_ret.z, adj_ret.w); assert(0); } #endif } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_cw_div(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& ret, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const vec_t<Length, Type>& adj_ret) { adj_a += cw_div(adj_ret, b); adj_b -= cw_mul(adj_ret, cw_div(ret, b)); } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_add(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const vec_t<Length, Type>& adj_ret) { adj_a += adj_ret; adj_b += adj_ret; } template<typename Type> inline CUDA_CALLABLE void adj_add(vec_t<2, Type> a, vec_t<2, Type> b, vec_t<2, Type>& adj_a, vec_t<2, Type>& adj_b, const vec_t<2, Type>& adj_ret) { adj_a.c[0] += adj_ret.c[0]; adj_a.c[1] += adj_ret.c[1]; adj_b.c[0] += adj_ret.c[0]; adj_b.c[1] += adj_ret.c[1]; } template<typename Type> inline CUDA_CALLABLE void adj_add(vec_t<3, Type> a, vec_t<3, Type> b, vec_t<3, Type>& adj_a, vec_t<3, Type>& adj_b, const vec_t<3, Type>& adj_ret) { adj_a.c[0] += adj_ret.c[0]; adj_a.c[1] += adj_ret.c[1]; adj_a.c[2] += adj_ret.c[2]; adj_b.c[0] += adj_ret.c[0]; adj_b.c[1] += adj_ret.c[1]; adj_b.c[2] += adj_ret.c[2]; } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_sub(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const vec_t<Length, Type>& adj_ret) { adj_a += adj_ret; adj_b -= adj_ret; } template<typename Type> inline CUDA_CALLABLE void adj_sub(vec_t<2, Type> a, vec_t<2, Type> b, vec_t<2, Type>& adj_a, vec_t<2, Type>& adj_b, const vec_t<2, Type>& adj_ret) { adj_a.c[0] += adj_ret.c[0]; adj_a.c[1] += adj_ret.c[1]; adj_b.c[0] -= adj_ret.c[0]; adj_b.c[1] -= adj_ret.c[1]; } template<typename Type> inline CUDA_CALLABLE void adj_sub(vec_t<3, Type> a, vec_t<3, Type> b, vec_t<3, Type>& adj_a, vec_t<3, Type>& adj_b, const vec_t<3, Type>& adj_ret) { adj_a.c[0] += adj_ret.c[0]; adj_a.c[1] += adj_ret.c[1]; adj_a.c[2] += adj_ret.c[2]; adj_b.c[0] -= adj_ret.c[0]; adj_b.c[1] -= adj_ret.c[1]; adj_b.c[2] -= adj_ret.c[2]; } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_dot(vec_t<Length, Type> a, vec_t<Length, Type> b, vec_t<Length, Type>& adj_a, vec_t<Length, Type>& adj_b, const Type adj_ret) { adj_a += b*adj_ret; adj_b += a*adj_ret; #if FP_CHECK if (!isfinite(a) || !isfinite(b) || !isfinite(adj_a) || !isfinite(adj_b) || !isfinite(adj_ret)) { // \TODO: How shall we implement this error message? 
//printf("adj_dot((%f %f %f %f), (%f %f %f %f), (%f %f %f %f), (%f %f %f %f), %f)\n", a.x, a.y, a.z, a.w, b.x, b.y, b.z, b.w, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_b.x, adj_b.y, adj_b.z, adj_b.w, adj_ret); assert(0); } #endif } template<typename Type> inline CUDA_CALLABLE void adj_dot(vec_t<2, Type> a, vec_t<2, Type> b, vec_t<2, Type>& adj_a, vec_t<2, Type>& adj_b, const Type adj_ret) { adj_a.c[0] += b.c[0]*adj_ret; adj_a.c[1] += b.c[1]*adj_ret; adj_b.c[0] += a.c[0]*adj_ret; adj_b.c[1] += a.c[1]*adj_ret; } template<typename Type> inline CUDA_CALLABLE void adj_dot(vec_t<3, Type> a, vec_t<3, Type> b, vec_t<3, Type>& adj_a, vec_t<3, Type>& adj_b, const Type adj_ret) { adj_a.c[0] += b.c[0]*adj_ret; adj_a.c[1] += b.c[1]*adj_ret; adj_a.c[2] += b.c[2]*adj_ret; adj_b.c[0] += a.c[0]*adj_ret; adj_b.c[1] += a.c[1]*adj_ret; adj_b.c[2] += a.c[2]*adj_ret; } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_extract(const vec_t<Length, Type> & a, int idx, vec_t<Length, Type> & adj_a, int & adj_idx, Type & adj_ret) { #ifndef NDEBUG if (idx < 0 || idx > Length) { printf("Tvec2<Scalar> index %d out of bounds at %s %d\n", idx, __FILE__, __LINE__); assert(0); } #endif adj_a[idx] += adj_ret; } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_length(vec_t<Length, Type> a, Type ret, vec_t<Length, Type>& adj_a, const Type adj_ret) { if (ret > Type(kEps)) { adj_a += div(a, ret) * adj_ret; } #if FP_CHECK if (!isfinite(adj_a)) { // \TODO: How shall we implement this error message? //printf("%s:%d - adj_length((%f %f %f %f), (%f %f %f %f), (%f))\n", __FILE__, __LINE__, a.x, a.y, a.z, a.w, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_ret); assert(0); } #endif } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_length_sq(vec_t<Length, Type> a, vec_t<Length, Type>& adj_a, const Type adj_ret) { adj_a += Type(2.0)*a*adj_ret; #if FP_CHECK if (!isfinite(adj_a)) { // \TODO: How shall we implement this error message? //printf("%s:%d - adj_length((%f %f %f %f), (%f %f %f %f), (%f))\n", __FILE__, __LINE__, a.x, a.y, a.z, a.w, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_ret); assert(0); } #endif } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_normalize(vec_t<Length, Type> a, vec_t<Length, Type>& ret, vec_t<Length, Type>& adj_a, const vec_t<Length, Type>& adj_ret) { Type d = length(a); if (d > Type(kEps)) { Type invd = Type(1.0f)/d; adj_a += (adj_ret*invd - ret*(dot(ret, adj_ret))*invd); #if FP_CHECK if (!isfinite(adj_a)) { // \TODO: How shall we implement this error message? 
//printf("%s:%d - adj_normalize((%f %f %f %f), (%f %f %f %f), (%f, %f, %f, %f))\n", __FILE__, __LINE__, a.x, a.y, a.z, a.w, adj_a.x, adj_a.y, adj_a.z, adj_a.w, adj_ret.x, adj_ret.y, adj_ret.z, adj_ret.w); assert(0); } #endif } } template<typename Type> inline CUDA_CALLABLE void adj_cross(vec_t<3,Type> a, vec_t<3,Type> b, vec_t<3,Type>& adj_a, vec_t<3,Type>& adj_b, const vec_t<3,Type>& adj_ret) { // todo: sign check adj_a += cross(b, adj_ret); adj_b -= cross(a, adj_ret); } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_isfinite(const vec_t<Length, Type> &x, vec_t<Length,Type>& adj_x, const bool &adj_ret) { } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_isnan(const vec_t<Length, Type> &x, vec_t<Length,Type>& adj_x, const bool &adj_ret) { } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_isinf(const vec_t<Length, Type> &x, vec_t<Length,Type>& adj_x, const bool &adj_ret) { } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_min(const vec_t<Length,Type> &a, const vec_t<Length,Type> &b, vec_t<Length,Type>& adj_a, vec_t<Length,Type>& adj_b, const vec_t<Length,Type> &adj_ret) { for( unsigned i=0; i < Length; ++i ) { if (a[i] < b[i]) adj_a[i] += adj_ret[i]; else adj_b[i] += adj_ret[i]; } } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_max(const vec_t<Length,Type> &a, const vec_t<Length,Type> &b, vec_t<Length,Type>& adj_a, vec_t<Length,Type>& adj_b, const vec_t<Length,Type> &adj_ret) { for( unsigned i=0; i < Length; ++i ) { if (a[i] > b[i]) adj_a[i] += adj_ret[i]; else adj_b[i] += adj_ret[i]; } } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_min(const vec_t<Length,Type> &v, vec_t<Length,Type>& adj_v, const Type &adj_ret) { unsigned i = argmin(v); adj_v[i] += adj_ret; } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_max(const vec_t<Length,Type> &v, vec_t<Length,Type>& adj_v, const Type &adj_ret) { unsigned i = argmax(v); adj_v[i] += adj_ret; } // Do I need to specialize these for different lengths? template<unsigned Length, typename Type> inline CUDA_CALLABLE vec_t<Length, Type> atomic_add(vec_t<Length, Type> * addr, vec_t<Length, Type> value) { vec_t<Length, Type> ret; for( unsigned i=0; i < Length; ++i ) { ret[i] = atomic_add(&(addr -> c[i]), value[i]); } return ret; } template<unsigned Length, typename Type> inline CUDA_CALLABLE vec_t<Length, Type> atomic_min(vec_t<Length, Type> * addr, vec_t<Length, Type> value) { vec_t<Length, Type> ret; for( unsigned i=0; i < Length; ++i ) { ret[i] = atomic_min(&(addr -> c[i]), value[i]); } return ret; } template<unsigned Length, typename Type> inline CUDA_CALLABLE vec_t<Length, Type> atomic_max(vec_t<Length, Type> * addr, vec_t<Length, Type> value) { vec_t<Length, Type> ret; for( unsigned i=0; i < Length; ++i ) { ret[i] = atomic_max(&(addr -> c[i]), value[i]); } return ret; } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_atomic_minmax( vec_t<Length,Type> *addr, vec_t<Length,Type> *adj_addr, const vec_t<Length,Type> &value, vec_t<Length,Type> &adj_value) { for (unsigned i=0; i < Length; ++i) adj_atomic_minmax(&(addr->c[i]), &(adj_addr->c[i]), value[i], adj_value[i]); } // ok, the original implementation of this didn't take the absolute values. // I wouldn't consider this expected behavior. It looks like it's only // being used for bounding boxes at the moment, where this doesn't matter, // but you often use it for ray tracing where it does. 
Not sure if the // fabs() incurs a performance hit... template<unsigned Length, typename Type> CUDA_CALLABLE inline int longest_axis(const vec_t<Length, Type>& v) { Type lmax = abs(v[0]); int ret(0); for( unsigned i=1; i < Length; ++i ) { Type l = abs(v[i]); if( l > lmax ) { ret = i; lmax = l; } } return ret; } template<unsigned Length, typename Type> CUDA_CALLABLE inline vec_t<Length,Type> lerp(const vec_t<Length,Type>& a, const vec_t<Length,Type>& b, Type t) { return a*(Type(1)-t) + b*t; } template<unsigned Length, typename Type> CUDA_CALLABLE inline void adj_lerp(const vec_t<Length,Type>& a, const vec_t<Length,Type>& b, Type t, vec_t<Length,Type>& adj_a, vec_t<Length,Type>& adj_b, Type& adj_t, const vec_t<Length,Type>& adj_ret) { adj_a += adj_ret*(Type(1)-t); adj_b += adj_ret*t; adj_t += tensordot(b, adj_ret) - tensordot(a, adj_ret); } // for integral types we do not accumulate gradients template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, int8>* buf, const vec_t<Length, int8> &value) { } template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, uint8>* buf, const vec_t<Length, uint8> &value) { } template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, int16>* buf, const vec_t<Length, int16> &value) { } template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, uint16>* buf, const vec_t<Length, uint16> &value) { } template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, int32>* buf, const vec_t<Length, int32> &value) { } template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, uint32>* buf, const vec_t<Length, uint32> &value) { } template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, int64>* buf, const vec_t<Length, int64> &value) { } template<unsigned Length> CUDA_CALLABLE inline void adj_atomic_add(vec_t<Length, uint64>* buf, const vec_t<Length, uint64> &value) { } // adjoints for some of the constructors, used in intersect.h inline CUDA_CALLABLE void adj_vec2(float x, float y, float& adj_x, float& adj_y, const vec2& adj_ret) { adj_x += adj_ret[0]; adj_y += adj_ret[1]; } inline CUDA_CALLABLE void adj_vec3(float x, float y, float z, float& adj_x, float& adj_y, float& adj_z, const vec3& adj_ret) { adj_x += adj_ret[0]; adj_y += adj_ret[1]; adj_z += adj_ret[2]; } inline CUDA_CALLABLE void adj_vec4(float x, float y, float z, float w, float& adj_x, float& adj_y, float& adj_z, float& adj_w, const vec4& adj_ret) { adj_x += adj_ret[0]; adj_y += adj_ret[1]; adj_z += adj_ret[2]; adj_w += adj_ret[3]; } inline CUDA_CALLABLE void adj_vec3(float s, float& adj_s, const vec3& adj_ret) { adj_vec_t(s, adj_s, adj_ret); } inline CUDA_CALLABLE void adj_vec4(float s, float& adj_s, const vec4& adj_ret) { adj_vec_t(s, adj_s, adj_ret); } } // namespace wp
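These templates are what the Python-level vector types (wp.vec2, wp.vec3, wp.vec4, ...) and their builtins compile down to. A short, hedged sketch of typical use from a kernel, with made-up data, just to show which C++ functions above get exercised:

import warp as wp

wp.init()

@wp.kernel
def project_onto_up(points: wp.array(dtype=wp.vec3), heights: wp.array(dtype=float)):
    tid = wp.tid()
    # normalize() above, including the kEps guard that returns zero for tiny vectors
    n = wp.normalize(points[tid])
    # dot() above, against a fixed "up" axis
    heights[tid] = wp.dot(n, wp.vec3(0.0, 1.0, 0.0))

points = wp.array([wp.vec3(1.0, 2.0, 3.0), wp.vec3(0.0, 0.0, 0.0)], dtype=wp.vec3)
heights = wp.zeros(points.shape[0], dtype=float)
wp.launch(project_onto_up, dim=points.shape[0], inputs=[points, heights])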
33,223
C
27.203735
217
0.599615
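Still referring to vec.h above: the adj_* overloads are the reverse-mode adjoints that Warp's tape invokes during backpropagation. A hedged sketch of how they are exercised from Python, assuming the standard wp.Tape workflow (kernel and data are illustrative only):

import warp as wp

wp.init()

@wp.kernel
def vec_length_kernel(x: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
    tid = wp.tid()
    # forward pass uses length(); the generated backward pass calls adj_length()
    out[tid] = wp.length(x[tid])

x = wp.array([wp.vec3(3.0, 4.0, 0.0)], dtype=wp.vec3, requires_grad=True)
out = wp.zeros(1, dtype=float, requires_grad=True)

tape = wp.Tape()
with tape:
    wp.launch(vec_length_kernel, dim=1, inputs=[x, out])

tape.backward(loss=out)
print(x.grad.numpy())  # expected roughly [[0.6, 0.8, 0.0]], i.e. x / |x|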
NVIDIA/warp/warp/native/cuda_util.cpp
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #if WP_ENABLE_CUDA #include "cuda_util.h" #include "error.h" #if defined(_WIN32) #define WIN32_LEAN_AND_MEAN #define NOMINMAX #include <windows.h> #include <wingdi.h> // needed for OpenGL includes #elif defined(__linux__) #include <dlfcn.h> #endif #include <set> #include <stack> // the minimum CUDA version required from the driver #define WP_CUDA_DRIVER_VERSION 11030 // the minimum CUDA Toolkit version required to build Warp #define WP_CUDA_TOOLKIT_VERSION 11050 #define WP_CUDA_VERSION_MAJOR(version) (version / 1000) #define WP_CUDA_VERSION_MINOR(version) ((version % 1000) / 10) // check if the CUDA Toolkit is too old #if CUDA_VERSION < WP_CUDA_TOOLKIT_VERSION #error Building Warp requires CUDA Toolkit version 11.5 or higher #endif // Avoid including <cudaGLTypedefs.h>, which requires OpenGL headers to be installed. // We define our own GL types, based on the spec here: https://www.khronos.org/opengl/wiki/OpenGL_Type namespace wp { typedef uint32_t GLuint; } // function prototypes adapted from <cudaGLTypedefs.h> typedef CUresult (CUDAAPI *PFN_cuGraphicsGLRegisterBuffer_v3000)(CUgraphicsResource *pCudaResource, wp::GLuint buffer, unsigned int Flags); // function pointers to driver API entry points // these are explicitly versioned according to cudaTypedefs.h from CUDA Toolkit WP_CUDA_TOOLKIT_VERSION #if CUDA_VERSION < 12000 static PFN_cuGetProcAddress_v11030 pfn_cuGetProcAddress; #else static PFN_cuGetProcAddress_v12000 pfn_cuGetProcAddress; #endif static PFN_cuDriverGetVersion_v2020 pfn_cuDriverGetVersion; static PFN_cuGetErrorName_v6000 pfn_cuGetErrorName; static PFN_cuGetErrorString_v6000 pfn_cuGetErrorString; static PFN_cuInit_v2000 pfn_cuInit; static PFN_cuDeviceGet_v2000 pfn_cuDeviceGet; static PFN_cuDeviceGetCount_v2000 pfn_cuDeviceGetCount; static PFN_cuDeviceGetName_v2000 pfn_cuDeviceGetName; static PFN_cuDeviceGetAttribute_v2000 pfn_cuDeviceGetAttribute; static PFN_cuDeviceGetUuid_v11040 pfn_cuDeviceGetUuid; static PFN_cuDevicePrimaryCtxRetain_v7000 pfn_cuDevicePrimaryCtxRetain; static PFN_cuDevicePrimaryCtxRelease_v11000 pfn_cuDevicePrimaryCtxRelease; static PFN_cuDeviceCanAccessPeer_v4000 pfn_cuDeviceCanAccessPeer; static PFN_cuMemGetInfo_v3020 pfn_cuMemGetInfo; static PFN_cuCtxGetCurrent_v4000 pfn_cuCtxGetCurrent; static PFN_cuCtxSetCurrent_v4000 pfn_cuCtxSetCurrent; static PFN_cuCtxPushCurrent_v4000 pfn_cuCtxPushCurrent; static PFN_cuCtxPopCurrent_v4000 pfn_cuCtxPopCurrent; static PFN_cuCtxSynchronize_v2000 pfn_cuCtxSynchronize; static PFN_cuCtxGetDevice_v2000 pfn_cuCtxGetDevice; static PFN_cuCtxCreate_v3020 pfn_cuCtxCreate; static PFN_cuCtxDestroy_v4000 pfn_cuCtxDestroy; static PFN_cuCtxEnablePeerAccess_v4000 pfn_cuCtxEnablePeerAccess; static PFN_cuCtxDisablePeerAccess_v4000 pfn_cuCtxDisablePeerAccess; static PFN_cuStreamCreate_v2000 pfn_cuStreamCreate; static PFN_cuStreamDestroy_v4000 pfn_cuStreamDestroy; static PFN_cuStreamSynchronize_v2000 pfn_cuStreamSynchronize; static PFN_cuStreamWaitEvent_v3020 pfn_cuStreamWaitEvent; static PFN_cuStreamGetCtx_v9020 pfn_cuStreamGetCtx; static PFN_cuStreamGetCaptureInfo_v11030 pfn_cuStreamGetCaptureInfo; static 
PFN_cuStreamUpdateCaptureDependencies_v11030 pfn_cuStreamUpdateCaptureDependencies; static PFN_cuEventCreate_v2000 pfn_cuEventCreate; static PFN_cuEventDestroy_v4000 pfn_cuEventDestroy; static PFN_cuEventRecord_v2000 pfn_cuEventRecord; static PFN_cuEventRecordWithFlags_v11010 pfn_cuEventRecordWithFlags; static PFN_cuEventSynchronize_v2000 pfn_cuEventSynchronize; static PFN_cuModuleLoadDataEx_v2010 pfn_cuModuleLoadDataEx; static PFN_cuModuleUnload_v2000 pfn_cuModuleUnload; static PFN_cuModuleGetFunction_v2000 pfn_cuModuleGetFunction; static PFN_cuLaunchKernel_v4000 pfn_cuLaunchKernel; static PFN_cuMemcpyPeerAsync_v4000 pfn_cuMemcpyPeerAsync; static PFN_cuPointerGetAttribute_v4000 pfn_cuPointerGetAttribute; static PFN_cuGraphicsMapResources_v3000 pfn_cuGraphicsMapResources; static PFN_cuGraphicsUnmapResources_v3000 pfn_cuGraphicsUnmapResources; static PFN_cuGraphicsResourceGetMappedPointer_v3020 pfn_cuGraphicsResourceGetMappedPointer; static PFN_cuGraphicsGLRegisterBuffer_v3000 pfn_cuGraphicsGLRegisterBuffer; static PFN_cuGraphicsUnregisterResource_v3000 pfn_cuGraphicsUnregisterResource; static bool cuda_driver_initialized = false; bool ContextGuard::always_restore = false; CudaTimingState* g_cuda_timing_state = NULL; static bool get_driver_entry_point(const char* name, void** pfn) { if (!pfn_cuGetProcAddress || !name || !pfn) return false; #if CUDA_VERSION < 12000 CUresult r = pfn_cuGetProcAddress(name, pfn, WP_CUDA_DRIVER_VERSION, CU_GET_PROC_ADDRESS_DEFAULT); #else CUresult r = pfn_cuGetProcAddress(name, pfn, WP_CUDA_DRIVER_VERSION, CU_GET_PROC_ADDRESS_DEFAULT, NULL); #endif if (r != CUDA_SUCCESS) { fprintf(stderr, "Warp CUDA error: Failed to get driver entry point '%s' (CUDA error %u)\n", name, unsigned(r)); return false; } return true; } bool init_cuda_driver() { #if defined(_WIN32) static HMODULE hCudaDriver = LoadLibraryA("nvcuda.dll"); if (hCudaDriver == NULL) { fprintf(stderr, "Warp CUDA error: Could not open nvcuda.dll.\n"); return false; } pfn_cuGetProcAddress = (PFN_cuGetProcAddress)GetProcAddress(hCudaDriver, "cuGetProcAddress"); #elif defined(__linux__) static void* hCudaDriver = dlopen("libcuda.so", RTLD_NOW); if (hCudaDriver == NULL) { // WSL and possibly other systems might require the .1 suffix hCudaDriver = dlopen("libcuda.so.1", RTLD_NOW); if (hCudaDriver == NULL) { fprintf(stderr, "Warp CUDA error: Could not open libcuda.so.\n"); return false; } } pfn_cuGetProcAddress = (PFN_cuGetProcAddress)dlsym(hCudaDriver, "cuGetProcAddress"); #endif if (!pfn_cuGetProcAddress) { fprintf(stderr, "Warp CUDA error: Failed to get function cuGetProcAddress\n"); return false; } // check the CUDA driver version and report an error if it's too low int driver_version = 0; if (get_driver_entry_point("cuDriverGetVersion", &(void*&)pfn_cuDriverGetVersion) && check_cu(pfn_cuDriverGetVersion(&driver_version))) { if (driver_version < WP_CUDA_DRIVER_VERSION) { fprintf(stderr, "Warp CUDA error: Warp requires CUDA driver %d.%d or higher, but the current driver only supports CUDA %d.%d\n", WP_CUDA_VERSION_MAJOR(WP_CUDA_DRIVER_VERSION), WP_CUDA_VERSION_MINOR(WP_CUDA_DRIVER_VERSION), WP_CUDA_VERSION_MAJOR(driver_version), WP_CUDA_VERSION_MINOR(driver_version)); return false; } } else { fprintf(stderr, "Warp CUDA warning: Unable to determine CUDA driver version\n"); } // initialize driver entry points get_driver_entry_point("cuGetErrorString", &(void*&)pfn_cuGetErrorString); get_driver_entry_point("cuGetErrorName", &(void*&)pfn_cuGetErrorName); get_driver_entry_point("cuInit", &(void*&)pfn_cuInit); 
get_driver_entry_point("cuDeviceGet", &(void*&)pfn_cuDeviceGet); get_driver_entry_point("cuDeviceGetCount", &(void*&)pfn_cuDeviceGetCount); get_driver_entry_point("cuDeviceGetName", &(void*&)pfn_cuDeviceGetName); get_driver_entry_point("cuDeviceGetAttribute", &(void*&)pfn_cuDeviceGetAttribute); get_driver_entry_point("cuDeviceGetUuid", &(void*&)pfn_cuDeviceGetUuid); get_driver_entry_point("cuDevicePrimaryCtxRetain", &(void*&)pfn_cuDevicePrimaryCtxRetain); get_driver_entry_point("cuDevicePrimaryCtxRelease", &(void*&)pfn_cuDevicePrimaryCtxRelease); get_driver_entry_point("cuDeviceCanAccessPeer", &(void*&)pfn_cuDeviceCanAccessPeer); get_driver_entry_point("cuMemGetInfo", &(void*&)pfn_cuMemGetInfo); get_driver_entry_point("cuCtxSetCurrent", &(void*&)pfn_cuCtxSetCurrent); get_driver_entry_point("cuCtxGetCurrent", &(void*&)pfn_cuCtxGetCurrent); get_driver_entry_point("cuCtxPushCurrent", &(void*&)pfn_cuCtxPushCurrent); get_driver_entry_point("cuCtxPopCurrent", &(void*&)pfn_cuCtxPopCurrent); get_driver_entry_point("cuCtxSynchronize", &(void*&)pfn_cuCtxSynchronize); get_driver_entry_point("cuCtxGetDevice", &(void*&)pfn_cuCtxGetDevice); get_driver_entry_point("cuCtxCreate", &(void*&)pfn_cuCtxCreate); get_driver_entry_point("cuCtxDestroy", &(void*&)pfn_cuCtxDestroy); get_driver_entry_point("cuCtxEnablePeerAccess", &(void*&)pfn_cuCtxEnablePeerAccess); get_driver_entry_point("cuCtxDisablePeerAccess", &(void*&)pfn_cuCtxDisablePeerAccess); get_driver_entry_point("cuStreamCreate", &(void*&)pfn_cuStreamCreate); get_driver_entry_point("cuStreamDestroy", &(void*&)pfn_cuStreamDestroy); get_driver_entry_point("cuStreamSynchronize", &(void*&)pfn_cuStreamSynchronize); get_driver_entry_point("cuStreamWaitEvent", &(void*&)pfn_cuStreamWaitEvent); get_driver_entry_point("cuStreamGetCtx", &(void*&)pfn_cuStreamGetCtx); get_driver_entry_point("cuStreamGetCaptureInfo", &(void*&)pfn_cuStreamGetCaptureInfo); get_driver_entry_point("cuStreamUpdateCaptureDependencies", &(void*&)pfn_cuStreamUpdateCaptureDependencies); get_driver_entry_point("cuEventCreate", &(void*&)pfn_cuEventCreate); get_driver_entry_point("cuEventDestroy", &(void*&)pfn_cuEventDestroy); get_driver_entry_point("cuEventRecord", &(void*&)pfn_cuEventRecord); get_driver_entry_point("cuEventRecordWithFlags", &(void*&)pfn_cuEventRecordWithFlags); get_driver_entry_point("cuEventSynchronize", &(void*&)pfn_cuEventSynchronize); get_driver_entry_point("cuModuleLoadDataEx", &(void*&)pfn_cuModuleLoadDataEx); get_driver_entry_point("cuModuleUnload", &(void*&)pfn_cuModuleUnload); get_driver_entry_point("cuModuleGetFunction", &(void*&)pfn_cuModuleGetFunction); get_driver_entry_point("cuLaunchKernel", &(void*&)pfn_cuLaunchKernel); get_driver_entry_point("cuMemcpyPeerAsync", &(void*&)pfn_cuMemcpyPeerAsync); get_driver_entry_point("cuPointerGetAttribute", &(void*&)pfn_cuPointerGetAttribute); get_driver_entry_point("cuGraphicsMapResources", &(void*&)pfn_cuGraphicsMapResources); get_driver_entry_point("cuGraphicsUnmapResources", &(void*&)pfn_cuGraphicsUnmapResources); get_driver_entry_point("cuGraphicsResourceGetMappedPointer", &(void*&)pfn_cuGraphicsResourceGetMappedPointer); get_driver_entry_point("cuGraphicsGLRegisterBuffer", &(void*&)pfn_cuGraphicsGLRegisterBuffer); get_driver_entry_point("cuGraphicsUnregisterResource", &(void*&)pfn_cuGraphicsUnregisterResource); if (pfn_cuInit) cuda_driver_initialized = check_cu(pfn_cuInit(0)); return cuda_driver_initialized; } bool is_cuda_driver_initialized() { return cuda_driver_initialized; } bool check_cuda_result(cudaError_t code, 
const char* func, const char* file, int line) { if (code == cudaSuccess) return true; wp::set_error_string("Warp CUDA error %u: %s (in function %s, %s:%d)", unsigned(code), cudaGetErrorString(code), func, file, line); return false; } bool check_cu_result(CUresult result, const char* func, const char* file, int line) { if (result == CUDA_SUCCESS) return true; const char* errString = NULL; if (pfn_cuGetErrorString) pfn_cuGetErrorString(result, &errString); if (errString) wp::set_error_string("Warp CUDA error %u: %s (in function %s, %s:%d)", unsigned(result), errString, func, file, line); else wp::set_error_string("Warp CUDA error %u (in function %s, %s:%d)", unsigned(result), func, file, line); return false; } bool get_capture_dependencies(CUstream stream, std::vector<CUgraphNode>& dependencies_ret) { CUstreamCaptureStatus status; size_t num_dependencies = 0; const CUgraphNode* dependencies = NULL; dependencies_ret.clear(); if (check_cu(cuStreamGetCaptureInfo_f(stream, &status, NULL, NULL, &dependencies, &num_dependencies))) { if (dependencies && num_dependencies > 0) dependencies_ret.insert(dependencies_ret.begin(), dependencies, dependencies + num_dependencies); return true; } return false; } bool get_graph_leaf_nodes(cudaGraph_t graph, std::vector<cudaGraphNode_t>& leaf_nodes_ret) { if (!graph) return false; size_t node_count = 0; if (!check_cuda(cudaGraphGetNodes(graph, NULL, &node_count))) return false; std::vector<cudaGraphNode_t> nodes(node_count); if (!check_cuda(cudaGraphGetNodes(graph, nodes.data(), &node_count))) return false; leaf_nodes_ret.clear(); for (cudaGraphNode_t node : nodes) { size_t dependent_count; if (!check_cuda(cudaGraphNodeGetDependentNodes(node, NULL, &dependent_count))) return false; if (dependent_count == 0) leaf_nodes_ret.push_back(node); } return true; } #define DRIVER_ENTRY_POINT_ERROR driver_entry_point_error(__FUNCTION__) static CUresult driver_entry_point_error(const char* function) { fprintf(stderr, "Warp CUDA error: Function %s: a suitable driver entry point was not found\n", function); return (CUresult)cudaErrorCallRequiresNewerDriver; // this matches what cudart would do } CUresult cuDriverGetVersion_f(int* version) { return pfn_cuDriverGetVersion ? pfn_cuDriverGetVersion(version) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuGetErrorName_f(CUresult result, const char** pstr) { return pfn_cuGetErrorName ? pfn_cuGetErrorName(result, pstr) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuGetErrorString_f(CUresult result, const char** pstr) { return pfn_cuGetErrorString ? pfn_cuGetErrorString(result, pstr) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuInit_f(unsigned int flags) { return pfn_cuInit ? pfn_cuInit(flags) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuDeviceGet_f(CUdevice *dev, int ordinal) { return pfn_cuDeviceGet ? pfn_cuDeviceGet(dev, ordinal) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuDeviceGetCount_f(int* count) { if (pfn_cuDeviceGetCount) return pfn_cuDeviceGetCount(count); // allow calling this function even if CUDA is not available if (count) *count = 0; return CUDA_SUCCESS; } CUresult cuDeviceGetName_f(char* name, int len, CUdevice dev) { return pfn_cuDeviceGetName ? pfn_cuDeviceGetName(name, len, dev) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuDeviceGetAttribute_f(int* value, CUdevice_attribute attrib, CUdevice dev) { return pfn_cuDeviceGetAttribute ? pfn_cuDeviceGetAttribute(value, attrib, dev) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuDeviceGetUuid_f(CUuuid* uuid, CUdevice dev) { return pfn_cuDeviceGetUuid ? 
pfn_cuDeviceGetUuid(uuid, dev) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuDevicePrimaryCtxRetain_f(CUcontext* ctx, CUdevice dev) { return pfn_cuDevicePrimaryCtxRetain ? pfn_cuDevicePrimaryCtxRetain(ctx, dev) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuDevicePrimaryCtxRelease_f(CUdevice dev) { return pfn_cuDevicePrimaryCtxRelease ? pfn_cuDevicePrimaryCtxRelease(dev) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuDeviceCanAccessPeer_f(int* can_access, CUdevice dev, CUdevice peer_dev) { return pfn_cuDeviceCanAccessPeer ? pfn_cuDeviceCanAccessPeer(can_access, dev, peer_dev) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuMemGetInfo_f(size_t* free, size_t* total) { return pfn_cuMemGetInfo ? pfn_cuMemGetInfo(free, total) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuCtxGetCurrent_f(CUcontext* ctx) { return pfn_cuCtxGetCurrent ? pfn_cuCtxGetCurrent(ctx) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuCtxSetCurrent_f(CUcontext ctx) { return pfn_cuCtxSetCurrent ? pfn_cuCtxSetCurrent(ctx) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuCtxPushCurrent_f(CUcontext ctx) { return pfn_cuCtxPushCurrent ? pfn_cuCtxPushCurrent(ctx) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuCtxPopCurrent_f(CUcontext* ctx) { return pfn_cuCtxPopCurrent ? pfn_cuCtxPopCurrent(ctx) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuCtxSynchronize_f() { return pfn_cuCtxSynchronize ? pfn_cuCtxSynchronize() : DRIVER_ENTRY_POINT_ERROR; } CUresult cuCtxGetDevice_f(CUdevice* dev) { return pfn_cuCtxGetDevice ? pfn_cuCtxGetDevice(dev) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuCtxCreate_f(CUcontext* ctx, unsigned int flags, CUdevice dev) { return pfn_cuCtxCreate ? pfn_cuCtxCreate(ctx, flags, dev) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuCtxDestroy_f(CUcontext ctx) { return pfn_cuCtxDestroy ? pfn_cuCtxDestroy(ctx) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuCtxEnablePeerAccess_f(CUcontext peer_ctx, unsigned int flags) { return pfn_cuCtxEnablePeerAccess ? pfn_cuCtxEnablePeerAccess(peer_ctx, flags) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuCtxDisablePeerAccess_f(CUcontext peer_ctx) { return pfn_cuCtxDisablePeerAccess ? pfn_cuCtxDisablePeerAccess(peer_ctx) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuStreamCreate_f(CUstream* stream, unsigned int flags) { return pfn_cuStreamCreate ? pfn_cuStreamCreate(stream, flags) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuStreamDestroy_f(CUstream stream) { return pfn_cuStreamDestroy ? pfn_cuStreamDestroy(stream) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuStreamSynchronize_f(CUstream stream) { return pfn_cuStreamSynchronize ? pfn_cuStreamSynchronize(stream) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuStreamWaitEvent_f(CUstream stream, CUevent event, unsigned int flags) { return pfn_cuStreamWaitEvent ? pfn_cuStreamWaitEvent(stream, event, flags) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuStreamGetCtx_f(CUstream stream, CUcontext* pctx) { return pfn_cuStreamGetCtx ? pfn_cuStreamGetCtx(stream, pctx) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuStreamGetCaptureInfo_f(CUstream stream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out, CUgraph *graph_out, const CUgraphNode **dependencies_out, size_t *numDependencies_out) { return pfn_cuStreamGetCaptureInfo ? pfn_cuStreamGetCaptureInfo(stream, captureStatus_out, id_out, graph_out, dependencies_out, numDependencies_out) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuStreamUpdateCaptureDependencies_f(CUstream stream, CUgraphNode *dependencies, size_t numDependencies, unsigned int flags) { return pfn_cuStreamUpdateCaptureDependencies ? 
pfn_cuStreamUpdateCaptureDependencies(stream, dependencies, numDependencies, flags) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuEventCreate_f(CUevent* event, unsigned int flags) { return pfn_cuEventCreate ? pfn_cuEventCreate(event, flags) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuEventDestroy_f(CUevent event) { return pfn_cuEventDestroy ? pfn_cuEventDestroy(event) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuEventRecord_f(CUevent event, CUstream stream) { return pfn_cuEventRecord ? pfn_cuEventRecord(event, stream) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuEventRecordWithFlags_f(CUevent event, CUstream stream, unsigned int flags) { return pfn_cuEventRecordWithFlags ? pfn_cuEventRecordWithFlags(event, stream, flags) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuEventSynchronize_f(CUevent event) { return pfn_cuEventSynchronize ? pfn_cuEventSynchronize(event) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuModuleLoadDataEx_f(CUmodule *module, const void *image, unsigned int numOptions, CUjit_option *options, void **optionValues) { return pfn_cuModuleLoadDataEx ? pfn_cuModuleLoadDataEx(module, image, numOptions, options, optionValues) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuModuleUnload_f(CUmodule hmod) { return pfn_cuModuleUnload ? pfn_cuModuleUnload(hmod) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuModuleGetFunction_f(CUfunction *hfunc, CUmodule hmod, const char *name) { return pfn_cuModuleGetFunction ? pfn_cuModuleGetFunction(hfunc, hmod, name) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuLaunchKernel_f(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams, void **extra) { return pfn_cuLaunchKernel ? pfn_cuLaunchKernel(f, gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ, sharedMemBytes, hStream, kernelParams, extra) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuMemcpyPeerAsync_f(CUdeviceptr dst_ptr, CUcontext dst_ctx, CUdeviceptr src_ptr, CUcontext src_ctx, size_t n, CUstream stream) { return pfn_cuMemcpyPeerAsync ? pfn_cuMemcpyPeerAsync(dst_ptr, dst_ctx, src_ptr, src_ctx, n, stream) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuPointerGetAttribute_f(void* data, CUpointer_attribute attribute, CUdeviceptr ptr) { return pfn_cuPointerGetAttribute ? pfn_cuPointerGetAttribute(data, attribute, ptr) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuGraphicsMapResources_f(unsigned int count, CUgraphicsResource* resources, CUstream stream) { return pfn_cuGraphicsMapResources ? pfn_cuGraphicsMapResources(count, resources, stream) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuGraphicsUnmapResources_f(unsigned int count, CUgraphicsResource* resources, CUstream hStream) { return pfn_cuGraphicsUnmapResources ? pfn_cuGraphicsUnmapResources(count, resources, hStream) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuGraphicsResourceGetMappedPointer_f(CUdeviceptr* pDevPtr, size_t* pSize, CUgraphicsResource resource) { return pfn_cuGraphicsResourceGetMappedPointer ? pfn_cuGraphicsResourceGetMappedPointer(pDevPtr, pSize, resource) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuGraphicsGLRegisterBuffer_f(CUgraphicsResource *pCudaResource, unsigned int buffer, unsigned int flags) { return pfn_cuGraphicsGLRegisterBuffer ? pfn_cuGraphicsGLRegisterBuffer(pCudaResource, (wp::GLuint) buffer, flags) : DRIVER_ENTRY_POINT_ERROR; } CUresult cuGraphicsUnregisterResource_f(CUgraphicsResource resource) { return pfn_cuGraphicsUnregisterResource ? 
pfn_cuGraphicsUnregisterResource(resource) : DRIVER_ENTRY_POINT_ERROR; } #endif // WP_ENABLE_CUDA
22,272
C++
39.496364
262
0.757947
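The cuda_util.cpp record above resolves every CUDA driver entry point once via cuGetProcAddress and then dispatches each cuXxx_f wrapper through the stored pointer, falling back to DRIVER_ENTRY_POINT_ERROR when resolution failed. Below is a minimal standalone C++ sketch of that resolve-once / dispatch-through-a-pointer pattern; all names in it (my_init_f, real_init, entry_point_missing) are made up for illustration and are not part of Warp or the CUDA driver API.

// Sketch only: mirrors the shape of pfn_cuInit / cuInit_f / DRIVER_ENTRY_POINT_ERROR.
#include <cstdio>

using InitFn = int (*)(unsigned int);      // stand-in for a driver entry-point signature
static InitFn pfn_my_init = nullptr;       // filled in by the loader, may remain null

static int entry_point_missing(const char* fn)
{
    std::fprintf(stderr, "error: no suitable entry point found in %s\n", fn);
    return -1;                             // sentinel error code
}

static int real_init(unsigned int flags)   // pretend this address came from GetProcAddress/dlsym
{
    std::printf("driver init(%u)\n", flags);
    return 0;
}

int my_init_f(unsigned int flags)          // public wrapper, same shape as cuInit_f
{
    return pfn_my_init ? pfn_my_init(flags) : entry_point_missing(__func__);
}

int main()
{
    my_init_f(0);                          // before resolution: reports a missing entry point
    pfn_my_init = &real_init;              // the "get_driver_entry_point" step succeeded
    return my_init_f(0);                   // after resolution: forwards to the resolved function
}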
NVIDIA/warp/warp/native/error.h
/** Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved.
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

#pragma once

namespace wp {

// functions related to error reporting

// get error string from Python
const char* get_error_string();

// set error message for Python
// these functions also print the error message if error output is enabled
void set_error_string(const char* fmt, ...);
void append_error_string(const char* fmt, ...);

// allow disabling printing errors, which is handy during tests that expect failure
void set_error_output_enabled(bool enable);
bool is_error_output_enabled();

}
934
C
32.392856
83
0.770878
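The error.h record above only declares the reporting interface; the definitions live elsewhere in Warp's native code. The following is a toy, self-contained stand-in (namespace wp_sketch, fixed 1 KB format buffer) that merely mirrors the declared signatures to show the intended flow: set or append a formatted message, optionally suppress printing during tests that expect failure, and let the host side fetch the last error string. It is not Warp's implementation.

// Minimal stand-in matching the declared signatures; for illustration only.
#include <cstdarg>
#include <cstdio>
#include <string>

namespace wp_sketch {

static std::string g_error;
static bool g_print_errors = true;

static std::string format(const char* fmt, va_list args)
{
    char buf[1024];
    std::vsnprintf(buf, sizeof(buf), fmt, args);
    return buf;
}

void set_error_string(const char* fmt, ...)
{
    va_list args;
    va_start(args, fmt);
    g_error = format(fmt, args);
    va_end(args);
    if (g_print_errors)
        std::fprintf(stderr, "%s\n", g_error.c_str());
}

void append_error_string(const char* fmt, ...)
{
    va_list args;
    va_start(args, fmt);
    g_error += "\n" + format(fmt, args);
    va_end(args);
}

const char* get_error_string() { return g_error.c_str(); }
void set_error_output_enabled(bool enable) { g_print_errors = enable; }
bool is_error_output_enabled() { return g_print_errors; }

} // namespace wp_sketch

int main()
{
    wp_sketch::set_error_output_enabled(false);  // e.g. inside a test that expects failure
    wp_sketch::set_error_string("Warp CUDA error %u: %s", 100u, "no CUDA device found");
    wp_sketch::append_error_string("(in function %s)", "example");
    std::printf("last error: %s\n", wp_sketch::get_error_string());
    return 0;
}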
NVIDIA/warp/warp/native/exports.h
namespace wp { extern "C" { WP_API void builtin_min_float16_float16(float16 x, float16 y, float16* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_float32_float32(float32 x, float32 y, float32* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_float64_float64(float64 x, float64 y, float64* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_int16_int16(int16 x, int16 y, int16* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_int32_int32(int32 x, int32 y, int32* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_int64_int64(int64 x, int64 y, int64* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_int8_int8(int8 x, int8 y, int8* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_uint16_uint16(uint16 x, uint16 y, uint16* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_uint32_uint32(uint32 x, uint32 y, uint32* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_uint64_uint64(uint64 x, uint64 y, uint64* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_uint8_uint8(uint8 x, uint8 y, uint8* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec2h_vec2h(vec2h& x, vec2h& y, vec2h* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec3h_vec3h(vec3h& x, vec3h& y, vec3h* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec4h_vec4h(vec4h& x, vec4h& y, vec4h* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_spatial_vectorh_spatial_vectorh(spatial_vectorh& x, spatial_vectorh& y, spatial_vectorh* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec2f_vec2f(vec2f& x, vec2f& y, vec2f* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec3f_vec3f(vec3f& x, vec3f& y, vec3f* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec4f_vec4f(vec4f& x, vec4f& y, vec4f* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_spatial_vectorf_spatial_vectorf(spatial_vectorf& x, spatial_vectorf& y, spatial_vectorf* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec2d_vec2d(vec2d& x, vec2d& y, vec2d* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec3d_vec3d(vec3d& x, vec3d& y, vec3d* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec4d_vec4d(vec4d& x, vec4d& y, vec4d* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_spatial_vectord_spatial_vectord(spatial_vectord& x, spatial_vectord& y, spatial_vectord* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec2s_vec2s(vec2s& x, vec2s& y, vec2s* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec3s_vec3s(vec3s& x, vec3s& y, vec3s* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec4s_vec4s(vec4s& x, vec4s& y, vec4s* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec2i_vec2i(vec2i& x, vec2i& y, vec2i* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec3i_vec3i(vec3i& x, vec3i& y, vec3i* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec4i_vec4i(vec4i& x, vec4i& y, vec4i* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec2l_vec2l(vec2l& x, vec2l& y, vec2l* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec3l_vec3l(vec3l& x, vec3l& y, vec3l* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec4l_vec4l(vec4l& x, vec4l& y, vec4l* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec2b_vec2b(vec2b& x, vec2b& y, vec2b* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec3b_vec3b(vec3b& x, vec3b& y, vec3b* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec4b_vec4b(vec4b& x, vec4b& y, vec4b* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec2us_vec2us(vec2us& x, vec2us& y, vec2us* 
ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec3us_vec3us(vec3us& x, vec3us& y, vec3us* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec4us_vec4us(vec4us& x, vec4us& y, vec4us* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec2ui_vec2ui(vec2ui& x, vec2ui& y, vec2ui* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec3ui_vec3ui(vec3ui& x, vec3ui& y, vec3ui* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec4ui_vec4ui(vec4ui& x, vec4ui& y, vec4ui* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec2ul_vec2ul(vec2ul& x, vec2ul& y, vec2ul* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec3ul_vec3ul(vec3ul& x, vec3ul& y, vec3ul* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec4ul_vec4ul(vec4ul& x, vec4ul& y, vec4ul* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec2ub_vec2ub(vec2ub& x, vec2ub& y, vec2ub* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec3ub_vec3ub(vec3ub& x, vec3ub& y, vec3ub* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec4ub_vec4ub(vec4ub& x, vec4ub& y, vec4ub* ret) { *ret = wp::min(x, y); } WP_API void builtin_min_vec2h(vec2h& v, float16* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec3h(vec3h& v, float16* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec4h(vec4h& v, float16* ret) { *ret = wp::min(v); } WP_API void builtin_min_spatial_vectorh(spatial_vectorh& v, float16* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec2f(vec2f& v, float32* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec3f(vec3f& v, float32* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec4f(vec4f& v, float32* ret) { *ret = wp::min(v); } WP_API void builtin_min_spatial_vectorf(spatial_vectorf& v, float32* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec2d(vec2d& v, float64* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec3d(vec3d& v, float64* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec4d(vec4d& v, float64* ret) { *ret = wp::min(v); } WP_API void builtin_min_spatial_vectord(spatial_vectord& v, float64* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec2s(vec2s& v, int16* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec3s(vec3s& v, int16* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec4s(vec4s& v, int16* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec2i(vec2i& v, int32* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec3i(vec3i& v, int32* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec4i(vec4i& v, int32* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec2l(vec2l& v, int64* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec3l(vec3l& v, int64* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec4l(vec4l& v, int64* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec2b(vec2b& v, int8* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec3b(vec3b& v, int8* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec4b(vec4b& v, int8* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec2us(vec2us& v, uint16* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec3us(vec3us& v, uint16* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec4us(vec4us& v, uint16* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec2ui(vec2ui& v, uint32* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec3ui(vec3ui& v, uint32* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec4ui(vec4ui& v, uint32* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec2ul(vec2ul& v, uint64* ret) { *ret = wp::min(v); } WP_API 
void builtin_min_vec3ul(vec3ul& v, uint64* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec4ul(vec4ul& v, uint64* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec2ub(vec2ub& v, uint8* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec3ub(vec3ub& v, uint8* ret) { *ret = wp::min(v); } WP_API void builtin_min_vec4ub(vec4ub& v, uint8* ret) { *ret = wp::min(v); } WP_API void builtin_max_float16_float16(float16 x, float16 y, float16* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_float32_float32(float32 x, float32 y, float32* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_float64_float64(float64 x, float64 y, float64* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_int16_int16(int16 x, int16 y, int16* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_int32_int32(int32 x, int32 y, int32* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_int64_int64(int64 x, int64 y, int64* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_int8_int8(int8 x, int8 y, int8* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_uint16_uint16(uint16 x, uint16 y, uint16* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_uint32_uint32(uint32 x, uint32 y, uint32* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_uint64_uint64(uint64 x, uint64 y, uint64* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_uint8_uint8(uint8 x, uint8 y, uint8* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec2h_vec2h(vec2h& x, vec2h& y, vec2h* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec3h_vec3h(vec3h& x, vec3h& y, vec3h* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec4h_vec4h(vec4h& x, vec4h& y, vec4h* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_spatial_vectorh_spatial_vectorh(spatial_vectorh& x, spatial_vectorh& y, spatial_vectorh* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec2f_vec2f(vec2f& x, vec2f& y, vec2f* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec3f_vec3f(vec3f& x, vec3f& y, vec3f* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec4f_vec4f(vec4f& x, vec4f& y, vec4f* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_spatial_vectorf_spatial_vectorf(spatial_vectorf& x, spatial_vectorf& y, spatial_vectorf* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec2d_vec2d(vec2d& x, vec2d& y, vec2d* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec3d_vec3d(vec3d& x, vec3d& y, vec3d* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec4d_vec4d(vec4d& x, vec4d& y, vec4d* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_spatial_vectord_spatial_vectord(spatial_vectord& x, spatial_vectord& y, spatial_vectord* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec2s_vec2s(vec2s& x, vec2s& y, vec2s* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec3s_vec3s(vec3s& x, vec3s& y, vec3s* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec4s_vec4s(vec4s& x, vec4s& y, vec4s* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec2i_vec2i(vec2i& x, vec2i& y, vec2i* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec3i_vec3i(vec3i& x, vec3i& y, vec3i* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec4i_vec4i(vec4i& x, vec4i& y, vec4i* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec2l_vec2l(vec2l& x, vec2l& y, vec2l* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec3l_vec3l(vec3l& x, vec3l& y, vec3l* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec4l_vec4l(vec4l& x, vec4l& y, vec4l* ret) { *ret = wp::max(x, y); 
} WP_API void builtin_max_vec2b_vec2b(vec2b& x, vec2b& y, vec2b* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec3b_vec3b(vec3b& x, vec3b& y, vec3b* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec4b_vec4b(vec4b& x, vec4b& y, vec4b* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec2us_vec2us(vec2us& x, vec2us& y, vec2us* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec3us_vec3us(vec3us& x, vec3us& y, vec3us* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec4us_vec4us(vec4us& x, vec4us& y, vec4us* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec2ui_vec2ui(vec2ui& x, vec2ui& y, vec2ui* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec3ui_vec3ui(vec3ui& x, vec3ui& y, vec3ui* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec4ui_vec4ui(vec4ui& x, vec4ui& y, vec4ui* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec2ul_vec2ul(vec2ul& x, vec2ul& y, vec2ul* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec3ul_vec3ul(vec3ul& x, vec3ul& y, vec3ul* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec4ul_vec4ul(vec4ul& x, vec4ul& y, vec4ul* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec2ub_vec2ub(vec2ub& x, vec2ub& y, vec2ub* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec3ub_vec3ub(vec3ub& x, vec3ub& y, vec3ub* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec4ub_vec4ub(vec4ub& x, vec4ub& y, vec4ub* ret) { *ret = wp::max(x, y); } WP_API void builtin_max_vec2h(vec2h& v, float16* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec3h(vec3h& v, float16* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec4h(vec4h& v, float16* ret) { *ret = wp::max(v); } WP_API void builtin_max_spatial_vectorh(spatial_vectorh& v, float16* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec2f(vec2f& v, float32* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec3f(vec3f& v, float32* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec4f(vec4f& v, float32* ret) { *ret = wp::max(v); } WP_API void builtin_max_spatial_vectorf(spatial_vectorf& v, float32* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec2d(vec2d& v, float64* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec3d(vec3d& v, float64* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec4d(vec4d& v, float64* ret) { *ret = wp::max(v); } WP_API void builtin_max_spatial_vectord(spatial_vectord& v, float64* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec2s(vec2s& v, int16* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec3s(vec3s& v, int16* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec4s(vec4s& v, int16* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec2i(vec2i& v, int32* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec3i(vec3i& v, int32* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec4i(vec4i& v, int32* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec2l(vec2l& v, int64* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec3l(vec3l& v, int64* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec4l(vec4l& v, int64* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec2b(vec2b& v, int8* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec3b(vec3b& v, int8* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec4b(vec4b& v, int8* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec2us(vec2us& v, uint16* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec3us(vec3us& v, uint16* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec4us(vec4us& v, 
uint16* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec2ui(vec2ui& v, uint32* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec3ui(vec3ui& v, uint32* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec4ui(vec4ui& v, uint32* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec2ul(vec2ul& v, uint64* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec3ul(vec3ul& v, uint64* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec4ul(vec4ul& v, uint64* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec2ub(vec2ub& v, uint8* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec3ub(vec3ub& v, uint8* ret) { *ret = wp::max(v); } WP_API void builtin_max_vec4ub(vec4ub& v, uint8* ret) { *ret = wp::max(v); } WP_API void builtin_clamp_float16_float16_float16(float16 x, float16 a, float16 b, float16* ret) { *ret = wp::clamp(x, a, b); } WP_API void builtin_clamp_float32_float32_float32(float32 x, float32 a, float32 b, float32* ret) { *ret = wp::clamp(x, a, b); } WP_API void builtin_clamp_float64_float64_float64(float64 x, float64 a, float64 b, float64* ret) { *ret = wp::clamp(x, a, b); } WP_API void builtin_clamp_int16_int16_int16(int16 x, int16 a, int16 b, int16* ret) { *ret = wp::clamp(x, a, b); } WP_API void builtin_clamp_int32_int32_int32(int32 x, int32 a, int32 b, int32* ret) { *ret = wp::clamp(x, a, b); } WP_API void builtin_clamp_int64_int64_int64(int64 x, int64 a, int64 b, int64* ret) { *ret = wp::clamp(x, a, b); } WP_API void builtin_clamp_int8_int8_int8(int8 x, int8 a, int8 b, int8* ret) { *ret = wp::clamp(x, a, b); } WP_API void builtin_clamp_uint16_uint16_uint16(uint16 x, uint16 a, uint16 b, uint16* ret) { *ret = wp::clamp(x, a, b); } WP_API void builtin_clamp_uint32_uint32_uint32(uint32 x, uint32 a, uint32 b, uint32* ret) { *ret = wp::clamp(x, a, b); } WP_API void builtin_clamp_uint64_uint64_uint64(uint64 x, uint64 a, uint64 b, uint64* ret) { *ret = wp::clamp(x, a, b); } WP_API void builtin_clamp_uint8_uint8_uint8(uint8 x, uint8 a, uint8 b, uint8* ret) { *ret = wp::clamp(x, a, b); } WP_API void builtin_abs_float16(float16 x, float16* ret) { *ret = wp::abs(x); } WP_API void builtin_abs_float32(float32 x, float32* ret) { *ret = wp::abs(x); } WP_API void builtin_abs_float64(float64 x, float64* ret) { *ret = wp::abs(x); } WP_API void builtin_abs_int16(int16 x, int16* ret) { *ret = wp::abs(x); } WP_API void builtin_abs_int32(int32 x, int32* ret) { *ret = wp::abs(x); } WP_API void builtin_abs_int64(int64 x, int64* ret) { *ret = wp::abs(x); } WP_API void builtin_abs_int8(int8 x, int8* ret) { *ret = wp::abs(x); } WP_API void builtin_abs_uint16(uint16 x, uint16* ret) { *ret = wp::abs(x); } WP_API void builtin_abs_uint32(uint32 x, uint32* ret) { *ret = wp::abs(x); } WP_API void builtin_abs_uint64(uint64 x, uint64* ret) { *ret = wp::abs(x); } WP_API void builtin_abs_uint8(uint8 x, uint8* ret) { *ret = wp::abs(x); } WP_API void builtin_sign_float16(float16 x, float16* ret) { *ret = wp::sign(x); } WP_API void builtin_sign_float32(float32 x, float32* ret) { *ret = wp::sign(x); } WP_API void builtin_sign_float64(float64 x, float64* ret) { *ret = wp::sign(x); } WP_API void builtin_sign_int16(int16 x, int16* ret) { *ret = wp::sign(x); } WP_API void builtin_sign_int32(int32 x, int32* ret) { *ret = wp::sign(x); } WP_API void builtin_sign_int64(int64 x, int64* ret) { *ret = wp::sign(x); } WP_API void builtin_sign_int8(int8 x, int8* ret) { *ret = wp::sign(x); } WP_API void builtin_sign_uint16(uint16 x, uint16* ret) { *ret = wp::sign(x); } WP_API void builtin_sign_uint32(uint32 x, 
uint32* ret) { *ret = wp::sign(x); } WP_API void builtin_sign_uint64(uint64 x, uint64* ret) { *ret = wp::sign(x); } WP_API void builtin_sign_uint8(uint8 x, uint8* ret) { *ret = wp::sign(x); } WP_API void builtin_step_float16(float16 x, float16* ret) { *ret = wp::step(x); } WP_API void builtin_step_float32(float32 x, float32* ret) { *ret = wp::step(x); } WP_API void builtin_step_float64(float64 x, float64* ret) { *ret = wp::step(x); } WP_API void builtin_step_int16(int16 x, int16* ret) { *ret = wp::step(x); } WP_API void builtin_step_int32(int32 x, int32* ret) { *ret = wp::step(x); } WP_API void builtin_step_int64(int64 x, int64* ret) { *ret = wp::step(x); } WP_API void builtin_step_int8(int8 x, int8* ret) { *ret = wp::step(x); } WP_API void builtin_step_uint16(uint16 x, uint16* ret) { *ret = wp::step(x); } WP_API void builtin_step_uint32(uint32 x, uint32* ret) { *ret = wp::step(x); } WP_API void builtin_step_uint64(uint64 x, uint64* ret) { *ret = wp::step(x); } WP_API void builtin_step_uint8(uint8 x, uint8* ret) { *ret = wp::step(x); } WP_API void builtin_nonzero_float16(float16 x, float16* ret) { *ret = wp::nonzero(x); } WP_API void builtin_nonzero_float32(float32 x, float32* ret) { *ret = wp::nonzero(x); } WP_API void builtin_nonzero_float64(float64 x, float64* ret) { *ret = wp::nonzero(x); } WP_API void builtin_nonzero_int16(int16 x, int16* ret) { *ret = wp::nonzero(x); } WP_API void builtin_nonzero_int32(int32 x, int32* ret) { *ret = wp::nonzero(x); } WP_API void builtin_nonzero_int64(int64 x, int64* ret) { *ret = wp::nonzero(x); } WP_API void builtin_nonzero_int8(int8 x, int8* ret) { *ret = wp::nonzero(x); } WP_API void builtin_nonzero_uint16(uint16 x, uint16* ret) { *ret = wp::nonzero(x); } WP_API void builtin_nonzero_uint32(uint32 x, uint32* ret) { *ret = wp::nonzero(x); } WP_API void builtin_nonzero_uint64(uint64 x, uint64* ret) { *ret = wp::nonzero(x); } WP_API void builtin_nonzero_uint8(uint8 x, uint8* ret) { *ret = wp::nonzero(x); } WP_API void builtin_sin_float16(float16 x, float16* ret) { *ret = wp::sin(x); } WP_API void builtin_sin_float32(float32 x, float32* ret) { *ret = wp::sin(x); } WP_API void builtin_sin_float64(float64 x, float64* ret) { *ret = wp::sin(x); } WP_API void builtin_cos_float16(float16 x, float16* ret) { *ret = wp::cos(x); } WP_API void builtin_cos_float32(float32 x, float32* ret) { *ret = wp::cos(x); } WP_API void builtin_cos_float64(float64 x, float64* ret) { *ret = wp::cos(x); } WP_API void builtin_acos_float16(float16 x, float16* ret) { *ret = wp::acos(x); } WP_API void builtin_acos_float32(float32 x, float32* ret) { *ret = wp::acos(x); } WP_API void builtin_acos_float64(float64 x, float64* ret) { *ret = wp::acos(x); } WP_API void builtin_asin_float16(float16 x, float16* ret) { *ret = wp::asin(x); } WP_API void builtin_asin_float32(float32 x, float32* ret) { *ret = wp::asin(x); } WP_API void builtin_asin_float64(float64 x, float64* ret) { *ret = wp::asin(x); } WP_API void builtin_sqrt_float16(float16 x, float16* ret) { *ret = wp::sqrt(x); } WP_API void builtin_sqrt_float32(float32 x, float32* ret) { *ret = wp::sqrt(x); } WP_API void builtin_sqrt_float64(float64 x, float64* ret) { *ret = wp::sqrt(x); } WP_API void builtin_cbrt_float16(float16 x, float16* ret) { *ret = wp::cbrt(x); } WP_API void builtin_cbrt_float32(float32 x, float32* ret) { *ret = wp::cbrt(x); } WP_API void builtin_cbrt_float64(float64 x, float64* ret) { *ret = wp::cbrt(x); } WP_API void builtin_tan_float16(float16 x, float16* ret) { *ret = wp::tan(x); } WP_API void 
builtin_tan_float32(float32 x, float32* ret) { *ret = wp::tan(x); } WP_API void builtin_tan_float64(float64 x, float64* ret) { *ret = wp::tan(x); } WP_API void builtin_atan_float16(float16 x, float16* ret) { *ret = wp::atan(x); } WP_API void builtin_atan_float32(float32 x, float32* ret) { *ret = wp::atan(x); } WP_API void builtin_atan_float64(float64 x, float64* ret) { *ret = wp::atan(x); } WP_API void builtin_atan2_float16_float16(float16 y, float16 x, float16* ret) { *ret = wp::atan2(y, x); } WP_API void builtin_atan2_float32_float32(float32 y, float32 x, float32* ret) { *ret = wp::atan2(y, x); } WP_API void builtin_atan2_float64_float64(float64 y, float64 x, float64* ret) { *ret = wp::atan2(y, x); } WP_API void builtin_sinh_float16(float16 x, float16* ret) { *ret = wp::sinh(x); } WP_API void builtin_sinh_float32(float32 x, float32* ret) { *ret = wp::sinh(x); } WP_API void builtin_sinh_float64(float64 x, float64* ret) { *ret = wp::sinh(x); } WP_API void builtin_cosh_float16(float16 x, float16* ret) { *ret = wp::cosh(x); } WP_API void builtin_cosh_float32(float32 x, float32* ret) { *ret = wp::cosh(x); } WP_API void builtin_cosh_float64(float64 x, float64* ret) { *ret = wp::cosh(x); } WP_API void builtin_tanh_float16(float16 x, float16* ret) { *ret = wp::tanh(x); } WP_API void builtin_tanh_float32(float32 x, float32* ret) { *ret = wp::tanh(x); } WP_API void builtin_tanh_float64(float64 x, float64* ret) { *ret = wp::tanh(x); } WP_API void builtin_degrees_float16(float16 x, float16* ret) { *ret = wp::degrees(x); } WP_API void builtin_degrees_float32(float32 x, float32* ret) { *ret = wp::degrees(x); } WP_API void builtin_degrees_float64(float64 x, float64* ret) { *ret = wp::degrees(x); } WP_API void builtin_radians_float16(float16 x, float16* ret) { *ret = wp::radians(x); } WP_API void builtin_radians_float32(float32 x, float32* ret) { *ret = wp::radians(x); } WP_API void builtin_radians_float64(float64 x, float64* ret) { *ret = wp::radians(x); } WP_API void builtin_log_float16(float16 x, float16* ret) { *ret = wp::log(x); } WP_API void builtin_log_float32(float32 x, float32* ret) { *ret = wp::log(x); } WP_API void builtin_log_float64(float64 x, float64* ret) { *ret = wp::log(x); } WP_API void builtin_log2_float16(float16 x, float16* ret) { *ret = wp::log2(x); } WP_API void builtin_log2_float32(float32 x, float32* ret) { *ret = wp::log2(x); } WP_API void builtin_log2_float64(float64 x, float64* ret) { *ret = wp::log2(x); } WP_API void builtin_log10_float16(float16 x, float16* ret) { *ret = wp::log10(x); } WP_API void builtin_log10_float32(float32 x, float32* ret) { *ret = wp::log10(x); } WP_API void builtin_log10_float64(float64 x, float64* ret) { *ret = wp::log10(x); } WP_API void builtin_exp_float16(float16 x, float16* ret) { *ret = wp::exp(x); } WP_API void builtin_exp_float32(float32 x, float32* ret) { *ret = wp::exp(x); } WP_API void builtin_exp_float64(float64 x, float64* ret) { *ret = wp::exp(x); } WP_API void builtin_pow_float16_float16(float16 x, float16 y, float16* ret) { *ret = wp::pow(x, y); } WP_API void builtin_pow_float32_float32(float32 x, float32 y, float32* ret) { *ret = wp::pow(x, y); } WP_API void builtin_pow_float64_float64(float64 x, float64 y, float64* ret) { *ret = wp::pow(x, y); } WP_API void builtin_round_float16(float16 x, float16* ret) { *ret = wp::round(x); } WP_API void builtin_round_float32(float32 x, float32* ret) { *ret = wp::round(x); } WP_API void builtin_round_float64(float64 x, float64* ret) { *ret = wp::round(x); } WP_API void builtin_rint_float16(float16 
x, float16* ret) { *ret = wp::rint(x); } WP_API void builtin_rint_float32(float32 x, float32* ret) { *ret = wp::rint(x); } WP_API void builtin_rint_float64(float64 x, float64* ret) { *ret = wp::rint(x); } WP_API void builtin_trunc_float16(float16 x, float16* ret) { *ret = wp::trunc(x); } WP_API void builtin_trunc_float32(float32 x, float32* ret) { *ret = wp::trunc(x); } WP_API void builtin_trunc_float64(float64 x, float64* ret) { *ret = wp::trunc(x); } WP_API void builtin_floor_float16(float16 x, float16* ret) { *ret = wp::floor(x); } WP_API void builtin_floor_float32(float32 x, float32* ret) { *ret = wp::floor(x); } WP_API void builtin_floor_float64(float64 x, float64* ret) { *ret = wp::floor(x); } WP_API void builtin_ceil_float16(float16 x, float16* ret) { *ret = wp::ceil(x); } WP_API void builtin_ceil_float32(float32 x, float32* ret) { *ret = wp::ceil(x); } WP_API void builtin_ceil_float64(float64 x, float64* ret) { *ret = wp::ceil(x); } WP_API void builtin_frac_float16(float16 x, float16* ret) { *ret = wp::frac(x); } WP_API void builtin_frac_float32(float32 x, float32* ret) { *ret = wp::frac(x); } WP_API void builtin_frac_float64(float64 x, float64* ret) { *ret = wp::frac(x); } WP_API void builtin_isfinite_float16(float16 x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_float32(float32 x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_float64(float64 x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_int16(int16 x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_int32(int32 x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_int64(int64 x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_int8(int8 x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_uint16(uint16 x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_uint32(uint32 x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_uint64(uint64 x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_uint8(uint8 x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec2h(vec2h& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec3h(vec3h& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec4h(vec4h& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_spatial_vectorh(spatial_vectorh& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec2f(vec2f& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec3f(vec3f& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec4f(vec4f& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_spatial_vectorf(spatial_vectorf& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec2d(vec2d& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec3d(vec3d& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec4d(vec4d& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_spatial_vectord(spatial_vectord& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec2s(vec2s& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec3s(vec3s& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec4s(vec4s& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec2i(vec2i& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void 
builtin_isfinite_vec3i(vec3i& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec4i(vec4i& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec2l(vec2l& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec3l(vec3l& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec4l(vec4l& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec2b(vec2b& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec3b(vec3b& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec4b(vec4b& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec2us(vec2us& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec3us(vec3us& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec4us(vec4us& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec2ui(vec2ui& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec3ui(vec3ui& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec4ui(vec4ui& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec2ul(vec2ul& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec3ul(vec3ul& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec4ul(vec4ul& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec2ub(vec2ub& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec3ub(vec3ub& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_vec4ub(vec4ub& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_quath(quath& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_quatf(quatf& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_quatd(quatd& x, bool* ret) { *ret = wp::isfinite(x); } WP_API void builtin_isfinite_mat22h(mat22h& m, bool* ret) { *ret = wp::isfinite(m); } WP_API void builtin_isfinite_mat33h(mat33h& m, bool* ret) { *ret = wp::isfinite(m); } WP_API void builtin_isfinite_mat44h(mat44h& m, bool* ret) { *ret = wp::isfinite(m); } WP_API void builtin_isfinite_spatial_matrixh(spatial_matrixh& m, bool* ret) { *ret = wp::isfinite(m); } WP_API void builtin_isfinite_mat22f(mat22f& m, bool* ret) { *ret = wp::isfinite(m); } WP_API void builtin_isfinite_mat33f(mat33f& m, bool* ret) { *ret = wp::isfinite(m); } WP_API void builtin_isfinite_mat44f(mat44f& m, bool* ret) { *ret = wp::isfinite(m); } WP_API void builtin_isfinite_spatial_matrixf(spatial_matrixf& m, bool* ret) { *ret = wp::isfinite(m); } WP_API void builtin_isfinite_mat22d(mat22d& m, bool* ret) { *ret = wp::isfinite(m); } WP_API void builtin_isfinite_mat33d(mat33d& m, bool* ret) { *ret = wp::isfinite(m); } WP_API void builtin_isfinite_mat44d(mat44d& m, bool* ret) { *ret = wp::isfinite(m); } WP_API void builtin_isfinite_spatial_matrixd(spatial_matrixd& m, bool* ret) { *ret = wp::isfinite(m); } WP_API void builtin_isnan_float16(float16 x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_float32(float32 x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_float64(float64 x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_int16(int16 x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_int32(int32 x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_int64(int64 x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_int8(int8 x, bool* ret) 
{ *ret = wp::isnan(x); } WP_API void builtin_isnan_uint16(uint16 x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_uint32(uint32 x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_uint64(uint64 x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_uint8(uint8 x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec2h(vec2h& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec3h(vec3h& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec4h(vec4h& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_spatial_vectorh(spatial_vectorh& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec2f(vec2f& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec3f(vec3f& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec4f(vec4f& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_spatial_vectorf(spatial_vectorf& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec2d(vec2d& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec3d(vec3d& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec4d(vec4d& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_spatial_vectord(spatial_vectord& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec2s(vec2s& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec3s(vec3s& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec4s(vec4s& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec2i(vec2i& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec3i(vec3i& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec4i(vec4i& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec2l(vec2l& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec3l(vec3l& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec4l(vec4l& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec2b(vec2b& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec3b(vec3b& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec4b(vec4b& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec2us(vec2us& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec3us(vec3us& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec4us(vec4us& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec2ui(vec2ui& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec3ui(vec3ui& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec4ui(vec4ui& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec2ul(vec2ul& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec3ul(vec3ul& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec4ul(vec4ul& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec2ub(vec2ub& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec3ub(vec3ub& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_vec4ub(vec4ub& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_quath(quath& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_quatf(quatf& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_quatd(quatd& x, bool* ret) { *ret = wp::isnan(x); } WP_API void builtin_isnan_mat22h(mat22h& m, bool* ret) { *ret = wp::isnan(m); } WP_API 
void builtin_isnan_mat33h(mat33h& m, bool* ret) { *ret = wp::isnan(m); } WP_API void builtin_isnan_mat44h(mat44h& m, bool* ret) { *ret = wp::isnan(m); } WP_API void builtin_isnan_spatial_matrixh(spatial_matrixh& m, bool* ret) { *ret = wp::isnan(m); } WP_API void builtin_isnan_mat22f(mat22f& m, bool* ret) { *ret = wp::isnan(m); } WP_API void builtin_isnan_mat33f(mat33f& m, bool* ret) { *ret = wp::isnan(m); } WP_API void builtin_isnan_mat44f(mat44f& m, bool* ret) { *ret = wp::isnan(m); } WP_API void builtin_isnan_spatial_matrixf(spatial_matrixf& m, bool* ret) { *ret = wp::isnan(m); } WP_API void builtin_isnan_mat22d(mat22d& m, bool* ret) { *ret = wp::isnan(m); } WP_API void builtin_isnan_mat33d(mat33d& m, bool* ret) { *ret = wp::isnan(m); } WP_API void builtin_isnan_mat44d(mat44d& m, bool* ret) { *ret = wp::isnan(m); } WP_API void builtin_isnan_spatial_matrixd(spatial_matrixd& m, bool* ret) { *ret = wp::isnan(m); } WP_API void builtin_isinf_float16(float16 x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_float32(float32 x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_float64(float64 x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_int16(int16 x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_int32(int32 x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_int64(int64 x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_int8(int8 x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_uint16(uint16 x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_uint32(uint32 x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_uint64(uint64 x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_uint8(uint8 x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec2h(vec2h& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec3h(vec3h& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec4h(vec4h& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_spatial_vectorh(spatial_vectorh& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec2f(vec2f& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec3f(vec3f& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec4f(vec4f& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_spatial_vectorf(spatial_vectorf& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec2d(vec2d& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec3d(vec3d& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec4d(vec4d& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_spatial_vectord(spatial_vectord& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec2s(vec2s& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec3s(vec3s& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec4s(vec4s& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec2i(vec2i& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec3i(vec3i& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec4i(vec4i& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec2l(vec2l& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec3l(vec3l& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec4l(vec4l& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec2b(vec2b& x, bool* ret) { *ret = 
wp::isinf(x); } WP_API void builtin_isinf_vec3b(vec3b& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec4b(vec4b& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec2us(vec2us& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec3us(vec3us& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec4us(vec4us& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec2ui(vec2ui& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec3ui(vec3ui& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec4ui(vec4ui& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec2ul(vec2ul& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec3ul(vec3ul& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec4ul(vec4ul& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec2ub(vec2ub& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec3ub(vec3ub& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_vec4ub(vec4ub& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_quath(quath& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_quatf(quatf& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_quatd(quatd& x, bool* ret) { *ret = wp::isinf(x); } WP_API void builtin_isinf_mat22h(mat22h& m, bool* ret) { *ret = wp::isinf(m); } WP_API void builtin_isinf_mat33h(mat33h& m, bool* ret) { *ret = wp::isinf(m); } WP_API void builtin_isinf_mat44h(mat44h& m, bool* ret) { *ret = wp::isinf(m); } WP_API void builtin_isinf_spatial_matrixh(spatial_matrixh& m, bool* ret) { *ret = wp::isinf(m); } WP_API void builtin_isinf_mat22f(mat22f& m, bool* ret) { *ret = wp::isinf(m); } WP_API void builtin_isinf_mat33f(mat33f& m, bool* ret) { *ret = wp::isinf(m); } WP_API void builtin_isinf_mat44f(mat44f& m, bool* ret) { *ret = wp::isinf(m); } WP_API void builtin_isinf_spatial_matrixf(spatial_matrixf& m, bool* ret) { *ret = wp::isinf(m); } WP_API void builtin_isinf_mat22d(mat22d& m, bool* ret) { *ret = wp::isinf(m); } WP_API void builtin_isinf_mat33d(mat33d& m, bool* ret) { *ret = wp::isinf(m); } WP_API void builtin_isinf_mat44d(mat44d& m, bool* ret) { *ret = wp::isinf(m); } WP_API void builtin_isinf_spatial_matrixd(spatial_matrixd& m, bool* ret) { *ret = wp::isinf(m); } WP_API void builtin_dot_vec2h_vec2h(vec2h& x, vec2h& y, float16* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec3h_vec3h(vec3h& x, vec3h& y, float16* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec4h_vec4h(vec4h& x, vec4h& y, float16* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_spatial_vectorh_spatial_vectorh(spatial_vectorh& x, spatial_vectorh& y, float16* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec2f_vec2f(vec2f& x, vec2f& y, float32* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec3f_vec3f(vec3f& x, vec3f& y, float32* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec4f_vec4f(vec4f& x, vec4f& y, float32* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_spatial_vectorf_spatial_vectorf(spatial_vectorf& x, spatial_vectorf& y, float32* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec2d_vec2d(vec2d& x, vec2d& y, float64* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec3d_vec3d(vec3d& x, vec3d& y, float64* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec4d_vec4d(vec4d& x, vec4d& y, float64* ret) { *ret = wp::dot(x, y); } WP_API void 
builtin_dot_spatial_vectord_spatial_vectord(spatial_vectord& x, spatial_vectord& y, float64* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec2s_vec2s(vec2s& x, vec2s& y, int16* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec3s_vec3s(vec3s& x, vec3s& y, int16* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec4s_vec4s(vec4s& x, vec4s& y, int16* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec2i_vec2i(vec2i& x, vec2i& y, int32* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec3i_vec3i(vec3i& x, vec3i& y, int32* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec4i_vec4i(vec4i& x, vec4i& y, int32* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec2l_vec2l(vec2l& x, vec2l& y, int64* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec3l_vec3l(vec3l& x, vec3l& y, int64* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec4l_vec4l(vec4l& x, vec4l& y, int64* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec2b_vec2b(vec2b& x, vec2b& y, int8* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec3b_vec3b(vec3b& x, vec3b& y, int8* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec4b_vec4b(vec4b& x, vec4b& y, int8* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec2us_vec2us(vec2us& x, vec2us& y, uint16* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec3us_vec3us(vec3us& x, vec3us& y, uint16* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec4us_vec4us(vec4us& x, vec4us& y, uint16* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec2ui_vec2ui(vec2ui& x, vec2ui& y, uint32* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec3ui_vec3ui(vec3ui& x, vec3ui& y, uint32* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec4ui_vec4ui(vec4ui& x, vec4ui& y, uint32* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec2ul_vec2ul(vec2ul& x, vec2ul& y, uint64* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec3ul_vec3ul(vec3ul& x, vec3ul& y, uint64* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec4ul_vec4ul(vec4ul& x, vec4ul& y, uint64* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec2ub_vec2ub(vec2ub& x, vec2ub& y, uint8* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec3ub_vec3ub(vec3ub& x, vec3ub& y, uint8* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_vec4ub_vec4ub(vec4ub& x, vec4ub& y, uint8* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_quath_quath(quath& x, quath& y, float16* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_quatf_quatf(quatf& x, quatf& y, float32* ret) { *ret = wp::dot(x, y); } WP_API void builtin_dot_quatd_quatd(quatd& x, quatd& y, float64* ret) { *ret = wp::dot(x, y); } WP_API void builtin_ddot_mat22h_mat22h(mat22h& x, mat22h& y, float16* ret) { *ret = wp::ddot(x, y); } WP_API void builtin_ddot_mat33h_mat33h(mat33h& x, mat33h& y, float16* ret) { *ret = wp::ddot(x, y); } WP_API void builtin_ddot_mat44h_mat44h(mat44h& x, mat44h& y, float16* ret) { *ret = wp::ddot(x, y); } WP_API void builtin_ddot_spatial_matrixh_spatial_matrixh(spatial_matrixh& x, spatial_matrixh& y, float16* ret) { *ret = wp::ddot(x, y); } WP_API void builtin_ddot_mat22f_mat22f(mat22f& x, mat22f& y, float32* ret) { *ret = wp::ddot(x, y); } WP_API void builtin_ddot_mat33f_mat33f(mat33f& x, mat33f& y, float32* ret) { *ret = wp::ddot(x, y); } WP_API void builtin_ddot_mat44f_mat44f(mat44f& x, mat44f& y, float32* ret) { *ret = wp::ddot(x, y); } WP_API void builtin_ddot_spatial_matrixf_spatial_matrixf(spatial_matrixf& x, 
spatial_matrixf& y, float32* ret) { *ret = wp::ddot(x, y); } WP_API void builtin_ddot_mat22d_mat22d(mat22d& x, mat22d& y, float64* ret) { *ret = wp::ddot(x, y); } WP_API void builtin_ddot_mat33d_mat33d(mat33d& x, mat33d& y, float64* ret) { *ret = wp::ddot(x, y); } WP_API void builtin_ddot_mat44d_mat44d(mat44d& x, mat44d& y, float64* ret) { *ret = wp::ddot(x, y); } WP_API void builtin_ddot_spatial_matrixd_spatial_matrixd(spatial_matrixd& x, spatial_matrixd& y, float64* ret) { *ret = wp::ddot(x, y); } WP_API void builtin_argmin_vec2h(vec2h& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec3h(vec3h& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec4h(vec4h& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_spatial_vectorh(spatial_vectorh& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec2f(vec2f& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec3f(vec3f& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec4f(vec4f& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_spatial_vectorf(spatial_vectorf& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec2d(vec2d& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec3d(vec3d& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec4d(vec4d& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_spatial_vectord(spatial_vectord& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec2s(vec2s& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec3s(vec3s& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec4s(vec4s& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec2i(vec2i& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec3i(vec3i& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec4i(vec4i& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec2l(vec2l& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec3l(vec3l& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec4l(vec4l& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec2b(vec2b& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec3b(vec3b& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec4b(vec4b& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec2us(vec2us& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec3us(vec3us& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec4us(vec4us& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec2ui(vec2ui& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec3ui(vec3ui& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec4ui(vec4ui& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec2ul(vec2ul& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec3ul(vec3ul& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec4ul(vec4ul& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec2ub(vec2ub& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec3ub(vec3ub& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void builtin_argmin_vec4ub(vec4ub& v, uint32* ret) { *ret = wp::argmin(v); } WP_API void 
builtin_argmax_vec2h(vec2h& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec3h(vec3h& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec4h(vec4h& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_spatial_vectorh(spatial_vectorh& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec2f(vec2f& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec3f(vec3f& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec4f(vec4f& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_spatial_vectorf(spatial_vectorf& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec2d(vec2d& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec3d(vec3d& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec4d(vec4d& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_spatial_vectord(spatial_vectord& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec2s(vec2s& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec3s(vec3s& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec4s(vec4s& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec2i(vec2i& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec3i(vec3i& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec4i(vec4i& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec2l(vec2l& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec3l(vec3l& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec4l(vec4l& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec2b(vec2b& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec3b(vec3b& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec4b(vec4b& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec2us(vec2us& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec3us(vec3us& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec4us(vec4us& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec2ui(vec2ui& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec3ui(vec3ui& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec4ui(vec4ui& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec2ul(vec2ul& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec3ul(vec3ul& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec4ul(vec4ul& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec2ub(vec2ub& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec3ub(vec3ub& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_argmax_vec4ub(vec4ub& v, uint32* ret) { *ret = wp::argmax(v); } WP_API void builtin_outer_vec2h_vec2h(vec2h& x, vec2h& y, mat22h* ret) { *ret = wp::outer(x, y); } WP_API void builtin_outer_vec3h_vec3h(vec3h& x, vec3h& y, mat33h* ret) { *ret = wp::outer(x, y); } WP_API void builtin_outer_vec4h_vec4h(vec4h& x, vec4h& y, mat44h* ret) { *ret = wp::outer(x, y); } WP_API void builtin_outer_spatial_vectorh_spatial_vectorh(spatial_vectorh& x, spatial_vectorh& y, spatial_matrixh* ret) { *ret = wp::outer(x, y); } WP_API void builtin_outer_vec2f_vec2f(vec2f& x, vec2f& y, mat22f* ret) { *ret = 
wp::outer(x, y); } WP_API void builtin_outer_vec3f_vec3f(vec3f& x, vec3f& y, mat33f* ret) { *ret = wp::outer(x, y); } WP_API void builtin_outer_vec4f_vec4f(vec4f& x, vec4f& y, mat44f* ret) { *ret = wp::outer(x, y); } WP_API void builtin_outer_spatial_vectorf_spatial_vectorf(spatial_vectorf& x, spatial_vectorf& y, spatial_matrixf* ret) { *ret = wp::outer(x, y); } WP_API void builtin_outer_vec2d_vec2d(vec2d& x, vec2d& y, mat22d* ret) { *ret = wp::outer(x, y); } WP_API void builtin_outer_vec3d_vec3d(vec3d& x, vec3d& y, mat33d* ret) { *ret = wp::outer(x, y); } WP_API void builtin_outer_vec4d_vec4d(vec4d& x, vec4d& y, mat44d* ret) { *ret = wp::outer(x, y); } WP_API void builtin_outer_spatial_vectord_spatial_vectord(spatial_vectord& x, spatial_vectord& y, spatial_matrixd* ret) { *ret = wp::outer(x, y); } WP_API void builtin_cross_vec3h_vec3h(vec3h& x, vec3h& y, vec3h* ret) { *ret = wp::cross(x, y); } WP_API void builtin_cross_vec3f_vec3f(vec3f& x, vec3f& y, vec3f* ret) { *ret = wp::cross(x, y); } WP_API void builtin_cross_vec3d_vec3d(vec3d& x, vec3d& y, vec3d* ret) { *ret = wp::cross(x, y); } WP_API void builtin_cross_vec3s_vec3s(vec3s& x, vec3s& y, vec3s* ret) { *ret = wp::cross(x, y); } WP_API void builtin_cross_vec3i_vec3i(vec3i& x, vec3i& y, vec3i* ret) { *ret = wp::cross(x, y); } WP_API void builtin_cross_vec3l_vec3l(vec3l& x, vec3l& y, vec3l* ret) { *ret = wp::cross(x, y); } WP_API void builtin_cross_vec3b_vec3b(vec3b& x, vec3b& y, vec3b* ret) { *ret = wp::cross(x, y); } WP_API void builtin_cross_vec3us_vec3us(vec3us& x, vec3us& y, vec3us* ret) { *ret = wp::cross(x, y); } WP_API void builtin_cross_vec3ui_vec3ui(vec3ui& x, vec3ui& y, vec3ui* ret) { *ret = wp::cross(x, y); } WP_API void builtin_cross_vec3ul_vec3ul(vec3ul& x, vec3ul& y, vec3ul* ret) { *ret = wp::cross(x, y); } WP_API void builtin_cross_vec3ub_vec3ub(vec3ub& x, vec3ub& y, vec3ub* ret) { *ret = wp::cross(x, y); } WP_API void builtin_skew_vec3h(vec3h& x, mat33h* ret) { *ret = wp::skew(x); } WP_API void builtin_skew_vec3f(vec3f& x, mat33f* ret) { *ret = wp::skew(x); } WP_API void builtin_skew_vec3d(vec3d& x, mat33d* ret) { *ret = wp::skew(x); } WP_API void builtin_length_vec2h(vec2h& x, float16* ret) { *ret = wp::length(x); } WP_API void builtin_length_vec3h(vec3h& x, float16* ret) { *ret = wp::length(x); } WP_API void builtin_length_vec4h(vec4h& x, float16* ret) { *ret = wp::length(x); } WP_API void builtin_length_spatial_vectorh(spatial_vectorh& x, float16* ret) { *ret = wp::length(x); } WP_API void builtin_length_vec2f(vec2f& x, float32* ret) { *ret = wp::length(x); } WP_API void builtin_length_vec3f(vec3f& x, float32* ret) { *ret = wp::length(x); } WP_API void builtin_length_vec4f(vec4f& x, float32* ret) { *ret = wp::length(x); } WP_API void builtin_length_spatial_vectorf(spatial_vectorf& x, float32* ret) { *ret = wp::length(x); } WP_API void builtin_length_vec2d(vec2d& x, float64* ret) { *ret = wp::length(x); } WP_API void builtin_length_vec3d(vec3d& x, float64* ret) { *ret = wp::length(x); } WP_API void builtin_length_vec4d(vec4d& x, float64* ret) { *ret = wp::length(x); } WP_API void builtin_length_spatial_vectord(spatial_vectord& x, float64* ret) { *ret = wp::length(x); } WP_API void builtin_length_quath(quath& x, float16* ret) { *ret = wp::length(x); } WP_API void builtin_length_quatf(quatf& x, float32* ret) { *ret = wp::length(x); } WP_API void builtin_length_quatd(quatd& x, float64* ret) { *ret = wp::length(x); } WP_API void builtin_length_sq_vec2h(vec2h& x, float16* ret) { *ret = wp::length_sq(x); } WP_API void 
builtin_length_sq_vec3h(vec3h& x, float16* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec4h(vec4h& x, float16* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_spatial_vectorh(spatial_vectorh& x, float16* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec2f(vec2f& x, float32* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec3f(vec3f& x, float32* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec4f(vec4f& x, float32* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_spatial_vectorf(spatial_vectorf& x, float32* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec2d(vec2d& x, float64* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec3d(vec3d& x, float64* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec4d(vec4d& x, float64* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_spatial_vectord(spatial_vectord& x, float64* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec2s(vec2s& x, int16* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec3s(vec3s& x, int16* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec4s(vec4s& x, int16* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec2i(vec2i& x, int32* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec3i(vec3i& x, int32* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec4i(vec4i& x, int32* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec2l(vec2l& x, int64* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec3l(vec3l& x, int64* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec4l(vec4l& x, int64* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec2b(vec2b& x, int8* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec3b(vec3b& x, int8* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec4b(vec4b& x, int8* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec2us(vec2us& x, uint16* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec3us(vec3us& x, uint16* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec4us(vec4us& x, uint16* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec2ui(vec2ui& x, uint32* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec3ui(vec3ui& x, uint32* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec4ui(vec4ui& x, uint32* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec2ul(vec2ul& x, uint64* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec3ul(vec3ul& x, uint64* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec4ul(vec4ul& x, uint64* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec2ub(vec2ub& x, uint8* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec3ub(vec3ub& x, uint8* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_vec4ub(vec4ub& x, uint8* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_quath(quath& x, float16* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_quatf(quatf& x, float32* ret) { *ret = wp::length_sq(x); } WP_API void builtin_length_sq_quatd(quatd& x, float64* ret) { *ret = wp::length_sq(x); } WP_API void builtin_normalize_vec2h(vec2h& x, vec2h* ret) { *ret = wp::normalize(x); } WP_API void builtin_normalize_vec3h(vec3h& x, vec3h* 
ret) { *ret = wp::normalize(x); } WP_API void builtin_normalize_vec4h(vec4h& x, vec4h* ret) { *ret = wp::normalize(x); } WP_API void builtin_normalize_spatial_vectorh(spatial_vectorh& x, spatial_vectorh* ret) { *ret = wp::normalize(x); } WP_API void builtin_normalize_vec2f(vec2f& x, vec2f* ret) { *ret = wp::normalize(x); } WP_API void builtin_normalize_vec3f(vec3f& x, vec3f* ret) { *ret = wp::normalize(x); } WP_API void builtin_normalize_vec4f(vec4f& x, vec4f* ret) { *ret = wp::normalize(x); } WP_API void builtin_normalize_spatial_vectorf(spatial_vectorf& x, spatial_vectorf* ret) { *ret = wp::normalize(x); } WP_API void builtin_normalize_vec2d(vec2d& x, vec2d* ret) { *ret = wp::normalize(x); } WP_API void builtin_normalize_vec3d(vec3d& x, vec3d* ret) { *ret = wp::normalize(x); } WP_API void builtin_normalize_vec4d(vec4d& x, vec4d* ret) { *ret = wp::normalize(x); } WP_API void builtin_normalize_spatial_vectord(spatial_vectord& x, spatial_vectord* ret) { *ret = wp::normalize(x); } WP_API void builtin_normalize_quath(quath& x, quath* ret) { *ret = wp::normalize(x); } WP_API void builtin_normalize_quatf(quatf& x, quatf* ret) { *ret = wp::normalize(x); } WP_API void builtin_normalize_quatd(quatd& x, quatd* ret) { *ret = wp::normalize(x); } WP_API void builtin_transpose_mat22h(mat22h& m, mat22h* ret) { *ret = wp::transpose(m); } WP_API void builtin_transpose_mat33h(mat33h& m, mat33h* ret) { *ret = wp::transpose(m); } WP_API void builtin_transpose_mat44h(mat44h& m, mat44h* ret) { *ret = wp::transpose(m); } WP_API void builtin_transpose_spatial_matrixh(spatial_matrixh& m, spatial_matrixh* ret) { *ret = wp::transpose(m); } WP_API void builtin_transpose_mat22f(mat22f& m, mat22f* ret) { *ret = wp::transpose(m); } WP_API void builtin_transpose_mat33f(mat33f& m, mat33f* ret) { *ret = wp::transpose(m); } WP_API void builtin_transpose_mat44f(mat44f& m, mat44f* ret) { *ret = wp::transpose(m); } WP_API void builtin_transpose_spatial_matrixf(spatial_matrixf& m, spatial_matrixf* ret) { *ret = wp::transpose(m); } WP_API void builtin_transpose_mat22d(mat22d& m, mat22d* ret) { *ret = wp::transpose(m); } WP_API void builtin_transpose_mat33d(mat33d& m, mat33d* ret) { *ret = wp::transpose(m); } WP_API void builtin_transpose_mat44d(mat44d& m, mat44d* ret) { *ret = wp::transpose(m); } WP_API void builtin_transpose_spatial_matrixd(spatial_matrixd& m, spatial_matrixd* ret) { *ret = wp::transpose(m); } WP_API void builtin_inverse_mat22h(mat22h& m, mat22h* ret) { *ret = wp::inverse(m); } WP_API void builtin_inverse_mat22f(mat22f& m, mat22f* ret) { *ret = wp::inverse(m); } WP_API void builtin_inverse_mat22d(mat22d& m, mat22d* ret) { *ret = wp::inverse(m); } WP_API void builtin_inverse_mat33h(mat33h& m, mat33h* ret) { *ret = wp::inverse(m); } WP_API void builtin_inverse_mat33f(mat33f& m, mat33f* ret) { *ret = wp::inverse(m); } WP_API void builtin_inverse_mat33d(mat33d& m, mat33d* ret) { *ret = wp::inverse(m); } WP_API void builtin_inverse_mat44h(mat44h& m, mat44h* ret) { *ret = wp::inverse(m); } WP_API void builtin_inverse_mat44f(mat44f& m, mat44f* ret) { *ret = wp::inverse(m); } WP_API void builtin_inverse_mat44d(mat44d& m, mat44d* ret) { *ret = wp::inverse(m); } WP_API void builtin_determinant_mat22h(mat22h& m, float16* ret) { *ret = wp::determinant(m); } WP_API void builtin_determinant_mat22f(mat22f& m, float32* ret) { *ret = wp::determinant(m); } WP_API void builtin_determinant_mat22d(mat22d& m, float64* ret) { *ret = wp::determinant(m); } WP_API void builtin_determinant_mat33h(mat33h& m, float16* ret) { *ret = 
wp::determinant(m); } WP_API void builtin_determinant_mat33f(mat33f& m, float32* ret) { *ret = wp::determinant(m); } WP_API void builtin_determinant_mat33d(mat33d& m, float64* ret) { *ret = wp::determinant(m); } WP_API void builtin_determinant_mat44h(mat44h& m, float16* ret) { *ret = wp::determinant(m); } WP_API void builtin_determinant_mat44f(mat44f& m, float32* ret) { *ret = wp::determinant(m); } WP_API void builtin_determinant_mat44d(mat44d& m, float64* ret) { *ret = wp::determinant(m); } WP_API void builtin_trace_mat22h(mat22h& m, float16* ret) { *ret = wp::trace(m); } WP_API void builtin_trace_mat33h(mat33h& m, float16* ret) { *ret = wp::trace(m); } WP_API void builtin_trace_mat44h(mat44h& m, float16* ret) { *ret = wp::trace(m); } WP_API void builtin_trace_spatial_matrixh(spatial_matrixh& m, float16* ret) { *ret = wp::trace(m); } WP_API void builtin_trace_mat22f(mat22f& m, float32* ret) { *ret = wp::trace(m); } WP_API void builtin_trace_mat33f(mat33f& m, float32* ret) { *ret = wp::trace(m); } WP_API void builtin_trace_mat44f(mat44f& m, float32* ret) { *ret = wp::trace(m); } WP_API void builtin_trace_spatial_matrixf(spatial_matrixf& m, float32* ret) { *ret = wp::trace(m); } WP_API void builtin_trace_mat22d(mat22d& m, float64* ret) { *ret = wp::trace(m); } WP_API void builtin_trace_mat33d(mat33d& m, float64* ret) { *ret = wp::trace(m); } WP_API void builtin_trace_mat44d(mat44d& m, float64* ret) { *ret = wp::trace(m); } WP_API void builtin_trace_spatial_matrixd(spatial_matrixd& m, float64* ret) { *ret = wp::trace(m); } WP_API void builtin_diag_vec2h(vec2h& d, mat22h* ret) { *ret = wp::diag(d); } WP_API void builtin_diag_vec3h(vec3h& d, mat33h* ret) { *ret = wp::diag(d); } WP_API void builtin_diag_vec4h(vec4h& d, mat44h* ret) { *ret = wp::diag(d); } WP_API void builtin_diag_spatial_vectorh(spatial_vectorh& d, spatial_matrixh* ret) { *ret = wp::diag(d); } WP_API void builtin_diag_vec2f(vec2f& d, mat22f* ret) { *ret = wp::diag(d); } WP_API void builtin_diag_vec3f(vec3f& d, mat33f* ret) { *ret = wp::diag(d); } WP_API void builtin_diag_vec4f(vec4f& d, mat44f* ret) { *ret = wp::diag(d); } WP_API void builtin_diag_spatial_vectorf(spatial_vectorf& d, spatial_matrixf* ret) { *ret = wp::diag(d); } WP_API void builtin_diag_vec2d(vec2d& d, mat22d* ret) { *ret = wp::diag(d); } WP_API void builtin_diag_vec3d(vec3d& d, mat33d* ret) { *ret = wp::diag(d); } WP_API void builtin_diag_vec4d(vec4d& d, mat44d* ret) { *ret = wp::diag(d); } WP_API void builtin_diag_spatial_vectord(spatial_vectord& d, spatial_matrixd* ret) { *ret = wp::diag(d); } WP_API void builtin_get_diag_mat22h(mat22h& m, vec2h* ret) { *ret = wp::get_diag(m); } WP_API void builtin_get_diag_mat33h(mat33h& m, vec3h* ret) { *ret = wp::get_diag(m); } WP_API void builtin_get_diag_mat44h(mat44h& m, vec4h* ret) { *ret = wp::get_diag(m); } WP_API void builtin_get_diag_spatial_matrixh(spatial_matrixh& m, spatial_vectorh* ret) { *ret = wp::get_diag(m); } WP_API void builtin_get_diag_mat22f(mat22f& m, vec2f* ret) { *ret = wp::get_diag(m); } WP_API void builtin_get_diag_mat33f(mat33f& m, vec3f* ret) { *ret = wp::get_diag(m); } WP_API void builtin_get_diag_mat44f(mat44f& m, vec4f* ret) { *ret = wp::get_diag(m); } WP_API void builtin_get_diag_spatial_matrixf(spatial_matrixf& m, spatial_vectorf* ret) { *ret = wp::get_diag(m); } WP_API void builtin_get_diag_mat22d(mat22d& m, vec2d* ret) { *ret = wp::get_diag(m); } WP_API void builtin_get_diag_mat33d(mat33d& m, vec3d* ret) { *ret = wp::get_diag(m); } WP_API void builtin_get_diag_mat44d(mat44d& m, vec4d* ret) 
{ *ret = wp::get_diag(m); } WP_API void builtin_get_diag_spatial_matrixd(spatial_matrixd& m, spatial_vectord* ret) { *ret = wp::get_diag(m); } WP_API void builtin_cw_mul_vec2h_vec2h(vec2h& x, vec2h& y, vec2h* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec3h_vec3h(vec3h& x, vec3h& y, vec3h* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec4h_vec4h(vec4h& x, vec4h& y, vec4h* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_spatial_vectorh_spatial_vectorh(spatial_vectorh& x, spatial_vectorh& y, spatial_vectorh* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec2f_vec2f(vec2f& x, vec2f& y, vec2f* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec3f_vec3f(vec3f& x, vec3f& y, vec3f* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec4f_vec4f(vec4f& x, vec4f& y, vec4f* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_spatial_vectorf_spatial_vectorf(spatial_vectorf& x, spatial_vectorf& y, spatial_vectorf* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec2d_vec2d(vec2d& x, vec2d& y, vec2d* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec3d_vec3d(vec3d& x, vec3d& y, vec3d* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec4d_vec4d(vec4d& x, vec4d& y, vec4d* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_spatial_vectord_spatial_vectord(spatial_vectord& x, spatial_vectord& y, spatial_vectord* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec2s_vec2s(vec2s& x, vec2s& y, vec2s* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec3s_vec3s(vec3s& x, vec3s& y, vec3s* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec4s_vec4s(vec4s& x, vec4s& y, vec4s* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec2i_vec2i(vec2i& x, vec2i& y, vec2i* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec3i_vec3i(vec3i& x, vec3i& y, vec3i* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec4i_vec4i(vec4i& x, vec4i& y, vec4i* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec2l_vec2l(vec2l& x, vec2l& y, vec2l* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec3l_vec3l(vec3l& x, vec3l& y, vec3l* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec4l_vec4l(vec4l& x, vec4l& y, vec4l* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec2b_vec2b(vec2b& x, vec2b& y, vec2b* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec3b_vec3b(vec3b& x, vec3b& y, vec3b* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec4b_vec4b(vec4b& x, vec4b& y, vec4b* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec2us_vec2us(vec2us& x, vec2us& y, vec2us* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec3us_vec3us(vec3us& x, vec3us& y, vec3us* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec4us_vec4us(vec4us& x, vec4us& y, vec4us* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec2ui_vec2ui(vec2ui& x, vec2ui& y, vec2ui* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec3ui_vec3ui(vec3ui& x, vec3ui& y, vec3ui* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec4ui_vec4ui(vec4ui& x, vec4ui& y, vec4ui* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec2ul_vec2ul(vec2ul& x, vec2ul& y, vec2ul* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec3ul_vec3ul(vec3ul& x, vec3ul& y, vec3ul* ret) { *ret = wp::cw_mul(x, y); } WP_API void 
builtin_cw_mul_vec4ul_vec4ul(vec4ul& x, vec4ul& y, vec4ul* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec2ub_vec2ub(vec2ub& x, vec2ub& y, vec2ub* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec3ub_vec3ub(vec3ub& x, vec3ub& y, vec3ub* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_vec4ub_vec4ub(vec4ub& x, vec4ub& y, vec4ub* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_mat22h_mat22h(mat22h& x, mat22h& y, mat22h* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_mat33h_mat33h(mat33h& x, mat33h& y, mat33h* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_mat44h_mat44h(mat44h& x, mat44h& y, mat44h* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_spatial_matrixh_spatial_matrixh(spatial_matrixh& x, spatial_matrixh& y, spatial_matrixh* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_mat22f_mat22f(mat22f& x, mat22f& y, mat22f* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_mat33f_mat33f(mat33f& x, mat33f& y, mat33f* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_mat44f_mat44f(mat44f& x, mat44f& y, mat44f* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_spatial_matrixf_spatial_matrixf(spatial_matrixf& x, spatial_matrixf& y, spatial_matrixf* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_mat22d_mat22d(mat22d& x, mat22d& y, mat22d* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_mat33d_mat33d(mat33d& x, mat33d& y, mat33d* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_mat44d_mat44d(mat44d& x, mat44d& y, mat44d* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_mul_spatial_matrixd_spatial_matrixd(spatial_matrixd& x, spatial_matrixd& y, spatial_matrixd* ret) { *ret = wp::cw_mul(x, y); } WP_API void builtin_cw_div_vec2h_vec2h(vec2h& x, vec2h& y, vec2h* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec3h_vec3h(vec3h& x, vec3h& y, vec3h* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec4h_vec4h(vec4h& x, vec4h& y, vec4h* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_spatial_vectorh_spatial_vectorh(spatial_vectorh& x, spatial_vectorh& y, spatial_vectorh* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec2f_vec2f(vec2f& x, vec2f& y, vec2f* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec3f_vec3f(vec3f& x, vec3f& y, vec3f* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec4f_vec4f(vec4f& x, vec4f& y, vec4f* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_spatial_vectorf_spatial_vectorf(spatial_vectorf& x, spatial_vectorf& y, spatial_vectorf* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec2d_vec2d(vec2d& x, vec2d& y, vec2d* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec3d_vec3d(vec3d& x, vec3d& y, vec3d* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec4d_vec4d(vec4d& x, vec4d& y, vec4d* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_spatial_vectord_spatial_vectord(spatial_vectord& x, spatial_vectord& y, spatial_vectord* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec2s_vec2s(vec2s& x, vec2s& y, vec2s* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec3s_vec3s(vec3s& x, vec3s& y, vec3s* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec4s_vec4s(vec4s& x, vec4s& y, vec4s* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec2i_vec2i(vec2i& x, vec2i& y, vec2i* ret) { *ret = wp::cw_div(x, y); } 
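// Note: the wrappers in this section appear to be generated C ABI exports that simply forward to the
// corresponding wp:: builtins (cw_mul/cw_div above, then quaternion, transform, spatial, geometry-query,
// random/noise, extract, and lerp overloads below), writing the result through the trailing out-pointer.
// Illustrative host-side usage sketch only (assumes the value types such as vec2i are in scope via the
// usual wp:: aliases; not part of the generated declarations):
//   vec2i a(4, 9), b(2, 3), r;
//   builtin_cw_div_vec2i_vec2i(a, b, &r);   // component-wise divide: r == vec2i(2, 3)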
WP_API void builtin_cw_div_vec3i_vec3i(vec3i& x, vec3i& y, vec3i* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec4i_vec4i(vec4i& x, vec4i& y, vec4i* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec2l_vec2l(vec2l& x, vec2l& y, vec2l* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec3l_vec3l(vec3l& x, vec3l& y, vec3l* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec4l_vec4l(vec4l& x, vec4l& y, vec4l* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec2b_vec2b(vec2b& x, vec2b& y, vec2b* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec3b_vec3b(vec3b& x, vec3b& y, vec3b* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec4b_vec4b(vec4b& x, vec4b& y, vec4b* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec2us_vec2us(vec2us& x, vec2us& y, vec2us* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec3us_vec3us(vec3us& x, vec3us& y, vec3us* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec4us_vec4us(vec4us& x, vec4us& y, vec4us* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec2ui_vec2ui(vec2ui& x, vec2ui& y, vec2ui* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec3ui_vec3ui(vec3ui& x, vec3ui& y, vec3ui* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec4ui_vec4ui(vec4ui& x, vec4ui& y, vec4ui* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec2ul_vec2ul(vec2ul& x, vec2ul& y, vec2ul* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec3ul_vec3ul(vec3ul& x, vec3ul& y, vec3ul* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec4ul_vec4ul(vec4ul& x, vec4ul& y, vec4ul* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec2ub_vec2ub(vec2ub& x, vec2ub& y, vec2ub* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec3ub_vec3ub(vec3ub& x, vec3ub& y, vec3ub* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_vec4ub_vec4ub(vec4ub& x, vec4ub& y, vec4ub* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_mat22h_mat22h(mat22h& x, mat22h& y, mat22h* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_mat33h_mat33h(mat33h& x, mat33h& y, mat33h* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_mat44h_mat44h(mat44h& x, mat44h& y, mat44h* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_spatial_matrixh_spatial_matrixh(spatial_matrixh& x, spatial_matrixh& y, spatial_matrixh* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_mat22f_mat22f(mat22f& x, mat22f& y, mat22f* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_mat33f_mat33f(mat33f& x, mat33f& y, mat33f* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_mat44f_mat44f(mat44f& x, mat44f& y, mat44f* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_spatial_matrixf_spatial_matrixf(spatial_matrixf& x, spatial_matrixf& y, spatial_matrixf* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_mat22d_mat22d(mat22d& x, mat22d& y, mat22d* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_mat33d_mat33d(mat33d& x, mat33d& y, mat33d* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_mat44d_mat44d(mat44d& x, mat44d& y, mat44d* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_cw_div_spatial_matrixd_spatial_matrixd(spatial_matrixd& x, spatial_matrixd& y, spatial_matrixd* ret) { *ret = wp::cw_div(x, y); } WP_API void builtin_quat_identity(quatf* ret) { *ret = wp::quat_identity(); } WP_API void 
builtin_quat_from_axis_angle_vec3h_float16(vec3h& axis, float16 angle, quath* ret) { *ret = wp::quat_from_axis_angle(axis, angle); } WP_API void builtin_quat_from_axis_angle_vec3f_float32(vec3f& axis, float32 angle, quatf* ret) { *ret = wp::quat_from_axis_angle(axis, angle); } WP_API void builtin_quat_from_axis_angle_vec3d_float64(vec3d& axis, float64 angle, quatd* ret) { *ret = wp::quat_from_axis_angle(axis, angle); } WP_API void builtin_quat_from_matrix_mat33h(mat33h& m, quath* ret) { *ret = wp::quat_from_matrix(m); } WP_API void builtin_quat_from_matrix_mat33f(mat33f& m, quatf* ret) { *ret = wp::quat_from_matrix(m); } WP_API void builtin_quat_from_matrix_mat33d(mat33d& m, quatd* ret) { *ret = wp::quat_from_matrix(m); } WP_API void builtin_quat_rpy_float16_float16_float16(float16 roll, float16 pitch, float16 yaw, quath* ret) { *ret = wp::quat_rpy(roll, pitch, yaw); } WP_API void builtin_quat_rpy_float32_float32_float32(float32 roll, float32 pitch, float32 yaw, quatf* ret) { *ret = wp::quat_rpy(roll, pitch, yaw); } WP_API void builtin_quat_rpy_float64_float64_float64(float64 roll, float64 pitch, float64 yaw, quatd* ret) { *ret = wp::quat_rpy(roll, pitch, yaw); } WP_API void builtin_quat_inverse_quath(quath& q, quath* ret) { *ret = wp::quat_inverse(q); } WP_API void builtin_quat_inverse_quatf(quatf& q, quatf* ret) { *ret = wp::quat_inverse(q); } WP_API void builtin_quat_inverse_quatd(quatd& q, quatd* ret) { *ret = wp::quat_inverse(q); } WP_API void builtin_quat_rotate_quath_vec3h(quath& q, vec3h& p, vec3h* ret) { *ret = wp::quat_rotate(q, p); } WP_API void builtin_quat_rotate_quatf_vec3f(quatf& q, vec3f& p, vec3f* ret) { *ret = wp::quat_rotate(q, p); } WP_API void builtin_quat_rotate_quatd_vec3d(quatd& q, vec3d& p, vec3d* ret) { *ret = wp::quat_rotate(q, p); } WP_API void builtin_quat_rotate_inv_quath_vec3h(quath& q, vec3h& p, vec3h* ret) { *ret = wp::quat_rotate_inv(q, p); } WP_API void builtin_quat_rotate_inv_quatf_vec3f(quatf& q, vec3f& p, vec3f* ret) { *ret = wp::quat_rotate_inv(q, p); } WP_API void builtin_quat_rotate_inv_quatd_vec3d(quatd& q, vec3d& p, vec3d* ret) { *ret = wp::quat_rotate_inv(q, p); } WP_API void builtin_quat_slerp_quath_quath_float16(quath& q0, quath& q1, float16 t, quath* ret) { *ret = wp::quat_slerp(q0, q1, t); } WP_API void builtin_quat_slerp_quatf_quatf_float32(quatf& q0, quatf& q1, float32 t, quatf* ret) { *ret = wp::quat_slerp(q0, q1, t); } WP_API void builtin_quat_slerp_quatd_quatd_float64(quatd& q0, quatd& q1, float64 t, quatd* ret) { *ret = wp::quat_slerp(q0, q1, t); } WP_API void builtin_quat_to_matrix_quath(quath& q, mat33h* ret) { *ret = wp::quat_to_matrix(q); } WP_API void builtin_quat_to_matrix_quatf(quatf& q, mat33f* ret) { *ret = wp::quat_to_matrix(q); } WP_API void builtin_quat_to_matrix_quatd(quatd& q, mat33d* ret) { *ret = wp::quat_to_matrix(q); } WP_API void builtin_transform_identity(transformf* ret) { *ret = wp::transform_identity(); } WP_API void builtin_transform_get_translation_transformh(transformh& t, vec3h* ret) { *ret = wp::transform_get_translation(t); } WP_API void builtin_transform_get_translation_transformf(transformf& t, vec3f* ret) { *ret = wp::transform_get_translation(t); } WP_API void builtin_transform_get_translation_transformd(transformd& t, vec3d* ret) { *ret = wp::transform_get_translation(t); } WP_API void builtin_transform_get_rotation_transformh(transformh& t, quath* ret) { *ret = wp::transform_get_rotation(t); } WP_API void builtin_transform_get_rotation_transformf(transformf& t, quatf* ret) { *ret = 
wp::transform_get_rotation(t); } WP_API void builtin_transform_get_rotation_transformd(transformd& t, quatd* ret) { *ret = wp::transform_get_rotation(t); } WP_API void builtin_transform_multiply_transformh_transformh(transformh& a, transformh& b, transformh* ret) { *ret = wp::transform_multiply(a, b); } WP_API void builtin_transform_multiply_transformf_transformf(transformf& a, transformf& b, transformf* ret) { *ret = wp::transform_multiply(a, b); } WP_API void builtin_transform_multiply_transformd_transformd(transformd& a, transformd& b, transformd* ret) { *ret = wp::transform_multiply(a, b); } WP_API void builtin_transform_point_transformh_vec3h(transformh& t, vec3h& p, vec3h* ret) { *ret = wp::transform_point(t, p); } WP_API void builtin_transform_point_transformf_vec3f(transformf& t, vec3f& p, vec3f* ret) { *ret = wp::transform_point(t, p); } WP_API void builtin_transform_point_transformd_vec3d(transformd& t, vec3d& p, vec3d* ret) { *ret = wp::transform_point(t, p); } WP_API void builtin_transform_point_mat44h_vec3h(mat44h& m, vec3h& p, vec3h* ret) { *ret = wp::transform_point(m, p); } WP_API void builtin_transform_point_mat44f_vec3f(mat44f& m, vec3f& p, vec3f* ret) { *ret = wp::transform_point(m, p); } WP_API void builtin_transform_point_mat44d_vec3d(mat44d& m, vec3d& p, vec3d* ret) { *ret = wp::transform_point(m, p); } WP_API void builtin_transform_vector_transformh_vec3h(transformh& t, vec3h& v, vec3h* ret) { *ret = wp::transform_vector(t, v); } WP_API void builtin_transform_vector_transformf_vec3f(transformf& t, vec3f& v, vec3f* ret) { *ret = wp::transform_vector(t, v); } WP_API void builtin_transform_vector_transformd_vec3d(transformd& t, vec3d& v, vec3d* ret) { *ret = wp::transform_vector(t, v); } WP_API void builtin_transform_vector_mat44h_vec3h(mat44h& m, vec3h& v, vec3h* ret) { *ret = wp::transform_vector(m, v); } WP_API void builtin_transform_vector_mat44f_vec3f(mat44f& m, vec3f& v, vec3f* ret) { *ret = wp::transform_vector(m, v); } WP_API void builtin_transform_vector_mat44d_vec3d(mat44d& m, vec3d& v, vec3d* ret) { *ret = wp::transform_vector(m, v); } WP_API void builtin_transform_inverse_transformh(transformh& t, transformh* ret) { *ret = wp::transform_inverse(t); } WP_API void builtin_transform_inverse_transformf(transformf& t, transformf* ret) { *ret = wp::transform_inverse(t); } WP_API void builtin_transform_inverse_transformd(transformd& t, transformd* ret) { *ret = wp::transform_inverse(t); } WP_API void builtin_spatial_dot_spatial_vectorh_spatial_vectorh(spatial_vectorh& a, spatial_vectorh& b, float16* ret) { *ret = wp::spatial_dot(a, b); } WP_API void builtin_spatial_dot_spatial_vectorf_spatial_vectorf(spatial_vectorf& a, spatial_vectorf& b, float32* ret) { *ret = wp::spatial_dot(a, b); } WP_API void builtin_spatial_dot_spatial_vectord_spatial_vectord(spatial_vectord& a, spatial_vectord& b, float64* ret) { *ret = wp::spatial_dot(a, b); } WP_API void builtin_spatial_cross_spatial_vectorh_spatial_vectorh(spatial_vectorh& a, spatial_vectorh& b, spatial_vectorh* ret) { *ret = wp::spatial_cross(a, b); } WP_API void builtin_spatial_cross_spatial_vectorf_spatial_vectorf(spatial_vectorf& a, spatial_vectorf& b, spatial_vectorf* ret) { *ret = wp::spatial_cross(a, b); } WP_API void builtin_spatial_cross_spatial_vectord_spatial_vectord(spatial_vectord& a, spatial_vectord& b, spatial_vectord* ret) { *ret = wp::spatial_cross(a, b); } WP_API void builtin_spatial_cross_dual_spatial_vectorh_spatial_vectorh(spatial_vectorh& a, spatial_vectorh& b, spatial_vectorh* ret) { *ret = 
wp::spatial_cross_dual(a, b); } WP_API void builtin_spatial_cross_dual_spatial_vectorf_spatial_vectorf(spatial_vectorf& a, spatial_vectorf& b, spatial_vectorf* ret) { *ret = wp::spatial_cross_dual(a, b); } WP_API void builtin_spatial_cross_dual_spatial_vectord_spatial_vectord(spatial_vectord& a, spatial_vectord& b, spatial_vectord* ret) { *ret = wp::spatial_cross_dual(a, b); } WP_API void builtin_spatial_top_spatial_vectorh(spatial_vectorh& a, vec3h* ret) { *ret = wp::spatial_top(a); } WP_API void builtin_spatial_top_spatial_vectorf(spatial_vectorf& a, vec3f* ret) { *ret = wp::spatial_top(a); } WP_API void builtin_spatial_top_spatial_vectord(spatial_vectord& a, vec3d* ret) { *ret = wp::spatial_top(a); } WP_API void builtin_spatial_bottom_spatial_vectorh(spatial_vectorh& a, vec3h* ret) { *ret = wp::spatial_bottom(a); } WP_API void builtin_spatial_bottom_spatial_vectorf(spatial_vectorf& a, vec3f* ret) { *ret = wp::spatial_bottom(a); } WP_API void builtin_spatial_bottom_spatial_vectord(spatial_vectord& a, vec3d* ret) { *ret = wp::spatial_bottom(a); } WP_API void builtin_bvh_query_aabb_uint64_vec3f_vec3f(uint64 id, vec3f& lower, vec3f& upper, bvh_query_t* ret) { *ret = wp::bvh_query_aabb(id, lower, upper); } WP_API void builtin_bvh_query_ray_uint64_vec3f_vec3f(uint64 id, vec3f& start, vec3f& dir, bvh_query_t* ret) { *ret = wp::bvh_query_ray(id, start, dir); } WP_API void builtin_bvh_query_next_bvh_query_t_int32(bvh_query_t query, int32 index, bool* ret) { *ret = wp::bvh_query_next(query, index); } WP_API void builtin_mesh_query_point_uint64_vec3f_float32_float32_int32_float32_float32(uint64 id, vec3f& point, float32 max_dist, float32 inside, int32 face, float32 bary_u, float32 bary_v, bool* ret) { *ret = wp::mesh_query_point(id, point, max_dist, inside, face, bary_u, bary_v); } WP_API void builtin_mesh_query_point_uint64_vec3f_float32(uint64 id, vec3f& point, float32 max_dist, mesh_query_point_t* ret) { *ret = wp::mesh_query_point(id, point, max_dist); } WP_API void builtin_mesh_query_point_no_sign_uint64_vec3f_float32_int32_float32_float32(uint64 id, vec3f& point, float32 max_dist, int32 face, float32 bary_u, float32 bary_v, bool* ret) { *ret = wp::mesh_query_point_no_sign(id, point, max_dist, face, bary_u, bary_v); } WP_API void builtin_mesh_query_point_no_sign_uint64_vec3f_float32(uint64 id, vec3f& point, float32 max_dist, mesh_query_point_t* ret) { *ret = wp::mesh_query_point_no_sign(id, point, max_dist); } WP_API void builtin_mesh_query_furthest_point_no_sign_uint64_vec3f_float32_int32_float32_float32(uint64 id, vec3f& point, float32 min_dist, int32 face, float32 bary_u, float32 bary_v, bool* ret) { *ret = wp::mesh_query_furthest_point_no_sign(id, point, min_dist, face, bary_u, bary_v); } WP_API void builtin_mesh_query_furthest_point_no_sign_uint64_vec3f_float32(uint64 id, vec3f& point, float32 min_dist, mesh_query_point_t* ret) { *ret = wp::mesh_query_furthest_point_no_sign(id, point, min_dist); } WP_API void builtin_mesh_query_point_sign_normal_uint64_vec3f_float32_float32_int32_float32_float32_float32(uint64 id, vec3f& point, float32 max_dist, float32 inside, int32 face, float32 bary_u, float32 bary_v, float32 epsilon, bool* ret) { *ret = wp::mesh_query_point_sign_normal(id, point, max_dist, inside, face, bary_u, bary_v, epsilon); } WP_API void builtin_mesh_query_point_sign_normal_uint64_vec3f_float32_float32(uint64 id, vec3f& point, float32 max_dist, float32 epsilon, mesh_query_point_t* ret) { *ret = wp::mesh_query_point_sign_normal(id, point, max_dist, epsilon); } WP_API void 
builtin_mesh_query_point_sign_winding_number_uint64_vec3f_float32_float32_int32_float32_float32_float32_float32(uint64 id, vec3f& point, float32 max_dist, float32 inside, int32 face, float32 bary_u, float32 bary_v, float32 accuracy, float32 threshold, bool* ret) { *ret = wp::mesh_query_point_sign_winding_number(id, point, max_dist, inside, face, bary_u, bary_v, accuracy, threshold); } WP_API void builtin_mesh_query_point_sign_winding_number_uint64_vec3f_float32_float32_float32(uint64 id, vec3f& point, float32 max_dist, float32 accuracy, float32 threshold, mesh_query_point_t* ret) { *ret = wp::mesh_query_point_sign_winding_number(id, point, max_dist, accuracy, threshold); } WP_API void builtin_mesh_query_ray_uint64_vec3f_vec3f_float32_float32_float32_float32_float32_vec3f_int32(uint64 id, vec3f& start, vec3f& dir, float32 max_t, float32 t, float32 bary_u, float32 bary_v, float32 sign, vec3f& normal, int32 face, bool* ret) { *ret = wp::mesh_query_ray(id, start, dir, max_t, t, bary_u, bary_v, sign, normal, face); } WP_API void builtin_mesh_query_ray_uint64_vec3f_vec3f_float32(uint64 id, vec3f& start, vec3f& dir, float32 max_t, mesh_query_ray_t* ret) { *ret = wp::mesh_query_ray(id, start, dir, max_t); } WP_API void builtin_mesh_query_aabb_uint64_vec3f_vec3f(uint64 id, vec3f& lower, vec3f& upper, mesh_query_aabb_t* ret) { *ret = wp::mesh_query_aabb(id, lower, upper); } WP_API void builtin_mesh_query_aabb_next_mesh_query_aabb_t_int32(mesh_query_aabb_t query, int32 index, bool* ret) { *ret = wp::mesh_query_aabb_next(query, index); } WP_API void builtin_mesh_eval_position_uint64_int32_float32_float32(uint64 id, int32 face, float32 bary_u, float32 bary_v, vec3f* ret) { *ret = wp::mesh_eval_position(id, face, bary_u, bary_v); } WP_API void builtin_mesh_eval_velocity_uint64_int32_float32_float32(uint64 id, int32 face, float32 bary_u, float32 bary_v, vec3f* ret) { *ret = wp::mesh_eval_velocity(id, face, bary_u, bary_v); } WP_API void builtin_hash_grid_query_uint64_vec3f_float32(uint64 id, vec3f& point, float32 max_dist, hash_grid_query_t* ret) { *ret = wp::hash_grid_query(id, point, max_dist); } WP_API void builtin_hash_grid_query_next_hash_grid_query_t_int32(hash_grid_query_t query, int32 index, bool* ret) { *ret = wp::hash_grid_query_next(query, index); } WP_API void builtin_hash_grid_point_id_uint64_int32(uint64 id, int32 index, int* ret) { *ret = wp::hash_grid_point_id(id, index); } WP_API void builtin_intersect_tri_tri_vec3f_vec3f_vec3f_vec3f_vec3f_vec3f(vec3f& v0, vec3f& v1, vec3f& v2, vec3f& u0, vec3f& u1, vec3f& u2, int* ret) { *ret = wp::intersect_tri_tri(v0, v1, v2, u0, u1, u2); } WP_API void builtin_mesh_get_uint64(uint64 id, Mesh* ret) { *ret = wp::mesh_get(id); } WP_API void builtin_mesh_eval_face_normal_uint64_int32(uint64 id, int32 face, vec3f* ret) { *ret = wp::mesh_eval_face_normal(id, face); } WP_API void builtin_mesh_get_point_uint64_int32(uint64 id, int32 index, vec3f* ret) { *ret = wp::mesh_get_point(id, index); } WP_API void builtin_mesh_get_velocity_uint64_int32(uint64 id, int32 index, vec3f* ret) { *ret = wp::mesh_get_velocity(id, index); } WP_API void builtin_mesh_get_index_uint64_int32(uint64 id, int32 index, int* ret) { *ret = wp::mesh_get_index(id, index); } WP_API void builtin_closest_point_edge_edge_vec3f_vec3f_vec3f_vec3f_float32(vec3f& p1, vec3f& q1, vec3f& p2, vec3f& q2, float32 epsilon, vec3f* ret) { *ret = wp::closest_point_edge_edge(p1, q1, p2, q2, epsilon); } WP_API void builtin_iter_next_range_t(range_t range, int* ret) { *ret = wp::iter_next(range); } WP_API void 
builtin_iter_next_hash_grid_query_t(hash_grid_query_t query, int* ret) { *ret = wp::iter_next(query); } WP_API void builtin_iter_next_mesh_query_aabb_t(mesh_query_aabb_t query, int* ret) { *ret = wp::iter_next(query); } WP_API void builtin_volume_sample_f_uint64_vec3f_int32(uint64 id, vec3f& uvw, int32 sampling_mode, float* ret) { *ret = wp::volume_sample_f(id, uvw, sampling_mode); } WP_API void builtin_volume_sample_grad_f_uint64_vec3f_int32_vec3f(uint64 id, vec3f& uvw, int32 sampling_mode, vec3f& grad, float* ret) { *ret = wp::volume_sample_grad_f(id, uvw, sampling_mode, grad); } WP_API void builtin_volume_lookup_f_uint64_int32_int32_int32(uint64 id, int32 i, int32 j, int32 k, float* ret) { *ret = wp::volume_lookup_f(id, i, j, k); } WP_API void builtin_volume_sample_v_uint64_vec3f_int32(uint64 id, vec3f& uvw, int32 sampling_mode, vec3f* ret) { *ret = wp::volume_sample_v(id, uvw, sampling_mode); } WP_API void builtin_volume_lookup_v_uint64_int32_int32_int32(uint64 id, int32 i, int32 j, int32 k, vec3f* ret) { *ret = wp::volume_lookup_v(id, i, j, k); } WP_API void builtin_volume_sample_i_uint64_vec3f(uint64 id, vec3f& uvw, int* ret) { *ret = wp::volume_sample_i(id, uvw); } WP_API void builtin_volume_lookup_i_uint64_int32_int32_int32(uint64 id, int32 i, int32 j, int32 k, int* ret) { *ret = wp::volume_lookup_i(id, i, j, k); } WP_API void builtin_volume_lookup_index_uint64_int32_int32_int32(uint64 id, int32 i, int32 j, int32 k, int32* ret) { *ret = wp::volume_lookup_index(id, i, j, k); } WP_API void builtin_volume_index_to_world_uint64_vec3f(uint64 id, vec3f& uvw, vec3f* ret) { *ret = wp::volume_index_to_world(id, uvw); } WP_API void builtin_volume_world_to_index_uint64_vec3f(uint64 id, vec3f& xyz, vec3f* ret) { *ret = wp::volume_world_to_index(id, xyz); } WP_API void builtin_volume_index_to_world_dir_uint64_vec3f(uint64 id, vec3f& uvw, vec3f* ret) { *ret = wp::volume_index_to_world_dir(id, uvw); } WP_API void builtin_volume_world_to_index_dir_uint64_vec3f(uint64 id, vec3f& xyz, vec3f* ret) { *ret = wp::volume_world_to_index_dir(id, xyz); } WP_API void builtin_rand_init_int32(int32 seed, uint32* ret) { *ret = wp::rand_init(seed); } WP_API void builtin_rand_init_int32_int32(int32 seed, int32 offset, uint32* ret) { *ret = wp::rand_init(seed, offset); } WP_API void builtin_randi_uint32(uint32 state, int* ret) { *ret = wp::randi(state); } WP_API void builtin_randi_uint32_int32_int32(uint32 state, int32 min, int32 max, int* ret) { *ret = wp::randi(state, min, max); } WP_API void builtin_randf_uint32(uint32 state, float* ret) { *ret = wp::randf(state); } WP_API void builtin_randf_uint32_float32_float32(uint32 state, float32 min, float32 max, float* ret) { *ret = wp::randf(state, min, max); } WP_API void builtin_randn_uint32(uint32 state, float* ret) { *ret = wp::randn(state); } WP_API void builtin_sample_triangle_uint32(uint32 state, vec2f* ret) { *ret = wp::sample_triangle(state); } WP_API void builtin_sample_unit_ring_uint32(uint32 state, vec2f* ret) { *ret = wp::sample_unit_ring(state); } WP_API void builtin_sample_unit_disk_uint32(uint32 state, vec2f* ret) { *ret = wp::sample_unit_disk(state); } WP_API void builtin_sample_unit_sphere_surface_uint32(uint32 state, vec3f* ret) { *ret = wp::sample_unit_sphere_surface(state); } WP_API void builtin_sample_unit_sphere_uint32(uint32 state, vec3f* ret) { *ret = wp::sample_unit_sphere(state); } WP_API void builtin_sample_unit_hemisphere_surface_uint32(uint32 state, vec3f* ret) { *ret = wp::sample_unit_hemisphere_surface(state); } WP_API void 
builtin_sample_unit_hemisphere_uint32(uint32 state, vec3f* ret) { *ret = wp::sample_unit_hemisphere(state); } WP_API void builtin_sample_unit_square_uint32(uint32 state, vec2f* ret) { *ret = wp::sample_unit_square(state); } WP_API void builtin_sample_unit_cube_uint32(uint32 state, vec3f* ret) { *ret = wp::sample_unit_cube(state); } WP_API void builtin_poisson_uint32_float32(uint32 state, float32 lam, uint32* ret) { *ret = wp::poisson(state, lam); } WP_API void builtin_noise_uint32_float32(uint32 state, float32 x, float* ret) { *ret = wp::noise(state, x); } WP_API void builtin_noise_uint32_vec2f(uint32 state, vec2f& xy, float* ret) { *ret = wp::noise(state, xy); } WP_API void builtin_noise_uint32_vec3f(uint32 state, vec3f& xyz, float* ret) { *ret = wp::noise(state, xyz); } WP_API void builtin_noise_uint32_vec4f(uint32 state, vec4f& xyzt, float* ret) { *ret = wp::noise(state, xyzt); } WP_API void builtin_pnoise_uint32_float32_int32(uint32 state, float32 x, int32 px, float* ret) { *ret = wp::pnoise(state, x, px); } WP_API void builtin_pnoise_uint32_vec2f_int32_int32(uint32 state, vec2f& xy, int32 px, int32 py, float* ret) { *ret = wp::pnoise(state, xy, px, py); } WP_API void builtin_pnoise_uint32_vec3f_int32_int32_int32(uint32 state, vec3f& xyz, int32 px, int32 py, int32 pz, float* ret) { *ret = wp::pnoise(state, xyz, px, py, pz); } WP_API void builtin_pnoise_uint32_vec4f_int32_int32_int32_int32(uint32 state, vec4f& xyzt, int32 px, int32 py, int32 pz, int32 pt, float* ret) { *ret = wp::pnoise(state, xyzt, px, py, pz, pt); } WP_API void builtin_curlnoise_uint32_vec2f_uint32_float32_float32(uint32 state, vec2f& xy, uint32 octaves, float32 lacunarity, float32 gain, vec2f* ret) { *ret = wp::curlnoise(state, xy, octaves, lacunarity, gain); } WP_API void builtin_curlnoise_uint32_vec3f_uint32_float32_float32(uint32 state, vec3f& xyz, uint32 octaves, float32 lacunarity, float32 gain, vec3f* ret) { *ret = wp::curlnoise(state, xyz, octaves, lacunarity, gain); } WP_API void builtin_curlnoise_uint32_vec4f_uint32_float32_float32(uint32 state, vec4f& xyzt, uint32 octaves, float32 lacunarity, float32 gain, vec3f* ret) { *ret = wp::curlnoise(state, xyzt, octaves, lacunarity, gain); } WP_API void builtin_extract_vec2h_int32(vec2h& a, int32 i, float16* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec3h_int32(vec3h& a, int32 i, float16* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec4h_int32(vec4h& a, int32 i, float16* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_spatial_vectorh_int32(spatial_vectorh& a, int32 i, float16* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec2f_int32(vec2f& a, int32 i, float32* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec3f_int32(vec3f& a, int32 i, float32* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec4f_int32(vec4f& a, int32 i, float32* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_spatial_vectorf_int32(spatial_vectorf& a, int32 i, float32* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec2d_int32(vec2d& a, int32 i, float64* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec3d_int32(vec3d& a, int32 i, float64* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec4d_int32(vec4d& a, int32 i, float64* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_spatial_vectord_int32(spatial_vectord& a, int32 i, float64* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec2s_int32(vec2s& a, 
int32 i, int16* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec3s_int32(vec3s& a, int32 i, int16* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec4s_int32(vec4s& a, int32 i, int16* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec2i_int32(vec2i& a, int32 i, int32* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec3i_int32(vec3i& a, int32 i, int32* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec4i_int32(vec4i& a, int32 i, int32* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec2l_int32(vec2l& a, int32 i, int64* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec3l_int32(vec3l& a, int32 i, int64* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec4l_int32(vec4l& a, int32 i, int64* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec2b_int32(vec2b& a, int32 i, int8* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec3b_int32(vec3b& a, int32 i, int8* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec4b_int32(vec4b& a, int32 i, int8* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec2us_int32(vec2us& a, int32 i, uint16* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec3us_int32(vec3us& a, int32 i, uint16* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec4us_int32(vec4us& a, int32 i, uint16* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec2ui_int32(vec2ui& a, int32 i, uint32* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec3ui_int32(vec3ui& a, int32 i, uint32* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec4ui_int32(vec4ui& a, int32 i, uint32* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec2ul_int32(vec2ul& a, int32 i, uint64* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec3ul_int32(vec3ul& a, int32 i, uint64* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec4ul_int32(vec4ul& a, int32 i, uint64* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec2ub_int32(vec2ub& a, int32 i, uint8* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec3ub_int32(vec3ub& a, int32 i, uint8* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_vec4ub_int32(vec4ub& a, int32 i, uint8* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_quath_int32(quath& a, int32 i, float16* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_quatf_int32(quatf& a, int32 i, float32* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_quatd_int32(quatd& a, int32 i, float64* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_mat22h_int32(mat22h& a, int32 i, vec2h* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_mat33h_int32(mat33h& a, int32 i, vec3h* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_mat44h_int32(mat44h& a, int32 i, vec4h* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_spatial_matrixh_int32(spatial_matrixh& a, int32 i, spatial_vectorh* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_mat22f_int32(mat22f& a, int32 i, vec2f* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_mat33f_int32(mat33f& a, int32 i, vec3f* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_mat44f_int32(mat44f& a, int32 i, vec4f* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_spatial_matrixf_int32(spatial_matrixf& a, int32 i, 
spatial_vectorf* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_mat22d_int32(mat22d& a, int32 i, vec2d* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_mat33d_int32(mat33d& a, int32 i, vec3d* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_mat44d_int32(mat44d& a, int32 i, vec4d* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_spatial_matrixd_int32(spatial_matrixd& a, int32 i, spatial_vectord* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_mat22h_int32_int32(mat22h& a, int32 i, int32 j, float16* ret) { *ret = wp::extract(a, i, j); } WP_API void builtin_extract_mat33h_int32_int32(mat33h& a, int32 i, int32 j, float16* ret) { *ret = wp::extract(a, i, j); } WP_API void builtin_extract_mat44h_int32_int32(mat44h& a, int32 i, int32 j, float16* ret) { *ret = wp::extract(a, i, j); } WP_API void builtin_extract_spatial_matrixh_int32_int32(spatial_matrixh& a, int32 i, int32 j, float16* ret) { *ret = wp::extract(a, i, j); } WP_API void builtin_extract_mat22f_int32_int32(mat22f& a, int32 i, int32 j, float32* ret) { *ret = wp::extract(a, i, j); } WP_API void builtin_extract_mat33f_int32_int32(mat33f& a, int32 i, int32 j, float32* ret) { *ret = wp::extract(a, i, j); } WP_API void builtin_extract_mat44f_int32_int32(mat44f& a, int32 i, int32 j, float32* ret) { *ret = wp::extract(a, i, j); } WP_API void builtin_extract_spatial_matrixf_int32_int32(spatial_matrixf& a, int32 i, int32 j, float32* ret) { *ret = wp::extract(a, i, j); } WP_API void builtin_extract_mat22d_int32_int32(mat22d& a, int32 i, int32 j, float64* ret) { *ret = wp::extract(a, i, j); } WP_API void builtin_extract_mat33d_int32_int32(mat33d& a, int32 i, int32 j, float64* ret) { *ret = wp::extract(a, i, j); } WP_API void builtin_extract_mat44d_int32_int32(mat44d& a, int32 i, int32 j, float64* ret) { *ret = wp::extract(a, i, j); } WP_API void builtin_extract_spatial_matrixd_int32_int32(spatial_matrixd& a, int32 i, int32 j, float64* ret) { *ret = wp::extract(a, i, j); } WP_API void builtin_extract_transformh_int32(transformh& a, int32 i, float16* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_transformf_int32(transformf& a, int32 i, float32* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_transformd_int32(transformd& a, int32 i, float64* ret) { *ret = wp::extract(a, i); } WP_API void builtin_extract_shape_t_int32(shape_t s, int32 i, int* ret) { *ret = wp::extract(s, i); } WP_API void builtin_lerp_float16_float16_float16(float16 a, float16 b, float16 t, float16* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_float32_float32_float32(float32 a, float32 b, float32 t, float32* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_float64_float64_float64(float64 a, float64 b, float64 t, float64* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_vec2h_vec2h_float16(vec2h& a, vec2h& b, float16 t, vec2h* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_vec3h_vec3h_float16(vec3h& a, vec3h& b, float16 t, vec3h* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_vec4h_vec4h_float16(vec4h& a, vec4h& b, float16 t, vec4h* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_spatial_vectorh_spatial_vectorh_float16(spatial_vectorh& a, spatial_vectorh& b, float16 t, spatial_vectorh* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_vec2f_vec2f_float32(vec2f& a, vec2f& b, float32 t, vec2f* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_vec3f_vec3f_float32(vec3f& a, vec3f& b, float32 
t, vec3f* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_vec4f_vec4f_float32(vec4f& a, vec4f& b, float32 t, vec4f* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_spatial_vectorf_spatial_vectorf_float32(spatial_vectorf& a, spatial_vectorf& b, float32 t, spatial_vectorf* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_vec2d_vec2d_float64(vec2d& a, vec2d& b, float64 t, vec2d* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_vec3d_vec3d_float64(vec3d& a, vec3d& b, float64 t, vec3d* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_vec4d_vec4d_float64(vec4d& a, vec4d& b, float64 t, vec4d* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_spatial_vectord_spatial_vectord_float64(spatial_vectord& a, spatial_vectord& b, float64 t, spatial_vectord* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_mat22h_mat22h_float16(mat22h& a, mat22h& b, float16 t, mat22h* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_mat33h_mat33h_float16(mat33h& a, mat33h& b, float16 t, mat33h* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_mat44h_mat44h_float16(mat44h& a, mat44h& b, float16 t, mat44h* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_spatial_matrixh_spatial_matrixh_float16(spatial_matrixh& a, spatial_matrixh& b, float16 t, spatial_matrixh* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_mat22f_mat22f_float32(mat22f& a, mat22f& b, float32 t, mat22f* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_mat33f_mat33f_float32(mat33f& a, mat33f& b, float32 t, mat33f* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_mat44f_mat44f_float32(mat44f& a, mat44f& b, float32 t, mat44f* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_spatial_matrixf_spatial_matrixf_float32(spatial_matrixf& a, spatial_matrixf& b, float32 t, spatial_matrixf* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_mat22d_mat22d_float64(mat22d& a, mat22d& b, float64 t, mat22d* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_mat33d_mat33d_float64(mat33d& a, mat33d& b, float64 t, mat33d* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_mat44d_mat44d_float64(mat44d& a, mat44d& b, float64 t, mat44d* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_spatial_matrixd_spatial_matrixd_float64(spatial_matrixd& a, spatial_matrixd& b, float64 t, spatial_matrixd* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_quath_quath_float16(quath& a, quath& b, float16 t, quath* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_quatf_quatf_float32(quatf& a, quatf& b, float32 t, quatf* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_quatd_quatd_float64(quatd& a, quatd& b, float64 t, quatd* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_transformh_transformh_float16(transformh& a, transformh& b, float16 t, transformh* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_transformf_transformf_float32(transformf& a, transformf& b, float32 t, transformf* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_lerp_transformd_transformd_float64(transformd& a, transformd& b, float64 t, transformd* ret) { *ret = wp::lerp(a, b, t); } WP_API void builtin_smoothstep_float16_float16_float16(float16 edge0, float16 edge1, float16 x, float16* ret) { *ret = wp::smoothstep(edge0, edge1, x); } WP_API void builtin_smoothstep_float32_float32_float32(float32 edge0, float32 edge1, float32 x, float32* ret) { *ret = wp::smoothstep(edge0, edge1, x); } 
WP_API void builtin_smoothstep_float64_float64_float64(float64 edge0, float64 edge1, float64 x, float64* ret) { *ret = wp::smoothstep(edge0, edge1, x); } WP_API void builtin_add_float16_float16(float16 x, float16 y, float16* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_float32_float32(float32 x, float32 y, float32* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_float64_float64(float64 x, float64 y, float64* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_int16_int16(int16 x, int16 y, int16* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_int32_int32(int32 x, int32 y, int32* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_int64_int64(int64 x, int64 y, int64* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_int8_int8(int8 x, int8 y, int8* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_uint16_uint16(uint16 x, uint16 y, uint16* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_uint32_uint32(uint32 x, uint32 y, uint32* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_uint64_uint64(uint64 x, uint64 y, uint64* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_uint8_uint8(uint8 x, uint8 y, uint8* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec2h_vec2h(vec2h& x, vec2h& y, vec2h* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec3h_vec3h(vec3h& x, vec3h& y, vec3h* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec4h_vec4h(vec4h& x, vec4h& y, vec4h* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_spatial_vectorh_spatial_vectorh(spatial_vectorh& x, spatial_vectorh& y, spatial_vectorh* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec2f_vec2f(vec2f& x, vec2f& y, vec2f* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec3f_vec3f(vec3f& x, vec3f& y, vec3f* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec4f_vec4f(vec4f& x, vec4f& y, vec4f* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_spatial_vectorf_spatial_vectorf(spatial_vectorf& x, spatial_vectorf& y, spatial_vectorf* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec2d_vec2d(vec2d& x, vec2d& y, vec2d* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec3d_vec3d(vec3d& x, vec3d& y, vec3d* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec4d_vec4d(vec4d& x, vec4d& y, vec4d* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_spatial_vectord_spatial_vectord(spatial_vectord& x, spatial_vectord& y, spatial_vectord* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec2s_vec2s(vec2s& x, vec2s& y, vec2s* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec3s_vec3s(vec3s& x, vec3s& y, vec3s* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec4s_vec4s(vec4s& x, vec4s& y, vec4s* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec2i_vec2i(vec2i& x, vec2i& y, vec2i* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec3i_vec3i(vec3i& x, vec3i& y, vec3i* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec4i_vec4i(vec4i& x, vec4i& y, vec4i* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec2l_vec2l(vec2l& x, vec2l& y, vec2l* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec3l_vec3l(vec3l& x, vec3l& y, vec3l* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec4l_vec4l(vec4l& x, vec4l& y, vec4l* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec2b_vec2b(vec2b& x, vec2b& y, vec2b* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec3b_vec3b(vec3b& x, vec3b& y, vec3b* ret) { *ret = wp::add(x, y); } WP_API void 
builtin_add_vec4b_vec4b(vec4b& x, vec4b& y, vec4b* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec2us_vec2us(vec2us& x, vec2us& y, vec2us* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec3us_vec3us(vec3us& x, vec3us& y, vec3us* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec4us_vec4us(vec4us& x, vec4us& y, vec4us* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec2ui_vec2ui(vec2ui& x, vec2ui& y, vec2ui* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec3ui_vec3ui(vec3ui& x, vec3ui& y, vec3ui* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec4ui_vec4ui(vec4ui& x, vec4ui& y, vec4ui* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec2ul_vec2ul(vec2ul& x, vec2ul& y, vec2ul* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec3ul_vec3ul(vec3ul& x, vec3ul& y, vec3ul* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec4ul_vec4ul(vec4ul& x, vec4ul& y, vec4ul* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec2ub_vec2ub(vec2ub& x, vec2ub& y, vec2ub* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec3ub_vec3ub(vec3ub& x, vec3ub& y, vec3ub* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_vec4ub_vec4ub(vec4ub& x, vec4ub& y, vec4ub* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_quath_quath(quath& x, quath& y, quath* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_quatf_quatf(quatf& x, quatf& y, quatf* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_quatd_quatd(quatd& x, quatd& y, quatd* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_mat22h_mat22h(mat22h& x, mat22h& y, mat22h* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_mat33h_mat33h(mat33h& x, mat33h& y, mat33h* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_mat44h_mat44h(mat44h& x, mat44h& y, mat44h* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_spatial_matrixh_spatial_matrixh(spatial_matrixh& x, spatial_matrixh& y, spatial_matrixh* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_mat22f_mat22f(mat22f& x, mat22f& y, mat22f* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_mat33f_mat33f(mat33f& x, mat33f& y, mat33f* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_mat44f_mat44f(mat44f& x, mat44f& y, mat44f* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_spatial_matrixf_spatial_matrixf(spatial_matrixf& x, spatial_matrixf& y, spatial_matrixf* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_mat22d_mat22d(mat22d& x, mat22d& y, mat22d* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_mat33d_mat33d(mat33d& x, mat33d& y, mat33d* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_mat44d_mat44d(mat44d& x, mat44d& y, mat44d* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_spatial_matrixd_spatial_matrixd(spatial_matrixd& x, spatial_matrixd& y, spatial_matrixd* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_transformh_transformh(transformh& x, transformh& y, transformh* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_transformf_transformf(transformf& x, transformf& y, transformf* ret) { *ret = wp::add(x, y); } WP_API void builtin_add_transformd_transformd(transformd& x, transformd& y, transformd* ret) { *ret = wp::add(x, y); } WP_API void builtin_sub_float16_float16(float16 x, float16 y, float16* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_float32_float32(float32 x, float32 y, float32* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_float64_float64(float64 x, float64 y, float64* ret) { *ret = wp::sub(x, y); } WP_API void 
builtin_sub_int16_int16(int16 x, int16 y, int16* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_int32_int32(int32 x, int32 y, int32* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_int64_int64(int64 x, int64 y, int64* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_int8_int8(int8 x, int8 y, int8* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_uint16_uint16(uint16 x, uint16 y, uint16* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_uint32_uint32(uint32 x, uint32 y, uint32* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_uint64_uint64(uint64 x, uint64 y, uint64* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_uint8_uint8(uint8 x, uint8 y, uint8* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec2h_vec2h(vec2h& x, vec2h& y, vec2h* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec3h_vec3h(vec3h& x, vec3h& y, vec3h* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec4h_vec4h(vec4h& x, vec4h& y, vec4h* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_spatial_vectorh_spatial_vectorh(spatial_vectorh& x, spatial_vectorh& y, spatial_vectorh* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec2f_vec2f(vec2f& x, vec2f& y, vec2f* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec3f_vec3f(vec3f& x, vec3f& y, vec3f* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec4f_vec4f(vec4f& x, vec4f& y, vec4f* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_spatial_vectorf_spatial_vectorf(spatial_vectorf& x, spatial_vectorf& y, spatial_vectorf* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec2d_vec2d(vec2d& x, vec2d& y, vec2d* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec3d_vec3d(vec3d& x, vec3d& y, vec3d* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec4d_vec4d(vec4d& x, vec4d& y, vec4d* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_spatial_vectord_spatial_vectord(spatial_vectord& x, spatial_vectord& y, spatial_vectord* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec2s_vec2s(vec2s& x, vec2s& y, vec2s* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec3s_vec3s(vec3s& x, vec3s& y, vec3s* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec4s_vec4s(vec4s& x, vec4s& y, vec4s* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec2i_vec2i(vec2i& x, vec2i& y, vec2i* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec3i_vec3i(vec3i& x, vec3i& y, vec3i* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec4i_vec4i(vec4i& x, vec4i& y, vec4i* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec2l_vec2l(vec2l& x, vec2l& y, vec2l* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec3l_vec3l(vec3l& x, vec3l& y, vec3l* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec4l_vec4l(vec4l& x, vec4l& y, vec4l* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec2b_vec2b(vec2b& x, vec2b& y, vec2b* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec3b_vec3b(vec3b& x, vec3b& y, vec3b* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec4b_vec4b(vec4b& x, vec4b& y, vec4b* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec2us_vec2us(vec2us& x, vec2us& y, vec2us* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec3us_vec3us(vec3us& x, vec3us& y, vec3us* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec4us_vec4us(vec4us& x, vec4us& y, vec4us* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec2ui_vec2ui(vec2ui& x, vec2ui& y, vec2ui* ret) { *ret = wp::sub(x, y); } WP_API void 
builtin_sub_vec3ui_vec3ui(vec3ui& x, vec3ui& y, vec3ui* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec4ui_vec4ui(vec4ui& x, vec4ui& y, vec4ui* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec2ul_vec2ul(vec2ul& x, vec2ul& y, vec2ul* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec3ul_vec3ul(vec3ul& x, vec3ul& y, vec3ul* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec4ul_vec4ul(vec4ul& x, vec4ul& y, vec4ul* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec2ub_vec2ub(vec2ub& x, vec2ub& y, vec2ub* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec3ub_vec3ub(vec3ub& x, vec3ub& y, vec3ub* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_vec4ub_vec4ub(vec4ub& x, vec4ub& y, vec4ub* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_mat22h_mat22h(mat22h& x, mat22h& y, mat22h* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_mat33h_mat33h(mat33h& x, mat33h& y, mat33h* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_mat44h_mat44h(mat44h& x, mat44h& y, mat44h* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_spatial_matrixh_spatial_matrixh(spatial_matrixh& x, spatial_matrixh& y, spatial_matrixh* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_mat22f_mat22f(mat22f& x, mat22f& y, mat22f* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_mat33f_mat33f(mat33f& x, mat33f& y, mat33f* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_mat44f_mat44f(mat44f& x, mat44f& y, mat44f* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_spatial_matrixf_spatial_matrixf(spatial_matrixf& x, spatial_matrixf& y, spatial_matrixf* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_mat22d_mat22d(mat22d& x, mat22d& y, mat22d* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_mat33d_mat33d(mat33d& x, mat33d& y, mat33d* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_mat44d_mat44d(mat44d& x, mat44d& y, mat44d* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_spatial_matrixd_spatial_matrixd(spatial_matrixd& x, spatial_matrixd& y, spatial_matrixd* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_quath_quath(quath& x, quath& y, quath* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_quatf_quatf(quatf& x, quatf& y, quatf* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_quatd_quatd(quatd& x, quatd& y, quatd* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_transformh_transformh(transformh& x, transformh& y, transformh* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_transformf_transformf(transformf& x, transformf& y, transformf* ret) { *ret = wp::sub(x, y); } WP_API void builtin_sub_transformd_transformd(transformd& x, transformd& y, transformd* ret) { *ret = wp::sub(x, y); } WP_API void builtin_bit_and_int16_int16(int16 x, int16 y, int16* ret) { *ret = wp::bit_and(x, y); } WP_API void builtin_bit_and_int32_int32(int32 x, int32 y, int32* ret) { *ret = wp::bit_and(x, y); } WP_API void builtin_bit_and_int64_int64(int64 x, int64 y, int64* ret) { *ret = wp::bit_and(x, y); } WP_API void builtin_bit_and_int8_int8(int8 x, int8 y, int8* ret) { *ret = wp::bit_and(x, y); } WP_API void builtin_bit_and_uint16_uint16(uint16 x, uint16 y, uint16* ret) { *ret = wp::bit_and(x, y); } WP_API void builtin_bit_and_uint32_uint32(uint32 x, uint32 y, uint32* ret) { *ret = wp::bit_and(x, y); } WP_API void builtin_bit_and_uint64_uint64(uint64 x, uint64 y, uint64* ret) { *ret = wp::bit_and(x, y); } WP_API void builtin_bit_and_uint8_uint8(uint8 x, uint8 y, uint8* ret) { *ret = wp::bit_and(x, y); } 
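// Bitwise builtin exports follow: bit_or, bit_xor, lshift, rshift, and invert for the signed and unsigned integer widths.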
WP_API void builtin_bit_or_int16_int16(int16 x, int16 y, int16* ret) { *ret = wp::bit_or(x, y); } WP_API void builtin_bit_or_int32_int32(int32 x, int32 y, int32* ret) { *ret = wp::bit_or(x, y); } WP_API void builtin_bit_or_int64_int64(int64 x, int64 y, int64* ret) { *ret = wp::bit_or(x, y); } WP_API void builtin_bit_or_int8_int8(int8 x, int8 y, int8* ret) { *ret = wp::bit_or(x, y); } WP_API void builtin_bit_or_uint16_uint16(uint16 x, uint16 y, uint16* ret) { *ret = wp::bit_or(x, y); } WP_API void builtin_bit_or_uint32_uint32(uint32 x, uint32 y, uint32* ret) { *ret = wp::bit_or(x, y); } WP_API void builtin_bit_or_uint64_uint64(uint64 x, uint64 y, uint64* ret) { *ret = wp::bit_or(x, y); } WP_API void builtin_bit_or_uint8_uint8(uint8 x, uint8 y, uint8* ret) { *ret = wp::bit_or(x, y); } WP_API void builtin_bit_xor_int16_int16(int16 x, int16 y, int16* ret) { *ret = wp::bit_xor(x, y); } WP_API void builtin_bit_xor_int32_int32(int32 x, int32 y, int32* ret) { *ret = wp::bit_xor(x, y); } WP_API void builtin_bit_xor_int64_int64(int64 x, int64 y, int64* ret) { *ret = wp::bit_xor(x, y); } WP_API void builtin_bit_xor_int8_int8(int8 x, int8 y, int8* ret) { *ret = wp::bit_xor(x, y); } WP_API void builtin_bit_xor_uint16_uint16(uint16 x, uint16 y, uint16* ret) { *ret = wp::bit_xor(x, y); } WP_API void builtin_bit_xor_uint32_uint32(uint32 x, uint32 y, uint32* ret) { *ret = wp::bit_xor(x, y); } WP_API void builtin_bit_xor_uint64_uint64(uint64 x, uint64 y, uint64* ret) { *ret = wp::bit_xor(x, y); } WP_API void builtin_bit_xor_uint8_uint8(uint8 x, uint8 y, uint8* ret) { *ret = wp::bit_xor(x, y); } WP_API void builtin_lshift_int16_int16(int16 x, int16 y, int16* ret) { *ret = wp::lshift(x, y); } WP_API void builtin_lshift_int32_int32(int32 x, int32 y, int32* ret) { *ret = wp::lshift(x, y); } WP_API void builtin_lshift_int64_int64(int64 x, int64 y, int64* ret) { *ret = wp::lshift(x, y); } WP_API void builtin_lshift_int8_int8(int8 x, int8 y, int8* ret) { *ret = wp::lshift(x, y); } WP_API void builtin_lshift_uint16_uint16(uint16 x, uint16 y, uint16* ret) { *ret = wp::lshift(x, y); } WP_API void builtin_lshift_uint32_uint32(uint32 x, uint32 y, uint32* ret) { *ret = wp::lshift(x, y); } WP_API void builtin_lshift_uint64_uint64(uint64 x, uint64 y, uint64* ret) { *ret = wp::lshift(x, y); } WP_API void builtin_lshift_uint8_uint8(uint8 x, uint8 y, uint8* ret) { *ret = wp::lshift(x, y); } WP_API void builtin_rshift_int16_int16(int16 x, int16 y, int16* ret) { *ret = wp::rshift(x, y); } WP_API void builtin_rshift_int32_int32(int32 x, int32 y, int32* ret) { *ret = wp::rshift(x, y); } WP_API void builtin_rshift_int64_int64(int64 x, int64 y, int64* ret) { *ret = wp::rshift(x, y); } WP_API void builtin_rshift_int8_int8(int8 x, int8 y, int8* ret) { *ret = wp::rshift(x, y); } WP_API void builtin_rshift_uint16_uint16(uint16 x, uint16 y, uint16* ret) { *ret = wp::rshift(x, y); } WP_API void builtin_rshift_uint32_uint32(uint32 x, uint32 y, uint32* ret) { *ret = wp::rshift(x, y); } WP_API void builtin_rshift_uint64_uint64(uint64 x, uint64 y, uint64* ret) { *ret = wp::rshift(x, y); } WP_API void builtin_rshift_uint8_uint8(uint8 x, uint8 y, uint8* ret) { *ret = wp::rshift(x, y); } WP_API void builtin_invert_int16(int16 x, int16* ret) { *ret = wp::invert(x); } WP_API void builtin_invert_int32(int32 x, int32* ret) { *ret = wp::invert(x); } WP_API void builtin_invert_int64(int64 x, int64* ret) { *ret = wp::invert(x); } WP_API void builtin_invert_int8(int8 x, int8* ret) { *ret = wp::invert(x); } WP_API void builtin_invert_uint16(uint16 x, 
uint16* ret) { *ret = wp::invert(x); } WP_API void builtin_invert_uint32(uint32 x, uint32* ret) { *ret = wp::invert(x); } WP_API void builtin_invert_uint64(uint64 x, uint64* ret) { *ret = wp::invert(x); } WP_API void builtin_invert_uint8(uint8 x, uint8* ret) { *ret = wp::invert(x); } WP_API void builtin_mul_float16_float16(float16 x, float16 y, float16* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float32_float32(float32 x, float32 y, float32* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float64_float64(float64 x, float64 y, float64* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_int16_int16(int16 x, int16 y, int16* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_int32_int32(int32 x, int32 y, int32* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_int64_int64(int64 x, int64 y, int64* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_int8_int8(int8 x, int8 y, int8* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_uint16_uint16(uint16 x, uint16 y, uint16* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_uint32_uint32(uint32 x, uint32 y, uint32* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_uint64_uint64(uint64 x, uint64 y, uint64* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_uint8_uint8(uint8 x, uint8 y, uint8* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec2h_float16(vec2h& x, float16 y, vec2h* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec3h_float16(vec3h& x, float16 y, vec3h* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec4h_float16(vec4h& x, float16 y, vec4h* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_spatial_vectorh_float16(spatial_vectorh& x, float16 y, spatial_vectorh* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec2f_float32(vec2f& x, float32 y, vec2f* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec3f_float32(vec3f& x, float32 y, vec3f* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec4f_float32(vec4f& x, float32 y, vec4f* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_spatial_vectorf_float32(spatial_vectorf& x, float32 y, spatial_vectorf* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec2d_float64(vec2d& x, float64 y, vec2d* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec3d_float64(vec3d& x, float64 y, vec3d* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec4d_float64(vec4d& x, float64 y, vec4d* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_spatial_vectord_float64(spatial_vectord& x, float64 y, spatial_vectord* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec2s_int16(vec2s& x, int16 y, vec2s* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec3s_int16(vec3s& x, int16 y, vec3s* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec4s_int16(vec4s& x, int16 y, vec4s* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec2i_int32(vec2i& x, int32 y, vec2i* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec3i_int32(vec3i& x, int32 y, vec3i* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec4i_int32(vec4i& x, int32 y, vec4i* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec2l_int64(vec2l& x, int64 y, vec2l* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec3l_int64(vec3l& x, int64 y, vec3l* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec4l_int64(vec4l& x, int64 y, vec4l* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec2b_int8(vec2b& x, int8 y, vec2b* ret) { *ret = wp::mul(x, y); } WP_API void 
builtin_mul_vec3b_int8(vec3b& x, int8 y, vec3b* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec4b_int8(vec4b& x, int8 y, vec4b* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec2us_uint16(vec2us& x, uint16 y, vec2us* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec3us_uint16(vec3us& x, uint16 y, vec3us* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec4us_uint16(vec4us& x, uint16 y, vec4us* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec2ui_uint32(vec2ui& x, uint32 y, vec2ui* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec3ui_uint32(vec3ui& x, uint32 y, vec3ui* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec4ui_uint32(vec4ui& x, uint32 y, vec4ui* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec2ul_uint64(vec2ul& x, uint64 y, vec2ul* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec3ul_uint64(vec3ul& x, uint64 y, vec3ul* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec4ul_uint64(vec4ul& x, uint64 y, vec4ul* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec2ub_uint8(vec2ub& x, uint8 y, vec2ub* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec3ub_uint8(vec3ub& x, uint8 y, vec3ub* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec4ub_uint8(vec4ub& x, uint8 y, vec4ub* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float16_vec2h(float16 x, vec2h& y, vec2h* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float16_vec3h(float16 x, vec3h& y, vec3h* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float16_vec4h(float16 x, vec4h& y, vec4h* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float16_spatial_vectorh(float16 x, spatial_vectorh& y, spatial_vectorh* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float32_vec2f(float32 x, vec2f& y, vec2f* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float32_vec3f(float32 x, vec3f& y, vec3f* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float32_vec4f(float32 x, vec4f& y, vec4f* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float32_spatial_vectorf(float32 x, spatial_vectorf& y, spatial_vectorf* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float64_vec2d(float64 x, vec2d& y, vec2d* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float64_vec3d(float64 x, vec3d& y, vec3d* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float64_vec4d(float64 x, vec4d& y, vec4d* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float64_spatial_vectord(float64 x, spatial_vectord& y, spatial_vectord* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_int16_vec2s(int16 x, vec2s& y, vec2s* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_int16_vec3s(int16 x, vec3s& y, vec3s* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_int16_vec4s(int16 x, vec4s& y, vec4s* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_int32_vec2i(int32 x, vec2i& y, vec2i* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_int32_vec3i(int32 x, vec3i& y, vec3i* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_int32_vec4i(int32 x, vec4i& y, vec4i* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_int64_vec2l(int64 x, vec2l& y, vec2l* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_int64_vec3l(int64 x, vec3l& y, vec3l* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_int64_vec4l(int64 x, vec4l& y, vec4l* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_int8_vec2b(int8 x, vec2b& y, vec2b* ret) { *ret = wp::mul(x, y); } WP_API void 
builtin_mul_int8_vec3b(int8 x, vec3b& y, vec3b* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_int8_vec4b(int8 x, vec4b& y, vec4b* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_uint16_vec2us(uint16 x, vec2us& y, vec2us* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_uint16_vec3us(uint16 x, vec3us& y, vec3us* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_uint16_vec4us(uint16 x, vec4us& y, vec4us* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_uint32_vec2ui(uint32 x, vec2ui& y, vec2ui* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_uint32_vec3ui(uint32 x, vec3ui& y, vec3ui* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_uint32_vec4ui(uint32 x, vec4ui& y, vec4ui* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_uint64_vec2ul(uint64 x, vec2ul& y, vec2ul* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_uint64_vec3ul(uint64 x, vec3ul& y, vec3ul* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_uint64_vec4ul(uint64 x, vec4ul& y, vec4ul* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_uint8_vec2ub(uint8 x, vec2ub& y, vec2ub* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_uint8_vec3ub(uint8 x, vec3ub& y, vec3ub* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_uint8_vec4ub(uint8 x, vec4ub& y, vec4ub* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_quath_float16(quath& x, float16 y, quath* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_quatf_float32(quatf& x, float32 y, quatf* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_quatd_float64(quatd& x, float64 y, quatd* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float16_quath(float16 x, quath& y, quath* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float32_quatf(float32 x, quatf& y, quatf* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float64_quatd(float64 x, quatd& y, quatd* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_quath_quath(quath& x, quath& y, quath* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_quatf_quatf(quatf& x, quatf& y, quatf* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_quatd_quatd(quatd& x, quatd& y, quatd* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float16_mat22h(float16 x, mat22h& y, mat22h* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float16_mat33h(float16 x, mat33h& y, mat33h* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float16_mat44h(float16 x, mat44h& y, mat44h* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float16_spatial_matrixh(float16 x, spatial_matrixh& y, spatial_matrixh* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float32_mat22f(float32 x, mat22f& y, mat22f* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float32_mat33f(float32 x, mat33f& y, mat33f* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float32_mat44f(float32 x, mat44f& y, mat44f* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float32_spatial_matrixf(float32 x, spatial_matrixf& y, spatial_matrixf* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float64_mat22d(float64 x, mat22d& y, mat22d* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float64_mat33d(float64 x, mat33d& y, mat33d* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float64_mat44d(float64 x, mat44d& y, mat44d* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float64_spatial_matrixd(float64 x, spatial_matrixd& y, spatial_matrixd* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat22h_float16(mat22h& x, float16 y, mat22h* 
ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat33h_float16(mat33h& x, float16 y, mat33h* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat44h_float16(mat44h& x, float16 y, mat44h* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_spatial_matrixh_float16(spatial_matrixh& x, float16 y, spatial_matrixh* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat22f_float32(mat22f& x, float32 y, mat22f* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat33f_float32(mat33f& x, float32 y, mat33f* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat44f_float32(mat44f& x, float32 y, mat44f* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_spatial_matrixf_float32(spatial_matrixf& x, float32 y, spatial_matrixf* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat22d_float64(mat22d& x, float64 y, mat22d* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat33d_float64(mat33d& x, float64 y, mat33d* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat44d_float64(mat44d& x, float64 y, mat44d* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_spatial_matrixd_float64(spatial_matrixd& x, float64 y, spatial_matrixd* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat22h_vec2h(mat22h& x, vec2h& y, vec2h* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat33h_vec3h(mat33h& x, vec3h& y, vec3h* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat44h_vec4h(mat44h& x, vec4h& y, vec4h* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_spatial_matrixh_spatial_vectorh(spatial_matrixh& x, spatial_vectorh& y, spatial_vectorh* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat22f_vec2f(mat22f& x, vec2f& y, vec2f* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat33f_vec3f(mat33f& x, vec3f& y, vec3f* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat44f_vec4f(mat44f& x, vec4f& y, vec4f* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_spatial_matrixf_spatial_vectorf(spatial_matrixf& x, spatial_vectorf& y, spatial_vectorf* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat22d_vec2d(mat22d& x, vec2d& y, vec2d* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat33d_vec3d(mat33d& x, vec3d& y, vec3d* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat44d_vec4d(mat44d& x, vec4d& y, vec4d* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_spatial_matrixd_spatial_vectord(spatial_matrixd& x, spatial_vectord& y, spatial_vectord* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec2h_mat22h(vec2h& x, mat22h& y, vec2h* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec3h_mat33h(vec3h& x, mat33h& y, vec3h* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec4h_mat44h(vec4h& x, mat44h& y, vec4h* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_spatial_vectorh_spatial_matrixh(spatial_vectorh& x, spatial_matrixh& y, spatial_vectorh* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec2f_mat22f(vec2f& x, mat22f& y, vec2f* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec3f_mat33f(vec3f& x, mat33f& y, vec3f* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec4f_mat44f(vec4f& x, mat44f& y, vec4f* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_spatial_vectorf_spatial_matrixf(spatial_vectorf& x, spatial_matrixf& y, spatial_vectorf* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec2d_mat22d(vec2d& x, mat22d& y, vec2d* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_vec3d_mat33d(vec3d& x, mat33d& y, vec3d* ret) { *ret = 
wp::mul(x, y); } WP_API void builtin_mul_vec4d_mat44d(vec4d& x, mat44d& y, vec4d* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_spatial_vectord_spatial_matrixd(spatial_vectord& x, spatial_matrixd& y, spatial_vectord* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat22h_mat22h(mat22h& x, mat22h& y, mat22h* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat33h_mat33h(mat33h& x, mat33h& y, mat33h* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat44h_mat44h(mat44h& x, mat44h& y, mat44h* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_spatial_matrixh_spatial_matrixh(spatial_matrixh& x, spatial_matrixh& y, spatial_matrixh* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat22f_mat22f(mat22f& x, mat22f& y, mat22f* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat33f_mat33f(mat33f& x, mat33f& y, mat33f* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat44f_mat44f(mat44f& x, mat44f& y, mat44f* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_spatial_matrixf_spatial_matrixf(spatial_matrixf& x, spatial_matrixf& y, spatial_matrixf* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat22d_mat22d(mat22d& x, mat22d& y, mat22d* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat33d_mat33d(mat33d& x, mat33d& y, mat33d* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_mat44d_mat44d(mat44d& x, mat44d& y, mat44d* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_spatial_matrixd_spatial_matrixd(spatial_matrixd& x, spatial_matrixd& y, spatial_matrixd* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_transformh_transformh(transformh& x, transformh& y, transformh* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_transformf_transformf(transformf& x, transformf& y, transformf* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_transformd_transformd(transformd& x, transformd& y, transformd* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float16_transformh(float16 x, transformh& y, transformh* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float32_transformf(float32 x, transformf& y, transformf* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_float64_transformd(float64 x, transformd& y, transformd* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_transformh_float16(transformh& x, float16 y, transformh* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_transformf_float32(transformf& x, float32 y, transformf* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mul_transformd_float64(transformd& x, float64 y, transformd* ret) { *ret = wp::mul(x, y); } WP_API void builtin_mod_float16_float16(float16 x, float16 y, float16* ret) { *ret = wp::mod(x, y); } WP_API void builtin_mod_float32_float32(float32 x, float32 y, float32* ret) { *ret = wp::mod(x, y); } WP_API void builtin_mod_float64_float64(float64 x, float64 y, float64* ret) { *ret = wp::mod(x, y); } WP_API void builtin_mod_int16_int16(int16 x, int16 y, int16* ret) { *ret = wp::mod(x, y); } WP_API void builtin_mod_int32_int32(int32 x, int32 y, int32* ret) { *ret = wp::mod(x, y); } WP_API void builtin_mod_int64_int64(int64 x, int64 y, int64* ret) { *ret = wp::mod(x, y); } WP_API void builtin_mod_int8_int8(int8 x, int8 y, int8* ret) { *ret = wp::mod(x, y); } WP_API void builtin_mod_uint16_uint16(uint16 x, uint16 y, uint16* ret) { *ret = wp::mod(x, y); } WP_API void builtin_mod_uint32_uint32(uint32 x, uint32 y, uint32* ret) { *ret = wp::mod(x, y); } WP_API void builtin_mod_uint64_uint64(uint64 x, uint64 y, uint64* ret) { *ret = 
wp::mod(x, y); } WP_API void builtin_mod_uint8_uint8(uint8 x, uint8 y, uint8* ret) { *ret = wp::mod(x, y); } WP_API void builtin_div_float16_float16(float16 x, float16 y, float16* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float32_float32(float32 x, float32 y, float32* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float64_float64(float64 x, float64 y, float64* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_int16_int16(int16 x, int16 y, int16* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_int32_int32(int32 x, int32 y, int32* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_int64_int64(int64 x, int64 y, int64* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_int8_int8(int8 x, int8 y, int8* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_uint16_uint16(uint16 x, uint16 y, uint16* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_uint32_uint32(uint32 x, uint32 y, uint32* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_uint64_uint64(uint64 x, uint64 y, uint64* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_uint8_uint8(uint8 x, uint8 y, uint8* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec2h_float16(vec2h& x, float16 y, vec2h* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec3h_float16(vec3h& x, float16 y, vec3h* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec4h_float16(vec4h& x, float16 y, vec4h* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_spatial_vectorh_float16(spatial_vectorh& x, float16 y, spatial_vectorh* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec2f_float32(vec2f& x, float32 y, vec2f* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec3f_float32(vec3f& x, float32 y, vec3f* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec4f_float32(vec4f& x, float32 y, vec4f* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_spatial_vectorf_float32(spatial_vectorf& x, float32 y, spatial_vectorf* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec2d_float64(vec2d& x, float64 y, vec2d* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec3d_float64(vec3d& x, float64 y, vec3d* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec4d_float64(vec4d& x, float64 y, vec4d* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_spatial_vectord_float64(spatial_vectord& x, float64 y, spatial_vectord* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec2s_int16(vec2s& x, int16 y, vec2s* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec3s_int16(vec3s& x, int16 y, vec3s* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec4s_int16(vec4s& x, int16 y, vec4s* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec2i_int32(vec2i& x, int32 y, vec2i* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec3i_int32(vec3i& x, int32 y, vec3i* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec4i_int32(vec4i& x, int32 y, vec4i* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec2l_int64(vec2l& x, int64 y, vec2l* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec3l_int64(vec3l& x, int64 y, vec3l* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec4l_int64(vec4l& x, int64 y, vec4l* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec2b_int8(vec2b& x, int8 y, vec2b* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec3b_int8(vec3b& x, int8 y, vec3b* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec4b_int8(vec4b& x, int8 y, vec4b* ret) { *ret = wp::div(x, y); } WP_API void 
builtin_div_vec2us_uint16(vec2us& x, uint16 y, vec2us* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec3us_uint16(vec3us& x, uint16 y, vec3us* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec4us_uint16(vec4us& x, uint16 y, vec4us* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec2ui_uint32(vec2ui& x, uint32 y, vec2ui* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec3ui_uint32(vec3ui& x, uint32 y, vec3ui* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec4ui_uint32(vec4ui& x, uint32 y, vec4ui* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec2ul_uint64(vec2ul& x, uint64 y, vec2ul* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec3ul_uint64(vec3ul& x, uint64 y, vec3ul* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec4ul_uint64(vec4ul& x, uint64 y, vec4ul* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec2ub_uint8(vec2ub& x, uint8 y, vec2ub* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec3ub_uint8(vec3ub& x, uint8 y, vec3ub* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_vec4ub_uint8(vec4ub& x, uint8 y, vec4ub* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float16_vec2h(float16 x, vec2h& y, vec2h* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float16_vec3h(float16 x, vec3h& y, vec3h* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float16_vec4h(float16 x, vec4h& y, vec4h* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float16_spatial_vectorh(float16 x, spatial_vectorh& y, spatial_vectorh* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float32_vec2f(float32 x, vec2f& y, vec2f* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float32_vec3f(float32 x, vec3f& y, vec3f* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float32_vec4f(float32 x, vec4f& y, vec4f* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float32_spatial_vectorf(float32 x, spatial_vectorf& y, spatial_vectorf* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float64_vec2d(float64 x, vec2d& y, vec2d* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float64_vec3d(float64 x, vec3d& y, vec3d* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float64_vec4d(float64 x, vec4d& y, vec4d* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float64_spatial_vectord(float64 x, spatial_vectord& y, spatial_vectord* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_int16_vec2s(int16 x, vec2s& y, vec2s* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_int16_vec3s(int16 x, vec3s& y, vec3s* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_int16_vec4s(int16 x, vec4s& y, vec4s* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_int32_vec2i(int32 x, vec2i& y, vec2i* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_int32_vec3i(int32 x, vec3i& y, vec3i* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_int32_vec4i(int32 x, vec4i& y, vec4i* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_int64_vec2l(int64 x, vec2l& y, vec2l* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_int64_vec3l(int64 x, vec3l& y, vec3l* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_int64_vec4l(int64 x, vec4l& y, vec4l* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_int8_vec2b(int8 x, vec2b& y, vec2b* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_int8_vec3b(int8 x, vec3b& y, vec3b* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_int8_vec4b(int8 x, vec4b& y, vec4b* ret) { *ret = wp::div(x, y); } WP_API void 
builtin_div_uint16_vec2us(uint16 x, vec2us& y, vec2us* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_uint16_vec3us(uint16 x, vec3us& y, vec3us* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_uint16_vec4us(uint16 x, vec4us& y, vec4us* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_uint32_vec2ui(uint32 x, vec2ui& y, vec2ui* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_uint32_vec3ui(uint32 x, vec3ui& y, vec3ui* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_uint32_vec4ui(uint32 x, vec4ui& y, vec4ui* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_uint64_vec2ul(uint64 x, vec2ul& y, vec2ul* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_uint64_vec3ul(uint64 x, vec3ul& y, vec3ul* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_uint64_vec4ul(uint64 x, vec4ul& y, vec4ul* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_uint8_vec2ub(uint8 x, vec2ub& y, vec2ub* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_uint8_vec3ub(uint8 x, vec3ub& y, vec3ub* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_uint8_vec4ub(uint8 x, vec4ub& y, vec4ub* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_mat22h_float16(mat22h& x, float16 y, mat22h* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_mat33h_float16(mat33h& x, float16 y, mat33h* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_mat44h_float16(mat44h& x, float16 y, mat44h* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_spatial_matrixh_float16(spatial_matrixh& x, float16 y, spatial_matrixh* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_mat22f_float32(mat22f& x, float32 y, mat22f* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_mat33f_float32(mat33f& x, float32 y, mat33f* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_mat44f_float32(mat44f& x, float32 y, mat44f* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_spatial_matrixf_float32(spatial_matrixf& x, float32 y, spatial_matrixf* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_mat22d_float64(mat22d& x, float64 y, mat22d* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_mat33d_float64(mat33d& x, float64 y, mat33d* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_mat44d_float64(mat44d& x, float64 y, mat44d* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_spatial_matrixd_float64(spatial_matrixd& x, float64 y, spatial_matrixd* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float16_mat22h(float16 x, mat22h& y, mat22h* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float16_mat33h(float16 x, mat33h& y, mat33h* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float16_mat44h(float16 x, mat44h& y, mat44h* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float16_spatial_matrixh(float16 x, spatial_matrixh& y, spatial_matrixh* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float32_mat22f(float32 x, mat22f& y, mat22f* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float32_mat33f(float32 x, mat33f& y, mat33f* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float32_mat44f(float32 x, mat44f& y, mat44f* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float32_spatial_matrixf(float32 x, spatial_matrixf& y, spatial_matrixf* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float64_mat22d(float64 x, mat22d& y, mat22d* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float64_mat33d(float64 x, mat33d& y, mat33d* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float64_mat44d(float64 x, mat44d& y, 
mat44d* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float64_spatial_matrixd(float64 x, spatial_matrixd& y, spatial_matrixd* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_quath_float16(quath& x, float16 y, quath* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_quatf_float32(quatf& x, float32 y, quatf* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_quatd_float64(quatd& x, float64 y, quatd* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float16_quath(float16 x, quath& y, quath* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float32_quatf(float32 x, quatf& y, quatf* ret) { *ret = wp::div(x, y); } WP_API void builtin_div_float64_quatd(float64 x, quatd& y, quatd* ret) { *ret = wp::div(x, y); } WP_API void builtin_floordiv_float16_float16(float16 x, float16 y, float16* ret) { *ret = wp::floordiv(x, y); } WP_API void builtin_floordiv_float32_float32(float32 x, float32 y, float32* ret) { *ret = wp::floordiv(x, y); } WP_API void builtin_floordiv_float64_float64(float64 x, float64 y, float64* ret) { *ret = wp::floordiv(x, y); } WP_API void builtin_floordiv_int16_int16(int16 x, int16 y, int16* ret) { *ret = wp::floordiv(x, y); } WP_API void builtin_floordiv_int32_int32(int32 x, int32 y, int32* ret) { *ret = wp::floordiv(x, y); } WP_API void builtin_floordiv_int64_int64(int64 x, int64 y, int64* ret) { *ret = wp::floordiv(x, y); } WP_API void builtin_floordiv_int8_int8(int8 x, int8 y, int8* ret) { *ret = wp::floordiv(x, y); } WP_API void builtin_floordiv_uint16_uint16(uint16 x, uint16 y, uint16* ret) { *ret = wp::floordiv(x, y); } WP_API void builtin_floordiv_uint32_uint32(uint32 x, uint32 y, uint32* ret) { *ret = wp::floordiv(x, y); } WP_API void builtin_floordiv_uint64_uint64(uint64 x, uint64 y, uint64* ret) { *ret = wp::floordiv(x, y); } WP_API void builtin_floordiv_uint8_uint8(uint8 x, uint8 y, uint8* ret) { *ret = wp::floordiv(x, y); } WP_API void builtin_pos_float16(float16 x, float16* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_float32(float32 x, float32* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_float64(float64 x, float64* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_int16(int16 x, int16* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_int32(int32 x, int32* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_int64(int64 x, int64* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_int8(int8 x, int8* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_uint16(uint16 x, uint16* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_uint32(uint32 x, uint32* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_uint64(uint64 x, uint64* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_uint8(uint8 x, uint8* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec2h(vec2h& x, vec2h* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec3h(vec3h& x, vec3h* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec4h(vec4h& x, vec4h* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_spatial_vectorh(spatial_vectorh& x, spatial_vectorh* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec2f(vec2f& x, vec2f* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec3f(vec3f& x, vec3f* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec4f(vec4f& x, vec4f* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_spatial_vectorf(spatial_vectorf& x, spatial_vectorf* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec2d(vec2d& x, vec2d* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec3d(vec3d& x, vec3d* ret) { *ret = 
wp::pos(x); } WP_API void builtin_pos_vec4d(vec4d& x, vec4d* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_spatial_vectord(spatial_vectord& x, spatial_vectord* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec2s(vec2s& x, vec2s* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec3s(vec3s& x, vec3s* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec4s(vec4s& x, vec4s* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec2i(vec2i& x, vec2i* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec3i(vec3i& x, vec3i* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec4i(vec4i& x, vec4i* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec2l(vec2l& x, vec2l* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec3l(vec3l& x, vec3l* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec4l(vec4l& x, vec4l* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec2b(vec2b& x, vec2b* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec3b(vec3b& x, vec3b* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec4b(vec4b& x, vec4b* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec2us(vec2us& x, vec2us* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec3us(vec3us& x, vec3us* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec4us(vec4us& x, vec4us* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec2ui(vec2ui& x, vec2ui* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec3ui(vec3ui& x, vec3ui* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec4ui(vec4ui& x, vec4ui* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec2ul(vec2ul& x, vec2ul* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec3ul(vec3ul& x, vec3ul* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec4ul(vec4ul& x, vec4ul* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec2ub(vec2ub& x, vec2ub* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec3ub(vec3ub& x, vec3ub* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_vec4ub(vec4ub& x, vec4ub* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_quath(quath& x, quath* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_quatf(quatf& x, quatf* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_quatd(quatd& x, quatd* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_mat22h(mat22h& x, mat22h* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_mat33h(mat33h& x, mat33h* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_mat44h(mat44h& x, mat44h* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_spatial_matrixh(spatial_matrixh& x, spatial_matrixh* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_mat22f(mat22f& x, mat22f* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_mat33f(mat33f& x, mat33f* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_mat44f(mat44f& x, mat44f* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_spatial_matrixf(spatial_matrixf& x, spatial_matrixf* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_mat22d(mat22d& x, mat22d* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_mat33d(mat33d& x, mat33d* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_mat44d(mat44d& x, mat44d* ret) { *ret = wp::pos(x); } WP_API void builtin_pos_spatial_matrixd(spatial_matrixd& x, spatial_matrixd* ret) { *ret = wp::pos(x); } WP_API void builtin_neg_float16(float16 x, float16* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_float32(float32 x, float32* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_float64(float64 x, float64* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_int16(int16 x, 
int16* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_int32(int32 x, int32* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_int64(int64 x, int64* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_int8(int8 x, int8* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_uint16(uint16 x, uint16* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_uint32(uint32 x, uint32* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_uint64(uint64 x, uint64* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_uint8(uint8 x, uint8* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec2h(vec2h& x, vec2h* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec3h(vec3h& x, vec3h* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec4h(vec4h& x, vec4h* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_spatial_vectorh(spatial_vectorh& x, spatial_vectorh* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec2f(vec2f& x, vec2f* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec3f(vec3f& x, vec3f* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec4f(vec4f& x, vec4f* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_spatial_vectorf(spatial_vectorf& x, spatial_vectorf* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec2d(vec2d& x, vec2d* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec3d(vec3d& x, vec3d* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec4d(vec4d& x, vec4d* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_spatial_vectord(spatial_vectord& x, spatial_vectord* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec2s(vec2s& x, vec2s* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec3s(vec3s& x, vec3s* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec4s(vec4s& x, vec4s* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec2i(vec2i& x, vec2i* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec3i(vec3i& x, vec3i* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec4i(vec4i& x, vec4i* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec2l(vec2l& x, vec2l* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec3l(vec3l& x, vec3l* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec4l(vec4l& x, vec4l* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec2b(vec2b& x, vec2b* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec3b(vec3b& x, vec3b* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec4b(vec4b& x, vec4b* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec2us(vec2us& x, vec2us* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec3us(vec3us& x, vec3us* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec4us(vec4us& x, vec4us* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec2ui(vec2ui& x, vec2ui* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec3ui(vec3ui& x, vec3ui* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec4ui(vec4ui& x, vec4ui* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec2ul(vec2ul& x, vec2ul* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec3ul(vec3ul& x, vec3ul* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec4ul(vec4ul& x, vec4ul* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec2ub(vec2ub& x, vec2ub* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec3ub(vec3ub& x, vec3ub* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_vec4ub(vec4ub& x, vec4ub* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_quath(quath& x, quath* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_quatf(quatf& x, quatf* ret) { *ret = wp::neg(x); } WP_API void 
builtin_neg_quatd(quatd& x, quatd* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_mat22h(mat22h& x, mat22h* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_mat33h(mat33h& x, mat33h* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_mat44h(mat44h& x, mat44h* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_spatial_matrixh(spatial_matrixh& x, spatial_matrixh* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_mat22f(mat22f& x, mat22f* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_mat33f(mat33f& x, mat33f* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_mat44f(mat44f& x, mat44f* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_spatial_matrixf(spatial_matrixf& x, spatial_matrixf* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_mat22d(mat22d& x, mat22d* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_mat33d(mat33d& x, mat33d* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_mat44d(mat44d& x, mat44d* ret) { *ret = wp::neg(x); } WP_API void builtin_neg_spatial_matrixd(spatial_matrixd& x, spatial_matrixd* ret) { *ret = wp::neg(x); } WP_API void builtin_unot_bool(bool b, bool* ret) { *ret = wp::unot(b); } WP_API void builtin_unot_int8(int8 b, bool* ret) { *ret = wp::unot(b); } WP_API void builtin_unot_uint8(uint8 b, bool* ret) { *ret = wp::unot(b); } WP_API void builtin_unot_int16(int16 b, bool* ret) { *ret = wp::unot(b); } WP_API void builtin_unot_uint16(uint16 b, bool* ret) { *ret = wp::unot(b); } WP_API void builtin_unot_int32(int32 b, bool* ret) { *ret = wp::unot(b); } WP_API void builtin_unot_uint32(uint32 b, bool* ret) { *ret = wp::unot(b); } WP_API void builtin_unot_int64(int64 b, bool* ret) { *ret = wp::unot(b); } WP_API void builtin_unot_uint64(uint64 b, bool* ret) { *ret = wp::unot(b); } } // extern "C" } // namespace wp
168,519
C
97.090803
399
0.667385
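The entry above is machine-generated: every overload of wp::div, wp::floordiv, wp::pos, wp::neg and wp::unot gets a flat extern "C" shim that takes the operands plus an output pointer, so the builtins can be invoked through a plain C ABI. The sketch below is a minimal, self-contained illustration of that wrapper pattern only; the mini_wp namespace, MINI_API macro and the two sample shims are stand-ins, not the real Warp headers or exports.

// Minimal illustration of the extern "C" shim pattern used in the generated file above.
#include <cstdio>

namespace mini_wp {
    // floor division (rounds toward negative infinity), mirroring wp::floordiv for ints
    inline int   floordiv(int a, int b) { int q = a / b; if ((a % b != 0) && ((a < 0) != (b < 0))) --q; return q; }
    inline float neg(float x)           { return -x; }
}

#define MINI_API extern "C"

// one C-callable entry point per overload; results are written through an out-pointer
MINI_API void builtin_floordiv_int32_int32(int x, int y, int* ret) { *ret = mini_wp::floordiv(x, y); }
MINI_API void builtin_neg_float32(float x, float* ret)             { *ret = mini_wp::neg(x); }

int main()
{
    int   qi = 0;
    float nf = 0.0f;
    builtin_floordiv_int32_int32(-7, 2, &qi);   // -4: floor division rounds toward -inf
    builtin_neg_float32(3.5f, &nf);             // -3.5
    std::printf("floordiv(-7, 2) = %d, neg(3.5) = %g\n", qi, nf);
    return 0;
}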
NVIDIA/warp/warp/native/volume_impl.h
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

#pragma once

#include "volume.h"

// Helper functions for cpp/cu files, not to be exposed to user kernels

namespace wp
{
namespace volume
{

inline CUDA_CALLABLE pnanovdb_leaf_handle_t get_leaf(const pnanovdb_buf_t buf, const uint32_t leaf_id)
{
    const pnanovdb_tree_handle_t tree = get_tree(buf);
    const uint64_t first_leaf_offset = pnanovdb_tree_get_node_offset_leaf(buf, tree);
    const uint32_t leaf_stride = PNANOVDB_GRID_TYPE_GET(get_grid_type(buf), leaf_size);
    return {pnanovdb_address_offset64(tree.address, first_leaf_offset + uint64_t(leaf_id) * leaf_stride)};
}

inline CUDA_CALLABLE pnanovdb_coord_t leaf_origin(const pnanovdb_buf_t buf, const pnanovdb_leaf_handle_t leaf)
{
    pnanovdb_coord_t origin = pnanovdb_leaf_get_bbox_min(buf, leaf);
    // mask out last three bits corresponding to voxel coordinates within leaf
    constexpr uint32_t MASK = (1u << 3u) - 1u;
    origin.x &= ~MASK;
    origin.y &= ~MASK;
    origin.z &= ~MASK;
    return origin;
}

inline CUDA_CALLABLE uint64_t leaf_voxel_index(const pnanovdb_buf_t buf, const uint32_t leaf_id, const pnanovdb_coord_t &ijk)
{
    const uint32_t grid_type = get_grid_type(buf);
    const pnanovdb_leaf_handle_t leaf = get_leaf(buf, leaf_id);
    const pnanovdb_address_t value_address = pnanovdb_leaf_get_value_address(grid_type, buf, leaf, &ijk);
    return volume::get_grid_voxel_index(grid_type, buf, value_address, ijk) - 1;
}

inline CUDA_CALLABLE pnanovdb_coord_t leaf_offset_to_local_coord(uint32_t offset)
{
    pnanovdb_coord_t coord;
    coord.x = (offset >> 6) & 7;
    coord.y = (offset >> 3) & 7;
    coord.z = (offset >> 0) & 7;
    return coord;
}

} // namespace volume
} // namespace wp
2,176
C
34.112903
110
0.699449
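leaf_offset_to_local_coord above decodes a linear voxel offset inside an 8x8x8 NanoVDB leaf using an x-major layout (offset = x*64 + y*8 + z), and leaf_origin masks the low three bits of each axis to snap to the leaf boundary. The standalone sketch below round-trips that decode against the matching encode; Coord is a stand-in for pnanovdb_coord_t, assumed here only for illustration.

// Round-trip check of the 8x8x8 leaf index layout used by leaf_offset_to_local_coord.
#include <cassert>
#include <cstdint>
#include <cstdio>

struct Coord { int32_t x, y, z; };

// decode: same bit arithmetic as the header above
Coord offset_to_local(uint32_t offset)
{
    return Coord{ int32_t((offset >> 6) & 7u), int32_t((offset >> 3) & 7u), int32_t(offset & 7u) };
}

// encode: x-major ordering over an 8x8x8 block
uint32_t local_to_offset(const Coord& c)
{
    return uint32_t(c.x) * 64u + uint32_t(c.y) * 8u + uint32_t(c.z);
}

int main()
{
    for (uint32_t offset = 0; offset < 512; ++offset)
    {
        const Coord c = offset_to_local(offset);
        assert(local_to_offset(c) == offset);
    }
    std::printf("all 512 voxel offsets round-trip\n");
    return 0;
}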
NVIDIA/warp/warp/native/intersect.h
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once #include "builtin.h" namespace wp { CUDA_CALLABLE inline vec3 closest_point_to_aabb(const vec3& p, const vec3& lower, const vec3& upper) { vec3 c; { float v = p[0]; if (v < lower[0]) v = lower[0]; if (v > upper[0]) v = upper[0]; c[0] = v; } { float v = p[1]; if (v < lower[1]) v = lower[1]; if (v > upper[1]) v = upper[1]; c[1] = v; } { float v = p[2]; if (v < lower[2]) v = lower[2]; if (v > upper[2]) v = upper[2]; c[2] = v; } return c; } CUDA_CALLABLE inline vec2 closest_point_to_triangle(const vec3& a, const vec3& b, const vec3& c, const vec3& p) { vec3 ab = b-a; vec3 ac = c-a; vec3 ap = p-a; float u, v, w; float d1 = dot(ab, ap); float d2 = dot(ac, ap); if (d1 <= 0.0f && d2 <= 0.0f) { v = 0.0f; w = 0.0f; u = 1.0f - v - w; return vec2(u, v); } vec3 bp = p-b; float d3 = dot(ab, bp); float d4 = dot(ac, bp); if (d3 >= 0.0f && d4 <= d3) { v = 1.0f; w = 0.0f; u = 1.0f - v - w; return vec2(u, v); } float vc = d1*d4 - d3*d2; if (vc <= 0.0f && d1 >= 0.0f && d3 <= 0.0f) { v = d1 / (d1-d3); w = 0.0f; u = 1.0f - v - w; return vec2(u, v); } vec3 cp = p-c; float d5 = dot(ab, cp); float d6 = dot(ac, cp); if (d6 >= 0.0f && d5 <= d6) { v = 0.0f; w = 1.0f; u = 1.0f - v - w; return vec2(u, v); } float vb = d5*d2 - d1*d6; if (vb <= 0.0f && d2 >= 0.0f && d6 <= 0.0f) { v = 0.0f; w = d2 / (d2 - d6); u = 1.0f - v - w; return vec2(u, v); } float va = d3*d6 - d5*d4; if (va <= 0.0f && (d4 -d3) >= 0.0f && (d5-d6) >= 0.0f) { w = (d4-d3)/((d4-d3) + (d5-d6)); v = 1.0f - w; u = 1.0f - v - w; return vec2(u, v); } float denom = 1.0f / (va + vb + vc); v = vb * denom; w = vc * denom; u = 1.0f - v - w; return vec2(u, v); } CUDA_CALLABLE inline vec2 furthest_point_to_triangle(const vec3& a, const vec3& b, const vec3& c, const vec3& p) { vec3 pa = p-a; vec3 pb = p-b; vec3 pc = p-c; float dist_a = dot(pa, pa); float dist_b = dot(pb, pb); float dist_c = dot(pc, pc); if (dist_a > dist_b && dist_a > dist_c) return vec2(1.0f, 0.0f); // a is furthest if (dist_b > dist_c) return vec2(0.0f, 1.0f); // b is furthest return vec2(0.0f, 0.0f); // c is furthest } CUDA_CALLABLE inline bool intersect_ray_aabb(const vec3& pos, const vec3& rcp_dir, const vec3& lower, const vec3& upper, float& t) { float l1, l2, lmin, lmax; l1 = (lower[0] - pos[0]) * rcp_dir[0]; l2 = (upper[0] - pos[0]) * rcp_dir[0]; lmin = min(l1,l2); lmax = max(l1,l2); l1 = (lower[1] - pos[1]) * rcp_dir[1]; l2 = (upper[1] - pos[1]) * rcp_dir[1]; lmin = max(min(l1,l2), lmin); lmax = min(max(l1,l2), lmax); l1 = (lower[2] - pos[2]) * rcp_dir[2]; l2 = (upper[2] - pos[2]) * rcp_dir[2]; lmin = max(min(l1,l2), lmin); lmax = min(max(l1,l2), lmax); bool hit = ((lmax >= 0.f) & (lmax >= lmin)); if (hit) t = lmin; return hit; } // Moller and Trumbore's method CUDA_CALLABLE inline bool intersect_ray_tri_moller(const vec3& p, const vec3& dir, const vec3& a, const vec3& b, const vec3& c, float& t, float& u, float& v, float& w, float& sign, vec3* normal) { vec3 ab = b - a; vec3 ac = c - a; vec3 n = cross(ab, ac); float d = dot(-dir, n); float ood = 1.0f / d; // No need to check for division by zero here as infinity arithmetic will save us... 
vec3 ap = p - a; t = dot(ap, n) * ood; if (t < 0.0f) return false; vec3 e = cross(-dir, ap); v = dot(ac, e) * ood; if (v < 0.0f || v > 1.0f) // ...here... return false; w = -dot(ab, e) * ood; if (w < 0.0f || (v + w) > 1.0f) // ...and here return false; u = 1.0f - v - w; if (normal) *normal = n; sign = d; return true; } CUDA_CALLABLE inline bool intersect_ray_tri_rtcd(const vec3& p, const vec3& dir, const vec3& a, const vec3& b, const vec3& c, float& t, float& u, float& v, float& w, float& sign, vec3* normal) { const vec3 ab = b-a; const vec3 ac = c-a; // calculate normal vec3 n = cross(ab, ac); // need to solve a system of three equations to give t, u, v float d = dot(-dir, n); // if dir is parallel to triangle plane or points away from triangle if (d <= 0.0f) return false; vec3 ap = p-a; t = dot(ap, n); // ignores tris behind if (t < 0.0f) return false; // compute barycentric coordinates vec3 e = cross(-dir, ap); v = dot(ac, e); if (v < 0.0f || v > d) return false; w = -dot(ab, e); if (w < 0.0f || v + w > d) return false; float ood = 1.0f / d; t *= ood; v *= ood; w *= ood; u = 1.0f-v-w; // optionally write out normal (todo: this branch is a performance concern, should probably remove) if (normal) *normal = n; return true; } #ifndef __CUDA_ARCH__ // these are provided as built-ins by CUDA inline float __int_as_float(int i) { return *(float*)(&i); } inline int __float_as_int(float f) { return *(int*)(&f); } #endif CUDA_CALLABLE inline float xorf(float x, int y) { return __int_as_float(__float_as_int(x) ^ y); } CUDA_CALLABLE inline int sign_mask(float x) { return __float_as_int(x) & 0x80000000; } CUDA_CALLABLE inline int max_dim(vec3 a) { float x = abs(a[0]); float y = abs(a[1]); float z = abs(a[2]); return longest_axis(vec3(x, y, z)); } // computes the difference of products a*b - c*d using // FMA instructions for improved numerical precision CUDA_CALLABLE inline float diff_product(float a, float b, float c, float d) { float cd = c * d; float diff = fmaf(a, b, -cd); float error = fmaf(-c, d, cd); return diff + error; } // http://jcgt.org/published/0002/01/05/ CUDA_CALLABLE inline bool intersect_ray_tri_woop(const vec3& p, const vec3& dir, const vec3& a, const vec3& b, const vec3& c, float& t, float& u, float& v, float& sign, vec3* normal) { // todo: precompute for ray int kz = max_dim(dir); int kx = kz+1; if (kx == 3) kx = 0; int ky = kx+1; if (ky == 3) ky = 0; if (dir[kz] < 0.0f) { float tmp = kx; kx = ky; ky = tmp; } float Sx = dir[kx]/dir[kz]; float Sy = dir[ky]/dir[kz]; float Sz = 1.0f/dir[kz]; // todo: end precompute const vec3 A = a-p; const vec3 B = b-p; const vec3 C = c-p; const float Ax = A[kx] - Sx*A[kz]; const float Ay = A[ky] - Sy*A[kz]; const float Bx = B[kx] - Sx*B[kz]; const float By = B[ky] - Sy*B[kz]; const float Cx = C[kx] - Sx*C[kz]; const float Cy = C[ky] - Sy*C[kz]; float U = diff_product(Cx, By, Cy, Bx); float V = diff_product(Ax, Cy, Ay, Cx); float W = diff_product(Bx, Ay, By, Ax); if (U == 0.0f || V == 0.0f || W == 0.0f) { double CxBy = (double)Cx*(double)By; double CyBx = (double)Cy*(double)Bx; U = (float)(CxBy - CyBx); double AxCy = (double)Ax*(double)Cy; double AyCx = (double)Ay*(double)Cx; V = (float)(AxCy - AyCx); double BxAy = (double)Bx*(double)Ay; double ByAx = (double)By*(double)Ax; W = (float)(BxAy - ByAx); } if ((U<0.0f || V<0.0f || W<0.0f) && (U>0.0f || V>0.0f || W>0.0f)) { return false; } float det = U+V+W; if (det == 0.0f) { return false; } const float Az = Sz*A[kz]; const float Bz = Sz*B[kz]; const float Cz = Sz*C[kz]; const float T = U*Az + V*Bz + 
W*Cz; int det_sign = sign_mask(det); if (xorf(T,det_sign) < 0.0f)// || xorf(T,det_sign) > hit.t * xorf(det, det_sign)) // early out if hit.t is specified { return false; } const float rcpDet = 1.0f/det; u = U*rcpDet; v = V*rcpDet; t = T*rcpDet; sign = det; // optionally write out normal (todo: this branch is a performance concern, should probably remove) if (normal) { const vec3 ab = b-a; const vec3 ac = c-a; // calculate normal *normal = cross(ab, ac); } return true; } CUDA_CALLABLE inline void adj_intersect_ray_tri_woop( const vec3& p, const vec3& dir, const vec3& a, const vec3& b, const vec3& c, float t, float u, float v, float sign, const vec3& normal, vec3& adj_p, vec3& adj_dir, vec3& adj_a, vec3& adj_b, vec3& adj_c, float& adj_t, float& adj_u, float& adj_v, float& adj_sign, vec3& adj_normal, bool& adj_ret) { // todo: precompute for ray int kz = max_dim(dir); int kx = kz+1; if (kx == 3) kx = 0; int ky = kx+1; if (ky == 3) ky = 0; if (dir[kz] < 0.0f) { float tmp = kx; kx = ky; ky = tmp; } const float Dx = dir[kx]; const float Dy = dir[ky]; const float Dz = dir[kz]; const float Sx = dir[kx]/dir[kz]; const float Sy = dir[ky]/dir[kz]; const float Sz = 1.0f/dir[kz]; // todo: end precompute const vec3 A = a-p; const vec3 B = b-p; const vec3 C = c-p; const float Ax = A[kx] - Sx*A[kz]; const float Ay = A[ky] - Sy*A[kz]; const float Bx = B[kx] - Sx*B[kz]; const float By = B[ky] - Sy*B[kz]; const float Cx = C[kx] - Sx*C[kz]; const float Cy = C[ky] - Sy*C[kz]; float U = Cx*By - Cy*Bx; float V = Ax*Cy - Ay*Cx; float W = Bx*Ay - By*Ax; if (U == 0.0f || V == 0.0f || W == 0.0f) { double CxBy = (double)Cx*(double)By; double CyBx = (double)Cy*(double)Bx; U = (float)(CxBy - CyBx); double AxCy = (double)Ax*(double)Cy; double AyCx = (double)Ay*(double)Cx; V = (float)(AxCy - AyCx); double BxAy = (double)Bx*(double)Ay; double ByAx = (double)By*(double)Ax; W = (float)(BxAy - ByAx); } if ((U<0.0f || V<0.0f || W<0.0f) && (U>0.0f || V>0.0f || W>0.0f)) return; float det = U+V+W; if (det == 0.0f) return; const float Az = Sz*A[kz]; const float Bz = Sz*B[kz]; const float Cz = Sz*C[kz]; const float T = U*Az + V*Bz + W*Cz; int det_sign = sign_mask(det); if (xorf(T,det_sign) < 0.0f)// || xorf(T,det_sign) > hit.t * xorf(det, det_sign)) // early out if hit.t is specified return; const float rcpDet = (1.f / det); const float rcpDetSq = rcpDet * rcpDet; // adj_p const float dAx_dpx = -1.f; const float dBx_dpx = -1.f; const float dCx_dpx = -1.f; const float dAy_dpx = 0.f; const float dBy_dpx = 0.f; const float dCy_dpx = 0.f; const float dAz_dpx = 0.f; const float dBz_dpx = 0.f; const float dCz_dpx = 0.f; const float dAx_dpy = 0.f; const float dBx_dpy = 0.f; const float dCx_dpy = 0.f; const float dAy_dpy = -1.f; const float dBy_dpy = -1.f; const float dCy_dpy = -1.f; const float dAz_dpy = 0.f; const float dBz_dpy = 0.f; const float dCz_dpy = 0.f; const float dAx_dpz = Sx; const float dBx_dpz = Sx; const float dCx_dpz = Sx; const float dAy_dpz = Sy; const float dBy_dpz = Sy; const float dCy_dpz = Sy; const float dAz_dpz = -Sz; const float dBz_dpz = -Sz; const float dCz_dpz = -Sz; const float dU_dpx = Cx * dBy_dpx + By * dCx_dpx - Cy * dBx_dpx - Bx * dCy_dpx; const float dU_dpy = Cx * dBy_dpy + By * dCx_dpy - Cy * dBx_dpy - Bx * dCy_dpy; const float dU_dpz = Cx * dBy_dpz + By * dCx_dpz - Cy * dBx_dpz - Bx * dCy_dpz; const vec3 dU_dp = vec3(dU_dpx, dU_dpy, dU_dpz); const float dV_dpx = Ax * dCy_dpx + Cy * dAx_dpx - Ay * dCx_dpx - Cx * dAy_dpx; const float dV_dpy = Ax * dCy_dpy + Cy * dAx_dpy - Ay * dCx_dpy - Cx * dAy_dpy; 
const float dV_dpz = Ax * dCy_dpz + Cy * dAx_dpz - Ay * dCx_dpz - Cx * dAy_dpz; const vec3 dV_dp = vec3(dV_dpx, dV_dpy, dV_dpz); const float dW_dpx = Bx * dAy_dpx + Ay * dBx_dpx - By * dAx_dpx - Ax * dBy_dpx; const float dW_dpy = Bx * dAy_dpy + Ay * dBx_dpy - By * dAx_dpy - Ax * dBy_dpy; const float dW_dpz = Bx * dAy_dpz + Ay * dBx_dpz - By * dAx_dpz - Ax * dBy_dpz; const vec3 dW_dp = vec3(dW_dpx, dW_dpy, dW_dpz); const float dT_dpx = dU_dpx * Az + U * dAz_dpx + dV_dpx * Bz + V * dBz_dpx + dW_dpx * Cz + W * dCz_dpx; const float dT_dpy = dU_dpy * Az + U * dAz_dpy + dV_dpy * Bz + V * dBz_dpy + dW_dpy * Cz + W * dCz_dpy; const float dT_dpz = dU_dpz * Az + U * dAz_dpz + dV_dpz * Bz + V * dBz_dpz + dW_dpz * Cz + W * dCz_dpz; const vec3 dT_dp = vec3(dT_dpx, dT_dpy, dT_dpz); const float dDet_dpx = dU_dpx + dV_dpx + dW_dpx; const float dDet_dpy = dU_dpy + dV_dpy + dW_dpy; const float dDet_dpz = dU_dpz + dV_dpz + dW_dpz; const vec3 dDet_dp = vec3(dDet_dpx, dDet_dpy, dDet_dpz); const vec3 du_dp = rcpDet * dU_dp + -U * rcpDetSq * dDet_dp; const vec3 dv_dp = rcpDet * dV_dp + -V * rcpDetSq * dDet_dp; const vec3 dt_dp = rcpDet * dT_dp + -T * rcpDetSq * dDet_dp; vec3 adj_p_swapped = adj_u*du_dp + adj_v*dv_dp + adj_t*dt_dp; adj_p[kx] += adj_p_swapped[0]; adj_p[ky] += adj_p_swapped[1]; adj_p[kz] += adj_p_swapped[2]; // adj_dir const float dAx_dDx = -Sz * A[kz]; const float dBx_dDx = -Sz * B[kz]; const float dCx_dDx = -Sz * C[kz]; const float dAy_dDx = 0.f; const float dBy_dDx = 0.f; const float dCy_dDx = 0.f; const float dAz_dDx = 0.f; const float dBz_dDx = 0.f; const float dCz_dDx = 0.f; const float dAx_dDy = 0.f; const float dBx_dDy = 0.f; const float dCx_dDy = 0.f; const float dAy_dDy = -Sz * A[kz]; const float dBy_dDy = -Sz * B[kz]; const float dCy_dDy = -Sz * C[kz]; const float dAz_dDy = 0.f; const float dBz_dDy = 0.f; const float dCz_dDy = 0.f; const float dAx_dDz = Dx * Sz * Sz * A[kz]; const float dBx_dDz = Dx * Sz * Sz * B[kz]; const float dCx_dDz = Dx * Sz * Sz * C[kz]; const float dAy_dDz = Dy * Sz * Sz * A[kz]; const float dBy_dDz = Dy * Sz * Sz * B[kz]; const float dCy_dDz = Dy * Sz * Sz * C[kz]; const float dAz_dDz = -Sz * Sz * A[kz]; const float dBz_dDz = -Sz * Sz * B[kz]; const float dCz_dDz = -Sz * Sz * C[kz]; const float dU_dDx = Cx * dBy_dDx + By * dCx_dDx - Cy * dBx_dDx - Bx * dCy_dDx; const float dU_dDy = Cx * dBy_dDy + By * dCx_dDy - Cy * dBx_dDy - Bx * dCy_dDy; const float dU_dDz = Cx * dBy_dDz + By * dCx_dDz - Cy * dBx_dDz - Bx * dCy_dDz; const vec3 dU_dD = vec3(dU_dDx, dU_dDy, dU_dDz); const float dV_dDx = Ax * dCy_dDx + Cy * dAx_dDx - Ay * dCx_dDx - Cx * dAy_dDx; const float dV_dDy = Ax * dCy_dDy + Cy * dAx_dDy - Ay * dCx_dDy - Cx * dAy_dDy; const float dV_dDz = Ax * dCy_dDz + Cy * dAx_dDz - Ay * dCx_dDz - Cx * dAy_dDz; const vec3 dV_dD = vec3(dV_dDx, dV_dDy, dV_dDz); const float dW_dDx = Bx * dAy_dDx + Ay * dBx_dDx - By * dAx_dDx - Ax * dBy_dDx; const float dW_dDy = Bx * dAy_dDy + Ay * dBx_dDy - By * dAx_dDy - Ax * dBy_dDy; const float dW_dDz = Bx * dAy_dDz + Ay * dBx_dDz - By * dAx_dDz - Ax * dBy_dDz; const vec3 dW_dD = vec3(dW_dDx, dW_dDy, dW_dDz); const float dT_dDx = dU_dDx * Az + U * dAz_dDx + dV_dDx * Bz + V * dBz_dDx + dW_dDx * Cz + W * dCz_dDx; const float dT_dDy = dU_dDy * Az + U * dAz_dDy + dV_dDy * Bz + V * dBz_dDy + dW_dDy * Cz + W * dCz_dDy; const float dT_dDz = dU_dDz * Az + U * dAz_dDz + dV_dDz * Bz + V * dBz_dDz + dW_dDz * Cz + W * dCz_dDz; const vec3 dT_dD = vec3(dT_dDx, dT_dDy, dT_dDz); const float dDet_dDx = dU_dDx + dV_dDx + dW_dDx; const float dDet_dDy = 
dU_dDy + dV_dDy + dW_dDy; const float dDet_dDz = dU_dDz + dV_dDz + dW_dDz; const vec3 dDet_dD = vec3(dDet_dDx, dDet_dDy, dDet_dDz); const vec3 du_dD = rcpDet * dU_dD + -U * rcpDetSq * dDet_dD; const vec3 dv_dD = rcpDet * dV_dD + -V * rcpDetSq * dDet_dD; const vec3 dt_dD = rcpDet * dT_dD + -T * rcpDetSq * dDet_dD; vec3 adj_dir_swapped = adj_u*du_dD + adj_v*dv_dD + adj_t*dt_dD; adj_dir[kx] += adj_dir_swapped[0]; adj_dir[ky] += adj_dir_swapped[1]; adj_dir[kz] += adj_dir_swapped[2]; } // Möller's method #include "intersect_tri.h" CUDA_CALLABLE inline int intersect_tri_tri( vec3& v0, vec3& v1, vec3& v2, vec3& u0, vec3& u1, vec3& u2) { return NoDivTriTriIsect(&v0[0], &v1[0], &v2[0], &u0[0], &u1[0], &u2[0]); } CUDA_CALLABLE inline void adj_intersect_tri_tri(const vec3& var_v0, const vec3& var_v1, const vec3& var_v2, const vec3& var_u0, const vec3& var_u1, const vec3& var_u2, vec3& adj_v0, vec3& adj_v1, vec3& adj_v2, vec3& adj_u0, vec3& adj_u1, vec3& adj_u2, int adj_ret) {} CUDA_CALLABLE inline void adj_closest_point_to_triangle( const vec3& var_a, const vec3& var_b, const vec3& var_c, const vec3& var_p, vec3& adj_a, vec3& adj_b, vec3& adj_c, vec3& adj_p, vec2& adj_ret) { // primal vars vec3 var_0; vec3 var_1; vec3 var_2; float32 var_3; float32 var_4; const float32 var_5 = 0.0; bool var_6; bool var_7; bool var_8; const float32 var_9 = 1.0; vec2 var_10; vec3 var_11; float32 var_12; float32 var_13; bool var_14; bool var_15; bool var_16; vec2 var_17; vec2 var_18; float32 var_19; float32 var_20; float32 var_21; float32 var_22; float32 var_23; bool var_24; bool var_25; bool var_26; bool var_27; float32 var_28; vec2 var_29; vec2 var_30; vec3 var_31; float32 var_32; float32 var_33; bool var_34; bool var_35; bool var_36; vec2 var_37; vec2 var_38; float32 var_39; float32 var_40; float32 var_41; float32 var_42; float32 var_43; bool var_44; bool var_45; bool var_46; bool var_47; float32 var_48; vec2 var_49; vec2 var_50; float32 var_51; float32 var_52; float32 var_53; float32 var_54; float32 var_55; float32 var_56; float32 var_57; float32 var_58; bool var_59; float32 var_60; bool var_61; float32 var_62; bool var_63; bool var_64; float32 var_65; vec2 var_66; // vec2 var_67; float32 var_68; float32 var_69; float32 var_70; float32 var_71; float32 var_72; float32 var_73; float32 var_74; // vec2 var_75; //--------- // dual vars vec3 adj_0 = 0; vec3 adj_1 = 0; vec3 adj_2 = 0; float32 adj_3 = 0; float32 adj_4 = 0; float32 adj_5 = 0; //bool adj_6 = 0; //bool adj_7 = 0; //bool adj_8 = 0; float32 adj_9 = 0; vec2 adj_10 = 0; vec3 adj_11 = 0; float32 adj_12 = 0; float32 adj_13 = 0; //bool adj_14 = 0; //bool adj_15 = 0; bool adj_16 = 0; vec2 adj_17 = 0; vec2 adj_18 = 0; float32 adj_19 = 0; float32 adj_20 = 0; float32 adj_21 = 0; float32 adj_22 = 0; float32 adj_23 = 0; //bool adj_24 = 0; //bool adj_25 = 0; //bool adj_26 = 0; bool adj_27 = 0; float32 adj_28 = 0; vec2 adj_29 = 0; vec2 adj_30 = 0; vec3 adj_31 = 0; float32 adj_32 = 0; float32 adj_33 = 0; //bool adj_34 = 0; //bool adj_35 = 0; bool adj_36 = 0; vec2 adj_37 = 0; vec2 adj_38 = 0; float32 adj_39 = 0; float32 adj_40 = 0; float32 adj_41 = 0; float32 adj_42 = 0; float32 adj_43 = 0; //bool adj_44 = 0; //bool adj_45 = 0; //bool adj_46 = 0; bool adj_47 = 0; float32 adj_48 = 0; vec2 adj_49 = 0; vec2 adj_50 = 0; float32 adj_51 = 0; float32 adj_52 = 0; float32 adj_53 = 0; float32 adj_54 = 0; float32 adj_55 = 0; float32 adj_56 = 0; float32 adj_57 = 0; float32 adj_58 = 0; //bool adj_59 = 0; float32 adj_60 = 0; //bool adj_61 = 0; float32 adj_62 = 0; //bool adj_63 = 0; bool 
adj_64 = 0; float32 adj_65 = 0; vec2 adj_66 = 0; vec2 adj_67 = 0; float32 adj_68 = 0; float32 adj_69 = 0; float32 adj_70 = 0; float32 adj_71 = 0; float32 adj_72 = 0; float32 adj_73 = 0; float32 adj_74 = 0; vec2 adj_75 = 0; //--------- // forward var_0 = wp::sub(var_b, var_a); var_1 = wp::sub(var_c, var_a); var_2 = wp::sub(var_p, var_a); var_3 = wp::dot(var_0, var_2); var_4 = wp::dot(var_1, var_2); var_6 = (var_3 <= var_5); var_7 = (var_4 <= var_5); var_8 = var_6 && var_7; if (var_8) { var_10 = wp::vec2(var_9, var_5); goto label0; } var_11 = wp::sub(var_p, var_b); var_12 = wp::dot(var_0, var_11); var_13 = wp::dot(var_1, var_11); var_14 = (var_12 >= var_5); var_15 = (var_13 <= var_12); var_16 = var_14 && var_15; if (var_16) { var_17 = wp::vec2(var_5, var_9); goto label1; } var_18 = wp::select(var_16, var_10, var_17); var_19 = wp::mul(var_3, var_13); var_20 = wp::mul(var_12, var_4); var_21 = wp::sub(var_19, var_20); var_22 = wp::sub(var_3, var_12); var_23 = wp::div(var_3, var_22); var_24 = (var_21 <= var_5); var_25 = (var_3 >= var_5); var_26 = (var_12 <= var_5); var_27 = var_24 && var_25 && var_26; if (var_27) { var_28 = wp::sub(var_9, var_23); var_29 = wp::vec2(var_28, var_23); goto label2; } var_30 = wp::select(var_27, var_18, var_29); var_31 = wp::sub(var_p, var_c); var_32 = wp::dot(var_0, var_31); var_33 = wp::dot(var_1, var_31); var_34 = (var_33 >= var_5); var_35 = (var_32 <= var_33); var_36 = var_34 && var_35; if (var_36) { var_37 = wp::vec2(var_5, var_5); goto label3; } var_38 = wp::select(var_36, var_30, var_37); var_39 = wp::mul(var_32, var_4); var_40 = wp::mul(var_3, var_33); var_41 = wp::sub(var_39, var_40); var_42 = wp::sub(var_4, var_33); var_43 = wp::div(var_4, var_42); var_44 = (var_41 <= var_5); var_45 = (var_4 >= var_5); var_46 = (var_33 <= var_5); var_47 = var_44 && var_45 && var_46; if (var_47) { var_48 = wp::sub(var_9, var_43); var_49 = wp::vec2(var_48, var_5); goto label4; } var_50 = wp::select(var_47, var_38, var_49); var_51 = wp::mul(var_12, var_33); var_52 = wp::mul(var_32, var_13); var_53 = wp::sub(var_51, var_52); var_54 = wp::sub(var_13, var_12); var_55 = wp::sub(var_13, var_12); var_56 = wp::sub(var_32, var_33); var_57 = wp::add(var_55, var_56); var_58 = wp::div(var_54, var_57); var_59 = (var_53 <= var_5); var_60 = wp::sub(var_13, var_12); var_61 = (var_60 >= var_5); var_62 = wp::sub(var_32, var_33); var_63 = (var_62 >= var_5); var_64 = var_59 && var_61 && var_63; if (var_64) { var_65 = wp::sub(var_9, var_58); var_66 = wp::vec2(var_5, var_65); goto label5; } // var_67 = wp::select(var_64, var_50, var_66); var_68 = wp::add(var_53, var_41); var_69 = wp::add(var_68, var_21); var_70 = wp::div(var_9, var_69); var_71 = wp::mul(var_41, var_70); var_72 = wp::mul(var_21, var_70); var_73 = wp::sub(var_9, var_71); var_74 = wp::sub(var_73, var_72); // var_75 = wp::vec2(var_74, var_71); goto label6; //--------- // reverse label6:; adj_75 += adj_ret; wp::adj_vec2(var_74, var_71, adj_74, adj_71, adj_75); wp::adj_sub(var_73, var_72, adj_73, adj_72, adj_74); wp::adj_sub(var_9, var_71, adj_9, adj_71, adj_73); wp::adj_mul(var_21, var_70, adj_21, adj_70, adj_72); wp::adj_mul(var_41, var_70, adj_41, adj_70, adj_71); wp::adj_div(var_9, var_69, var_70, adj_9, adj_69, adj_70); wp::adj_add(var_68, var_21, adj_68, adj_21, adj_69); wp::adj_add(var_53, var_41, adj_53, adj_41, adj_68); wp::adj_select(var_64, var_50, var_66, adj_64, adj_50, adj_66, adj_67); if (var_64) { label5:; adj_66 += adj_ret; wp::adj_vec2(var_5, var_65, adj_5, adj_65, adj_66); wp::adj_sub(var_9, var_58, adj_9, adj_58, 
adj_65); } wp::adj_sub(var_32, var_33, adj_32, adj_33, adj_62); wp::adj_sub(var_13, var_12, adj_13, adj_12, adj_60); wp::adj_div(var_54, var_57, var_58, adj_54, adj_57, adj_58); wp::adj_add(var_55, var_56, adj_55, adj_56, adj_57); wp::adj_sub(var_32, var_33, adj_32, adj_33, adj_56); wp::adj_sub(var_13, var_12, adj_13, adj_12, adj_55); wp::adj_sub(var_13, var_12, adj_13, adj_12, adj_54); wp::adj_sub(var_51, var_52, adj_51, adj_52, adj_53); wp::adj_mul(var_32, var_13, adj_32, adj_13, adj_52); wp::adj_mul(var_12, var_33, adj_12, adj_33, adj_51); wp::adj_select(var_47, var_38, var_49, adj_47, adj_38, adj_49, adj_50); if (var_47) { label4:; adj_49 += adj_ret; wp::adj_vec2(var_48, var_5, adj_48, adj_5, adj_49); wp::adj_sub(var_9, var_43, adj_9, adj_43, adj_48); } wp::adj_div(var_4, var_42, var_43, adj_4, adj_42, adj_43); wp::adj_sub(var_4, var_33, adj_4, adj_33, adj_42); wp::adj_sub(var_39, var_40, adj_39, adj_40, adj_41); wp::adj_mul(var_3, var_33, adj_3, adj_33, adj_40); wp::adj_mul(var_32, var_4, adj_32, adj_4, adj_39); wp::adj_select(var_36, var_30, var_37, adj_36, adj_30, adj_37, adj_38); if (var_36) { label3:; adj_37 += adj_ret; wp::adj_vec2(var_5, var_5, adj_5, adj_5, adj_37); } wp::adj_dot(var_1, var_31, adj_1, adj_31, adj_33); wp::adj_dot(var_0, var_31, adj_0, adj_31, adj_32); wp::adj_sub(var_p, var_c, adj_p, adj_c, adj_31); wp::adj_select(var_27, var_18, var_29, adj_27, adj_18, adj_29, adj_30); if (var_27) { label2:; adj_29 += adj_ret; wp::adj_vec2(var_28, var_23, adj_28, adj_23, adj_29); wp::adj_sub(var_9, var_23, adj_9, adj_23, adj_28); } wp::adj_div(var_3, var_22, var_23, adj_3, adj_22, adj_23); wp::adj_sub(var_3, var_12, adj_3, adj_12, adj_22); wp::adj_sub(var_19, var_20, adj_19, adj_20, adj_21); wp::adj_mul(var_12, var_4, adj_12, adj_4, adj_20); wp::adj_mul(var_3, var_13, adj_3, adj_13, adj_19); wp::adj_select(var_16, var_10, var_17, adj_16, adj_10, adj_17, adj_18); if (var_16) { label1:; adj_17 += adj_ret; wp::adj_vec2(var_5, var_9, adj_5, adj_9, adj_17); } wp::adj_dot(var_1, var_11, adj_1, adj_11, adj_13); wp::adj_dot(var_0, var_11, adj_0, adj_11, adj_12); wp::adj_sub(var_p, var_b, adj_p, adj_b, adj_11); if (var_8) { label0:; adj_10 += adj_ret; wp::adj_vec2(var_9, var_5, adj_9, adj_5, adj_10); } wp::adj_dot(var_1, var_2, adj_1, adj_2, adj_4); wp::adj_dot(var_0, var_2, adj_0, adj_2, adj_3); wp::adj_sub(var_p, var_a, adj_p, adj_a, adj_2); wp::adj_sub(var_c, var_a, adj_c, adj_a, adj_1); wp::adj_sub(var_b, var_a, adj_b, adj_a, adj_0); return; } // ---------------------------------------------------------------- // jleaf: I needed to replace "float(" with "cast_float(" manually below because // "#define float(x) cast_float(x)"" in this header affects other files. // See adjoint in "intersect_adj.h" for the generated adjoint. /* Here is the original warp implementation that was used to generate this code: # https://books.google.ca/books?id=WGpL6Sk9qNAC&printsec=frontcover&hl=en#v=onepage&q=triangle&f=false # From 5.1.9 # p1 and q1 are points of edge 1. # p2 and q2 are points of edge 2. # epsilon zero tolerance for determining if points in an edge are degenerate # output: A single wp.vec3, containing s and t for edges 1 and 2 respectively, # and the distance between their closest points. 
@wp.func def closest_point_edge_edge( p1: wp.vec3, q1: wp.vec3, p2: wp.vec3, q2: wp.vec3, epsilon: float ): # direction vectors of each segment/edge d1 = q1 - p1 d2 = q2 - p2 r = p1 - p2 a = wp.dot(d1, d1) # squared length of segment s1, always nonnegative e = wp.dot(d2, d2) # squared length of segment s2, always nonnegative f = wp.dot(d2, r) s = float(0.0) t = float(0.0) dist = wp.length(p2 - p1) # Check if either or both segments degenerate into points if a <= epsilon and e <= epsilon: # both segments degenerate into points return wp.vec3(s, t, dist) if a <= epsilon: s = float(0.0) t = float(f / e) # s = 0 => t = (b*s + f) / e = f / e else: c = wp.dot(d1, r) if e <= epsilon: # second segment generates into a point s = wp.clamp(-c / a, 0.0, 1.0) # t = 0 => s = (b*t-c)/a = -c/a t = float(0.0) else: # The general nondegenerate case starts here b = wp.dot(d1, d2) denom = a * e - b * b # always nonnegative # if segments not parallel, compute closest point on L1 to L2 and # clamp to segment S1. Else pick arbitrary s (here 0) if denom != 0.0: s = wp.clamp((b * f - c * e) / denom, 0.0, 1.0) else: s = 0.0 # compute point on L2 closest to S1(s) using # t = dot((p1+d2*s) - p2,d2)/dot(d2,d2) = (b*s+f)/e t = (b * s + f) / e # if t in [0,1] done. Else clamp t, recompute s for the new value # of t using s = dot((p2+d2*t-p1,d1)/dot(d1,d1) = (t*b - c)/a # and clamp s to [0,1] if t < 0.0: t = 0.0 s = wp.clamp(-c / a, 0.0, 1.0) elif t > 1.0: t = 1.0 s = wp.clamp((b - c) / a, 0.0, 1.0) c1 = p1 + (q1 - p1) * s c2 = p2 + (q2 - p2) * t dist = wp.length(c2 - c1) return wp.vec3(s, t, dist) */ static CUDA_CALLABLE vec3 closest_point_edge_edge(vec3 var_p1, vec3 var_q1, vec3 var_p2, vec3 var_q2, float32 var_epsilon) { //--------- // primal vars vec3 var_0; vec3 var_1; vec3 var_2; float32 var_3; float32 var_4; float32 var_5; const float32 var_6 = 0.0; float32 var_7; float32 var_8; vec3 var_9; float32 var_10; bool var_11; bool var_12; bool var_13; vec3 var_14; bool var_15; float32 var_16; float32 var_17; float32 var_18; float32 var_19; float32 var_20; float32 var_21; bool var_22; float32 var_23; float32 var_24; const float32 var_25 = 1.0; float32 var_26; float32 var_27; float32 var_28; float32 var_29; float32 var_30; float32 var_31; float32 var_32; float32 var_33; bool var_34; float32 var_35; float32 var_36; float32 var_37; float32 var_38; float32 var_39; float32 var_40; float32 var_41; float32 var_42; float32 var_43; float32 var_44; bool var_45; float32 var_46; float32 var_47; float32 var_48; float32 var_49; float32 var_50; bool var_51; float32 var_52; float32 var_53; float32 var_54; float32 var_55; float32 var_56; float32 var_57; float32 var_58; float32 var_59; float32 var_60; float32 var_61; float32 var_62; vec3 var_63; vec3 var_64; vec3 var_65; vec3 var_66; vec3 var_67; vec3 var_68; vec3 var_69; float32 var_70; vec3 var_71; //--------- // forward var_0 = wp::sub(var_q1, var_p1); var_1 = wp::sub(var_q2, var_p2); var_2 = wp::sub(var_p1, var_p2); var_3 = wp::dot(var_0, var_0); var_4 = wp::dot(var_1, var_1); var_5 = wp::dot(var_1, var_2); var_7 = wp::cast_float(var_6); var_8 = wp::cast_float(var_6); var_9 = wp::sub(var_p2, var_p1); var_10 = wp::length(var_9); var_11 = (var_3 <= var_epsilon); var_12 = (var_4 <= var_epsilon); var_13 = var_11 && var_12; if (var_13) { var_14 = wp::vec3(var_7, var_8, var_10); return var_14; } var_15 = (var_3 <= var_epsilon); if (var_15) { var_16 = wp::cast_float(var_6); var_17 = wp::div(var_5, var_4); var_18 = wp::cast_float(var_17); } var_19 = wp::select(var_15, var_7, var_16); var_20 = 
wp::select(var_15, var_8, var_18); if (!var_15) { var_21 = wp::dot(var_0, var_2); var_22 = (var_4 <= var_epsilon); if (var_22) { var_23 = wp::neg(var_21); var_24 = wp::div(var_23, var_3); var_26 = wp::clamp(var_24, var_6, var_25); var_27 = wp::cast_float(var_6); } var_28 = wp::select(var_22, var_19, var_26); var_29 = wp::select(var_22, var_20, var_27); if (!var_22) { var_30 = wp::dot(var_0, var_1); var_31 = wp::mul(var_3, var_4); var_32 = wp::mul(var_30, var_30); var_33 = wp::sub(var_31, var_32); var_34 = (var_33 != var_6); if (var_34) { var_35 = wp::mul(var_30, var_5); var_36 = wp::mul(var_21, var_4); var_37 = wp::sub(var_35, var_36); var_38 = wp::div(var_37, var_33); var_39 = wp::clamp(var_38, var_6, var_25); } var_40 = wp::select(var_34, var_28, var_39); if (!var_34) { } var_41 = wp::select(var_34, var_6, var_40); var_42 = wp::mul(var_30, var_41); var_43 = wp::add(var_42, var_5); var_44 = wp::div(var_43, var_4); var_45 = (var_44 < var_6); if (var_45) { var_46 = wp::neg(var_21); var_47 = wp::div(var_46, var_3); var_48 = wp::clamp(var_47, var_6, var_25); } var_49 = wp::select(var_45, var_41, var_48); var_50 = wp::select(var_45, var_44, var_6); if (!var_45) { var_51 = (var_50 > var_25); if (var_51) { var_52 = wp::sub(var_30, var_21); var_53 = wp::div(var_52, var_3); var_54 = wp::clamp(var_53, var_6, var_25); } var_55 = wp::select(var_51, var_49, var_54); var_56 = wp::select(var_51, var_50, var_25); } var_57 = wp::select(var_45, var_55, var_49); var_58 = wp::select(var_45, var_56, var_50); } var_59 = wp::select(var_22, var_57, var_28); var_60 = wp::select(var_22, var_58, var_29); } var_61 = wp::select(var_15, var_59, var_19); var_62 = wp::select(var_15, var_60, var_20); var_63 = wp::sub(var_q1, var_p1); var_64 = wp::mul(var_63, var_61); var_65 = wp::add(var_p1, var_64); var_66 = wp::sub(var_q2, var_p2); var_67 = wp::mul(var_66, var_62); var_68 = wp::add(var_p2, var_67); var_69 = wp::sub(var_68, var_65); var_70 = wp::length(var_69); var_71 = wp::vec3(var_61, var_62, var_70); return var_71; } } // namespace wp
34,135
C
27.328631
194
0.542669
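A usage note on intersect.h above: closest_point_to_triangle returns only the first two barycentric coordinates as vec2(u, v), with w = 1 - u - v implied, so the caller reconstructs the closest point as u*a + v*b + w*c. The sketch below shows that reconstruction with a stand-in vec3 type; the hard-coded (u, v) pair is a hypothetical value of the kind the function would return, not a call into the real header.

// Reconstructing the closest point from the barycentric (u, v) pair.
#include <cstdio>

struct vec3 { float x, y, z; };

static vec3 bary_point(const vec3& a, const vec3& b, const vec3& c, float u, float v)
{
    const float w = 1.0f - u - v;
    return vec3{ u*a.x + v*b.x + w*c.x,
                 u*a.y + v*b.y + w*c.y,
                 u*a.z + v*b.z + w*c.z };
}

int main()
{
    const vec3 a{0.f, 0.f, 0.f}, b{1.f, 0.f, 0.f}, c{0.f, 1.f, 0.f};

    // a query point closest to vertex b would yield (u, v) = (0, 1)
    const vec3 p = bary_point(a, b, c, 0.0f, 1.0f);
    std::printf("closest point: (%g, %g, %g)\n", p.x, p.y, p.z);   // (1, 0, 0)
    return 0;
}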
NVIDIA/warp/warp/native/cuda_util.h
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once #include "builtin.h" #if WP_ENABLE_CUDA #include <cudaTypedefs.h> #include <cuda_runtime_api.h> #include <stdio.h> #include <vector> #define check_cuda(code) (check_cuda_result(code, __FUNCTION__, __FILE__, __LINE__)) #define check_cu(code) (check_cu_result(code, __FUNCTION__, __FILE__, __LINE__)) #if defined(__CUDACC__) #if _DEBUG // helper for launching kernels (synchronize + error checking after each kernel) #define wp_launch_device(context, kernel, dim, args) { \ if (dim) { \ ContextGuard guard(context); \ cudaStream_t stream = (cudaStream_t)cuda_stream_get_current(); \ const int num_threads = 256; \ const int num_blocks = (dim+num_threads-1)/num_threads; \ begin_cuda_range(WP_TIMING_KERNEL_BUILTIN, stream, context, #kernel); \ kernel<<<num_blocks, 256, 0, stream>>>args; \ check_cuda(cuda_context_check(WP_CURRENT_CONTEXT)); \ end_cuda_range(WP_TIMING_KERNEL_BUILTIN, stream); }} #else // helper for launching kernels (no error checking) #define wp_launch_device(context, kernel, dim, args) { \ if (dim) { \ ContextGuard guard(context); \ cudaStream_t stream = (cudaStream_t)cuda_stream_get_current(); \ const int num_threads = 256; \ const int num_blocks = (dim+num_threads-1)/num_threads; \ begin_cuda_range(WP_TIMING_KERNEL_BUILTIN, stream, context, #kernel); \ kernel<<<num_blocks, 256, 0, stream>>>args; \ end_cuda_range(WP_TIMING_KERNEL_BUILTIN, stream); }} #endif // _DEBUG #endif // defined(__CUDACC__) CUresult cuDriverGetVersion_f(int* version); CUresult cuGetErrorName_f(CUresult result, const char** pstr); CUresult cuGetErrorString_f(CUresult result, const char** pstr); CUresult cuInit_f(unsigned int flags); CUresult cuDeviceGet_f(CUdevice *dev, int ordinal); CUresult cuDeviceGetCount_f(int* count); CUresult cuDeviceGetName_f(char* name, int len, CUdevice dev); CUresult cuDeviceGetAttribute_f(int* value, CUdevice_attribute attrib, CUdevice dev); CUresult cuDeviceGetUuid_f(CUuuid* uuid, CUdevice dev); CUresult cuDevicePrimaryCtxRetain_f(CUcontext* ctx, CUdevice dev); CUresult cuDevicePrimaryCtxRelease_f(CUdevice dev); CUresult cuDeviceCanAccessPeer_f(int* can_access, CUdevice dev, CUdevice peer_dev); CUresult cuMemGetInfo_f(size_t* free, size_t* total); CUresult cuCtxGetCurrent_f(CUcontext* ctx); CUresult cuCtxSetCurrent_f(CUcontext ctx); CUresult cuCtxPushCurrent_f(CUcontext ctx); CUresult cuCtxPopCurrent_f(CUcontext* ctx); CUresult cuCtxSynchronize_f(); CUresult cuCtxGetDevice_f(CUdevice* dev); CUresult cuCtxCreate_f(CUcontext* ctx, unsigned int flags, CUdevice dev); CUresult cuCtxDestroy_f(CUcontext ctx); CUresult cuCtxEnablePeerAccess_f(CUcontext peer_ctx, unsigned int flags); CUresult cuCtxDisablePeerAccess_f(CUcontext peer_ctx); CUresult cuStreamCreate_f(CUstream* stream, unsigned int flags); CUresult cuStreamDestroy_f(CUstream stream); CUresult cuStreamSynchronize_f(CUstream stream); CUresult cuStreamWaitEvent_f(CUstream stream, CUevent event, unsigned int flags); CUresult cuStreamGetCtx_f(CUstream stream, CUcontext* pctx); CUresult cuStreamGetCaptureInfo_f(CUstream stream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out, CUgraph 
*graph_out, const CUgraphNode **dependencies_out, size_t *numDependencies_out); CUresult cuStreamUpdateCaptureDependencies_f(CUstream stream, CUgraphNode *dependencies, size_t numDependencies, unsigned int flags); CUresult cuEventCreate_f(CUevent* event, unsigned int flags); CUresult cuEventDestroy_f(CUevent event); CUresult cuEventRecord_f(CUevent event, CUstream stream); CUresult cuEventRecordWithFlags_f(CUevent event, CUstream stream, unsigned int flags); CUresult cuEventSynchronize_f(CUevent event); CUresult cuModuleUnload_f(CUmodule hmod); CUresult cuModuleLoadDataEx_f(CUmodule *module, const void *image, unsigned int numOptions, CUjit_option *options, void **optionValues); CUresult cuModuleGetFunction_f(CUfunction *hfunc, CUmodule hmod, const char *name); CUresult cuLaunchKernel_f(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams, void **extra); CUresult cuMemcpyPeerAsync_f(CUdeviceptr dst_ptr, CUcontext dst_ctx, CUdeviceptr src_ptr, CUcontext src_ctx, size_t n, CUstream stream); CUresult cuPointerGetAttribute_f(void* data, CUpointer_attribute attribute, CUdeviceptr ptr); CUresult cuGraphicsMapResources_f(unsigned int count, CUgraphicsResource* resources, CUstream stream); CUresult cuGraphicsUnmapResources_f(unsigned int count, CUgraphicsResource* resources, CUstream hStream); CUresult cuGraphicsResourceGetMappedPointer_f(CUdeviceptr* pDevPtr, size_t* pSize, CUgraphicsResource resource); CUresult cuGraphicsGLRegisterBuffer_f(CUgraphicsResource *pCudaResource, unsigned int buffer, unsigned int flags); CUresult cuGraphicsUnregisterResource_f(CUgraphicsResource resource); bool init_cuda_driver(); bool is_cuda_driver_initialized(); bool check_cuda_result(cudaError_t code, const char* func, const char* file, int line); inline bool check_cuda_result(uint64_t code, const char* func, const char* file, int line) { return check_cuda_result(static_cast<cudaError_t>(code), func, file, line); } bool check_cu_result(CUresult result, const char* func, const char* file, int line); inline uint64_t get_capture_id(CUstream stream) { CUstreamCaptureStatus status; uint64_t id = 0; check_cu(cuStreamGetCaptureInfo_f(stream, &status, &id, NULL, NULL, NULL)); return id; } inline CUgraph get_capture_graph(CUstream stream) { CUstreamCaptureStatus status; CUgraph graph = NULL; check_cu(cuStreamGetCaptureInfo_f(stream, &status, NULL, &graph, NULL, NULL)); return graph; } bool get_capture_dependencies(CUstream stream, std::vector<CUgraphNode>& dependencies_ret); bool get_graph_leaf_nodes(cudaGraph_t graph, std::vector<cudaGraphNode_t>& leaf_nodes_ret); inline CUcontext get_stream_context(CUstream stream) { CUcontext context; if (check_cu(cuStreamGetCtx_f(stream, &context))) return context; else return NULL; } inline CUcontext get_stream_context(void* stream) { return get_stream_context(static_cast<CUstream>(stream)); } // // Scoped CUDA context guard // // Behaviour on entry // - If the given `context` is NULL, do nothing. // - If the given `context` is the same as the current context, do nothing. // - If the given `context` is different from the current context, make the given context current. // // Behaviour on exit // - If the current context did not change on entry, do nothing. // - If the `restore` flag was true on entry, make the previous context current. 
// // Default exit behaviour policy // - If the `restore` flag is omitted on entry, fall back on the global `always_restore` flag. // - This allows us to easily change the default behaviour of the guards. // class ContextGuard { public: // default policy for restoring contexts static bool always_restore; explicit ContextGuard(CUcontext context, bool restore=always_restore) : need_restore(false) { if (context) { if (check_cu(cuCtxGetCurrent_f(&prev_context)) && context != prev_context) need_restore = check_cu(cuCtxSetCurrent_f(context)) && restore; } } explicit ContextGuard(void* context, bool restore=always_restore) : ContextGuard(static_cast<CUcontext>(context), restore) { } ~ContextGuard() { if (need_restore) check_cu(cuCtxSetCurrent_f(prev_context)); } private: CUcontext prev_context; bool need_restore; }; // CUDA timing range used during event-based timing struct CudaTimingRange { void* context; const char* name; int flag; CUevent start; CUevent end; }; // Timing result used to pass timings to Python struct timing_result_t { void* context; const char* name; int flag; float elapsed; }; struct CudaTimingState { int flags; std::vector<CudaTimingRange> ranges; CudaTimingState* parent; CudaTimingState(int flags, CudaTimingState* parent) : flags(flags), parent(parent) { } }; // timing flags constexpr int WP_TIMING_KERNEL = 1; // Warp kernel constexpr int WP_TIMING_KERNEL_BUILTIN = 2; // internal kernel constexpr int WP_TIMING_MEMCPY = 4; // memcpy operation constexpr int WP_TIMING_MEMSET = 8; // memset operation constexpr int WP_TIMING_GRAPH = 16; // graph launch #define begin_cuda_range(_flag, _stream, _context, _name) \ CudaTimingRange _timing_range; \ bool _timing_enabled; \ if ((g_cuda_timing_state->flags & _flag) && !cuda_stream_is_capturing(_stream)) { \ ContextGuard guard(_context, true); \ _timing_enabled = true; \ _timing_range.context = _context ? _context : get_current_context(); \ _timing_range.name = _name; \ _timing_range.flag = _flag; \ check_cu(cuEventCreate_f(&_timing_range.start, CU_EVENT_DEFAULT)); \ check_cu(cuEventCreate_f(&_timing_range.end, CU_EVENT_DEFAULT)); \ check_cu(cuEventRecord_f(_timing_range.start, static_cast<CUstream>(_stream))); \ } else { \ _timing_enabled = false; \ } #define end_cuda_range(_flag, _stream) \ if (_timing_enabled) { \ check_cu(cuEventRecord_f(_timing_range.end, static_cast<CUstream>(_stream))); \ g_cuda_timing_state->ranges.push_back(_timing_range); \ } extern CudaTimingState* g_cuda_timing_state; #else typedef int CUdevice; typedef struct CUctx_st* CUcontext; typedef struct CUstream_st* CUstream; class ContextGuard { public: explicit ContextGuard(CUcontext context, bool restore=false) { (void)context; (void)restore; } explicit ContextGuard(void* context, bool restore=false) { (void)context; (void)restore; } }; #endif // WP_ENABLE_CUDA // Pass this value to device functions as the `context` parameter to bypass unnecessary context management. // This works in conjunction with ContextGuards, which do nothing if the given context is NULL. // Using this variable instead of passing NULL directly aids readability and makes the intent clear. constexpr void* WP_CURRENT_CONTEXT = NULL;
10,860
C
36.581315
263
0.723573
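The ContextGuard in cuda_util.h above is an RAII guard: on entry it switches to the given CUDA context only if it is non-NULL and differs from the current one, and on exit it restores the previous context only when restoration was requested (WP_CURRENT_CONTEXT, i.e. NULL, makes it a no-op). The sketch below mimics that entry/exit behaviour without the CUDA driver: a thread-local pointer stands in for CUcontext and for cuCtxGetCurrent_f/cuCtxSetCurrent_f, so it only illustrates the pattern, not the real implementation.

// RAII sketch of the ContextGuard behaviour documented above.
#include <cstdio>

using FakeContext = int;
static thread_local FakeContext* g_current = nullptr;   // stand-in for the driver's current context

class ScopedContext
{
public:
    explicit ScopedContext(FakeContext* ctx, bool restore = true)
        : m_prev(g_current), m_need_restore(false)
    {
        if (ctx && ctx != g_current)
        {
            g_current = ctx;           // make the given context current
            m_need_restore = restore;  // remember whether to undo this on exit
        }
    }

    ~ScopedContext()
    {
        if (m_need_restore)
            g_current = m_prev;        // restore the previously current context
    }

private:
    FakeContext* m_prev;
    bool m_need_restore;
};

int main()
{
    FakeContext ctx_a = 1, ctx_b = 2;
    g_current = &ctx_a;
    {
        ScopedContext guard(&ctx_b);              // switch to ctx_b for this scope
        std::printf("inside: %d\n", *g_current);  // 2
    }
    std::printf("after:  %d\n", *g_current);      // 1, restored on scope exit

    {
        ScopedContext guard(nullptr);             // NULL context: guard does nothing,
    }                                             // mirroring WP_CURRENT_CONTEXT
    return 0;
}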
NVIDIA/warp/warp/native/svd.h
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ // The MIT License (MIT) // Copyright (c) 2014 Eric V. Jang // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. // Source: https://github.com/ericjang/svd3/blob/master/svd3_cuda/svd3_cuda.h #pragma once #include "builtin.h" namespace wp { #define _gamma 5.828427124 // FOUR_GAMMA_SQUARED = sqrt(8)+3; #define _cstar 0.923879532 // cos(pi/8) #define _sstar 0.3826834323 // sin(p/8) #define _EPSILON 1e-6 // TODO: replace sqrt with rsqrt template<typename Type> inline CUDA_CALLABLE Type accurateSqrt(Type x) { return x / sqrt(x); } template<typename Type> inline CUDA_CALLABLE void condSwap(bool c, Type &X, Type &Y) { // used in step 2 Type Z = X; X = c ? Y : X; Y = c ? Z : Y; } template<typename Type> inline CUDA_CALLABLE void condNegSwap(bool c, Type &X, Type &Y) { // used in step 2 and 3 Type Z = -X; X = c ? Y : X; Y = c ? 
Z : Y; } // matrix multiplication M = A * B template<typename Type> inline CUDA_CALLABLE void multAB(Type a11, Type a12, Type a13, Type a21, Type a22, Type a23, Type a31, Type a32, Type a33, // Type b11, Type b12, Type b13, Type b21, Type b22, Type b23, Type b31, Type b32, Type b33, // Type &m11, Type &m12, Type &m13, Type &m21, Type &m22, Type &m23, Type &m31, Type &m32, Type &m33) { m11=a11*b11 + a12*b21 + a13*b31; m12=a11*b12 + a12*b22 + a13*b32; m13=a11*b13 + a12*b23 + a13*b33; m21=a21*b11 + a22*b21 + a23*b31; m22=a21*b12 + a22*b22 + a23*b32; m23=a21*b13 + a22*b23 + a23*b33; m31=a31*b11 + a32*b21 + a33*b31; m32=a31*b12 + a32*b22 + a33*b32; m33=a31*b13 + a32*b23 + a33*b33; } // matrix multiplication M = Transpose[A] * B template<typename Type> inline CUDA_CALLABLE void multAtB(Type a11, Type a12, Type a13, Type a21, Type a22, Type a23, Type a31, Type a32, Type a33, // Type b11, Type b12, Type b13, Type b21, Type b22, Type b23, Type b31, Type b32, Type b33, // Type &m11, Type &m12, Type &m13, Type &m21, Type &m22, Type &m23, Type &m31, Type &m32, Type &m33) { m11=a11*b11 + a21*b21 + a31*b31; m12=a11*b12 + a21*b22 + a31*b32; m13=a11*b13 + a21*b23 + a31*b33; m21=a12*b11 + a22*b21 + a32*b31; m22=a12*b12 + a22*b22 + a32*b32; m23=a12*b13 + a22*b23 + a32*b33; m31=a13*b11 + a23*b21 + a33*b31; m32=a13*b12 + a23*b22 + a33*b32; m33=a13*b13 + a23*b23 + a33*b33; } template<typename Type> inline CUDA_CALLABLE void quatToMat3(const Type * qV, Type &m11, Type &m12, Type &m13, Type &m21, Type &m22, Type &m23, Type &m31, Type &m32, Type &m33 ) { Type w = qV[3]; Type x = qV[0]; Type y = qV[1]; Type z = qV[2]; Type qxx = x*x; Type qyy = y*y; Type qzz = z*z; Type qxz = x*z; Type qxy = x*y; Type qyz = y*z; Type qwx = w*x; Type qwy = w*y; Type qwz = w*z; m11=Type(1) - Type(2)*(qyy + qzz); m12=Type(2)*(qxy - qwz); m13=Type(2)*(qxz + qwy); m21=Type(2)*(qxy + qwz); m22=Type(1) - Type(2)*(qxx + qzz); m23=Type(2)*(qyz - qwx); m31=Type(2)*(qxz - qwy); m32=Type(2)*(qyz + qwx); m33=Type(1) - Type(2)*(qxx + qyy); } template<typename Type> inline CUDA_CALLABLE void approximateGivensQuaternion(Type a11, Type a12, Type a22, Type &ch, Type &sh) { /* * Given givens angle computed by approximateGivensAngles, * compute the corresponding rotation quaternion. 
*/ ch = Type(2)*(a11-a22); sh = a12; bool b = _gamma*sh*sh < ch*ch; Type w = Type(1) / sqrt(ch*ch+sh*sh); ch=b?w*ch:Type(_cstar); sh=b?w*sh:Type(_sstar); } template<typename Type> inline CUDA_CALLABLE void jacobiConjugation( const int x, const int y, const int z, Type &s11, Type &s21, Type &s22, Type &s31, Type &s32, Type &s33, Type * qV) { Type ch,sh; approximateGivensQuaternion(s11,s21,s22,ch,sh); Type scale = ch*ch+sh*sh; Type a = (ch*ch-sh*sh)/scale; Type b = (Type(2)*sh*ch)/scale; // make temp copy of S Type _s11 = s11; Type _s21 = s21; Type _s22 = s22; Type _s31 = s31; Type _s32 = s32; Type _s33 = s33; // perform conjugation S = Q'*S*Q // Q already implicitly solved from a, b s11 =a*(a*_s11 + b*_s21) + b*(a*_s21 + b*_s22); s21 =a*(-b*_s11 + a*_s21) + b*(-b*_s21 + a*_s22); s22=-b*(-b*_s11 + a*_s21) + a*(-b*_s21 + a*_s22); s31 =a*_s31 + b*_s32; s32=-b*_s31 + a*_s32; s33=_s33; // update cumulative rotation qV Type tmp[3]; tmp[0]=qV[0]*sh; tmp[1]=qV[1]*sh; tmp[2]=qV[2]*sh; sh *= qV[3]; qV[0] *= ch; qV[1] *= ch; qV[2] *= ch; qV[3] *= ch; // (x,y,z) corresponds to ((0,1,2),(1,2,0),(2,0,1)) // for (p,q) = ((0,1),(1,2),(0,2)) qV[z] += sh; qV[3] -= tmp[z]; // w qV[x] += tmp[y]; qV[y] -= tmp[x]; // re-arrange matrix for next iteration _s11 = s22; _s21 = s32; _s22 = s33; _s31 = s21; _s32 = s31; _s33 = s11; s11 = _s11; s21 = _s21; s22 = _s22; s31 = _s31; s32 = _s32; s33 = _s33; } template<typename Type> inline CUDA_CALLABLE Type dist2(Type x, Type y, Type z) { return x*x+y*y+z*z; } // finds transformation that diagonalizes a symmetric matrix template<typename Type> inline CUDA_CALLABLE void jacobiEigenanlysis( // symmetric matrix Type &s11, Type &s21, Type &s22, Type &s31, Type &s32, Type &s33, // quaternion representation of V Type * qV) { qV[3]=1; qV[0]=0;qV[1]=0;qV[2]=0; // follow same indexing convention as GLM for (int i=0;i<4;i++) { // we wish to eliminate the maximum off-diagonal element // on every iteration, but cycling over all 3 possible rotations // in fixed order (p,q) = (1,2) , (2,3), (1,3) still retains // asymptotic convergence jacobiConjugation(0,1,2,s11,s21,s22,s31,s32,s33,qV); // p,q = 0,1 jacobiConjugation(1,2,0,s11,s21,s22,s31,s32,s33,qV); // p,q = 1,2 jacobiConjugation(2,0,1,s11,s21,s22,s31,s32,s33,qV); // p,q = 0,2 } } template<typename Type> inline CUDA_CALLABLE void sortSingularValues(// matrix that we want to decompose Type &b11, Type &b12, Type &b13, Type &b21, Type &b22, Type &b23, Type &b31, Type &b32, Type &b33, // sort V simultaneously Type &v11, Type &v12, Type &v13, Type &v21, Type &v22, Type &v23, Type &v31, Type &v32, Type &v33) { Type rho1 = dist2(b11,b21,b31); Type rho2 = dist2(b12,b22,b32); Type rho3 = dist2(b13,b23,b33); bool c; c = rho1 < rho2; condNegSwap(c,b11,b12); condNegSwap(c,v11,v12); condNegSwap(c,b21,b22); condNegSwap(c,v21,v22); condNegSwap(c,b31,b32); condNegSwap(c,v31,v32); condSwap(c,rho1,rho2); c = rho1 < rho3; condNegSwap(c,b11,b13); condNegSwap(c,v11,v13); condNegSwap(c,b21,b23); condNegSwap(c,v21,v23); condNegSwap(c,b31,b33); condNegSwap(c,v31,v33); condSwap(c,rho1,rho3); c = rho2 < rho3; condNegSwap(c,b12,b13); condNegSwap(c,v12,v13); condNegSwap(c,b22,b23); condNegSwap(c,v22,v23); condNegSwap(c,b32,b33); condNegSwap(c,v32,v33); } template<typename Type> inline CUDA_CALLABLE void QRGivensQuaternion(Type a1, Type a2, Type &ch, Type &sh) { // a1 = pivot point on diagonal // a2 = lower triangular entry we want to annihilate Type epsilon = _EPSILON; Type rho = accurateSqrt(a1*a1 + a2*a2); sh = rho > epsilon ? 
a2 : Type(0); ch = abs(a1) + max(rho,epsilon); bool b = a1 < Type(0); condSwap(b,sh,ch); Type w = Type(1) / sqrt(ch*ch+sh*sh); ch *= w; sh *= w; } template<typename Type> inline CUDA_CALLABLE void QRDecomposition(// matrix that we want to decompose Type b11, Type b12, Type b13, Type b21, Type b22, Type b23, Type b31, Type b32, Type b33, // output Q Type &q11, Type &q12, Type &q13, Type &q21, Type &q22, Type &q23, Type &q31, Type &q32, Type &q33, // output R Type &r11, Type &r12, Type &r13, Type &r21, Type &r22, Type &r23, Type &r31, Type &r32, Type &r33) { Type ch1,sh1,ch2,sh2,ch3,sh3; Type a,b; // first givens rotation (ch,0,0,sh) QRGivensQuaternion(b11,b21,ch1,sh1); a=Type(1)-Type(2)*sh1*sh1; b=Type(2)*ch1*sh1; // apply B = Q' * B r11=a*b11+b*b21; r12=a*b12+b*b22; r13=a*b13+b*b23; r21=-b*b11+a*b21; r22=-b*b12+a*b22; r23=-b*b13+a*b23; r31=b31; r32=b32; r33=b33; // second givens rotation (ch,0,-sh,0) QRGivensQuaternion(r11,r31,ch2,sh2); a=Type(1)-Type(2)*sh2*sh2; b=Type(2)*ch2*sh2; // apply B = Q' * B; b11=a*r11+b*r31; b12=a*r12+b*r32; b13=a*r13+b*r33; b21=r21; b22=r22; b23=r23; b31=-b*r11+a*r31; b32=-b*r12+a*r32; b33=-b*r13+a*r33; // third givens rotation (ch,sh,0,0) QRGivensQuaternion(b22,b32,ch3,sh3); a=Type(1)-Type(2)*sh3*sh3; b=Type(2)*ch3*sh3; // R is now set to desired value r11=b11; r12=b12; r13=b13; r21=a*b21+b*b31; r22=a*b22+b*b32; r23=a*b23+b*b33; r31=-b*b21+a*b31; r32=-b*b22+a*b32; r33=-b*b23+a*b33; // construct the cumulative rotation Q=Q1 * Q2 * Q3 // the number of floating point operations for three quaternion multiplications // is more or less comparable to the explicit form of the joined matrix. // certainly more memory-efficient! Type sh12=sh1*sh1; Type sh22=sh2*sh2; Type sh32=sh3*sh3; q11=(Type(-1)+Type(2)*sh12)*(Type(-1)+Type(2)*sh22); q12=Type(4)*ch2*ch3*(Type(-1)+Type(2)*sh12)*sh2*sh3+Type(2)*ch1*sh1*(Type(-1)+Type(2)*sh32); q13=Type(4)*ch1*ch3*sh1*sh3-Type(2)*ch2*(Type(-1)+Type(2)*sh12)*sh2*(Type(-1)+Type(2)*sh32); q21=Type(2)*ch1*sh1*(Type(1)-Type(2)*sh22); q22=Type(-8)*ch1*ch2*ch3*sh1*sh2*sh3+(Type(-1)+Type(2)*sh12)*(Type(-1)+Type(2)*sh32); q23=Type(-2)*ch3*sh3+Type(4)*sh1*(ch3*sh1*sh3+ch1*ch2*sh2*(Type(-1)+Type(2)*sh32)); q31=Type(2)*ch2*sh2; q32=Type(2)*ch3*(Type(1)-Type(2)*sh22)*sh3; q33=(Type(-1)+Type(2)*sh22)*(Type(-1)+Type(2)*sh32); } template<typename Type> inline CUDA_CALLABLE void _svd(// input A Type a11, Type a12, Type a13, Type a21, Type a22, Type a23, Type a31, Type a32, Type a33, // output U Type &u11, Type &u12, Type &u13, Type &u21, Type &u22, Type &u23, Type &u31, Type &u32, Type &u33, // output S Type &s11, Type &s12, Type &s13, Type &s21, Type &s22, Type &s23, Type &s31, Type &s32, Type &s33, // output V Type &v11, Type &v12, Type &v13, Type &v21, Type &v22, Type &v23, Type &v31, Type &v32, Type &v33) { // normal equations matrix Type ATA11, ATA12, ATA13; Type ATA21, ATA22, ATA23; Type ATA31, ATA32, ATA33; multAtB(a11,a12,a13,a21,a22,a23,a31,a32,a33, a11,a12,a13,a21,a22,a23,a31,a32,a33, ATA11,ATA12,ATA13,ATA21,ATA22,ATA23,ATA31,ATA32,ATA33); // symmetric eigenalysis Type qV[4]; jacobiEigenanlysis( ATA11,ATA21,ATA22, ATA31,ATA32,ATA33,qV); quatToMat3(qV,v11,v12,v13,v21,v22,v23,v31,v32,v33); Type b11, b12, b13; Type b21, b22, b23; Type b31, b32, b33; multAB(a11,a12,a13,a21,a22,a23,a31,a32,a33, v11,v12,v13,v21,v22,v23,v31,v32,v33, b11, b12, b13, b21, b22, b23, b31, b32, b33); // sort singular values and find V sortSingularValues(b11, b12, b13, b21, b22, b23, b31, b32, b33, v11,v12,v13,v21,v22,v23,v31,v32,v33); // QR decomposition 
QRDecomposition(b11, b12, b13, b21, b22, b23, b31, b32, b33, u11, u12, u13, u21, u22, u23, u31, u32, u33, s11, s12, s13, s21, s22, s23, s31, s32, s33 ); } template<typename Type> inline CUDA_CALLABLE void svd3(const mat_t<3,3,Type>& A, mat_t<3,3,Type>& U, vec_t<3,Type>& sigma, mat_t<3,3,Type>& V) { Type s12, s13, s21, s23, s31, s32; _svd(A.data[0][0], A.data[0][1], A.data[0][2], A.data[1][0], A.data[1][1], A.data[1][2], A.data[2][0], A.data[2][1], A.data[2][2], U.data[0][0], U.data[0][1], U.data[0][2], U.data[1][0], U.data[1][1], U.data[1][2], U.data[2][0], U.data[2][1], U.data[2][2], sigma[0], s12, s13, s21, sigma[1], s23, s31, s32, sigma[2], V.data[0][0], V.data[0][1], V.data[0][2], V.data[1][0], V.data[1][1], V.data[1][2], V.data[2][0], V.data[2][1], V.data[2][2]); } template<typename Type> inline CUDA_CALLABLE void adj_svd3(const mat_t<3,3,Type>& A, const mat_t<3,3,Type>& U, const vec_t<3,Type>& sigma, const mat_t<3,3,Type>& V, mat_t<3,3,Type>& adj_A, const mat_t<3,3,Type>& adj_U, const vec_t<3,Type>& adj_sigma, const mat_t<3,3,Type>& adj_V) { Type sx2 = sigma[0] * sigma[0]; Type sy2 = sigma[1] * sigma[1]; Type sz2 = sigma[2] * sigma[2]; Type F01 = Type(1) / min(sy2 - sx2, Type(-1e-6f)); Type F02 = Type(1) / min(sz2 - sx2, Type(-1e-6f)); Type F12 = Type(1) / min(sz2 - sy2, Type(-1e-6f)); mat_t<3,3,Type> F = mat_t<3,3,Type>(0, F01, F02, -F01, 0, F12, -F02, -F12, 0); mat_t<3,3,Type> adj_sigma_mat = mat_t<3,3,Type>(adj_sigma[0], 0, 0, 0, adj_sigma[1], 0, 0, 0, adj_sigma[2]); mat_t<3,3,Type> s_mat = mat_t<3,3,Type>(sigma[0], 0, 0, 0, sigma[1], 0, 0, 0, sigma[2]); // https://github.com/pytorch/pytorch/blob/d7ddae8e4fe66fa1330317673438d1eb5aa99ca4/torch/csrc/autograd/FunctionsManual.cpp mat_t<3,3,Type> UT = transpose(U); mat_t<3,3,Type> VT = transpose(V); mat_t<3,3,Type> sigma_term = mul(U, mul(adj_sigma_mat, VT)); mat_t<3,3,Type> u_term = mul(mul(U, mul(cw_mul(F, (mul(UT, adj_U) - mul(transpose(adj_U), U))), s_mat)), VT); mat_t<3,3,Type> v_term = mul(U, mul(s_mat, mul(cw_mul(F, (mul(VT, adj_V) - mul(transpose(adj_V), V))), VT))); adj_A = adj_A + (u_term + v_term + sigma_term); } template<typename Type> inline CUDA_CALLABLE void qr3(const mat_t<3,3,Type>& A, mat_t<3,3,Type>& Q, mat_t<3,3,Type>& R) { QRDecomposition(A.data[0][0], A.data[0][1], A.data[0][2], A.data[1][0], A.data[1][1], A.data[1][2], A.data[2][0], A.data[2][1], A.data[2][2], Q.data[0][0], Q.data[0][1], Q.data[0][2], Q.data[1][0], Q.data[1][1], Q.data[1][2], Q.data[2][0], Q.data[2][1], Q.data[2][2], R.data[0][0], R.data[0][1], R.data[0][2], R.data[1][0], R.data[1][1], R.data[1][2], R.data[2][0], R.data[2][1], R.data[2][2]); } template<typename Type> inline CUDA_CALLABLE void adj_qr3(const mat_t<3,3,Type>& A, const mat_t<3,3,Type>& Q, const mat_t<3,3,Type>& R, mat_t<3,3,Type>& adj_A, const mat_t<3,3,Type>& adj_Q, const mat_t<3,3,Type>& adj_R) { // Eq 3 of https://arxiv.org/pdf/2009.10071.pdf mat_t<3,3,Type> M = mul(R,transpose(adj_R)) - mul(transpose(adj_Q), Q); mat_t<3,3,Type> copyltuM = mat_t<3,3,Type>(M.data[0][0], M.data[1][0], M.data[2][0], M.data[1][0], M.data[1][1], M.data[2][1], M.data[2][0], M.data[2][1], M.data[2][2]); adj_A = adj_A + mul(adj_Q + mul(Q,copyltuM), inverse(transpose(R))); } template<typename Type> inline CUDA_CALLABLE void eig3(const mat_t<3,3,Type>& A, mat_t<3,3,Type>& Q, vec_t<3,Type>& d) { Type qV[4]; Type s11 = A.data[0][0]; Type s21 = A.data[1][0]; Type s22 = A.data[1][1]; Type s31 = A.data[2][0]; Type s32 = A.data[2][1]; Type s33 = A.data[2][2]; jacobiEigenanlysis(s11, s21, s22, s31, s32, s33, qV); 
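    // qV now encodes, as a quaternion, the rotation that (approximately)
    // diagonalizes the symmetric input; it is expanded into the eigenvector
    // matrix Q below, and the eigenvalues are read off the diagonal of Q^T*A*Q.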
quatToMat3(qV, Q.data[0][0], Q.data[0][1], Q.data[0][2], Q.data[1][0], Q.data[1][1], Q.data[1][2], Q.data[2][0], Q.data[2][1], Q.data[2][2]); mat_t<3,3,Type> t; multAtB(Q.data[0][0], Q.data[0][1], Q.data[0][2], Q.data[1][0], Q.data[1][1], Q.data[1][2], Q.data[2][0], Q.data[2][1], Q.data[2][2], A.data[0][0], A.data[0][1], A.data[0][2], A.data[1][0], A.data[1][1], A.data[1][2], A.data[2][0], A.data[2][1], A.data[2][2], t.data[0][0], t.data[0][1], t.data[0][2], t.data[1][0], t.data[1][1], t.data[1][2], t.data[2][0], t.data[2][1], t.data[2][2]); mat_t<3,3,Type> u; multAB(t.data[0][0], t.data[0][1], t.data[0][2], t.data[1][0], t.data[1][1], t.data[1][2], t.data[2][0], t.data[2][1], t.data[2][2], Q.data[0][0], Q.data[0][1], Q.data[0][2], Q.data[1][0], Q.data[1][1], Q.data[1][2], Q.data[2][0], Q.data[2][1], Q.data[2][2], u.data[0][0], u.data[0][1], u.data[0][2], u.data[1][0], u.data[1][1], u.data[1][2], u.data[2][0], u.data[2][1], u.data[2][2] ); d = vec_t<3,Type>(u.data[0][0], u.data[1][1], u.data[2][2]); } template<typename Type> inline CUDA_CALLABLE void adj_eig3(const mat_t<3,3,Type>& A, const mat_t<3,3,Type>& Q, const vec_t<3,Type>& d, mat_t<3,3,Type>& adj_A, const mat_t<3,3,Type>& adj_Q, const vec_t<3,Type>& adj_d) { // Page 10 of https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf mat_t<3,3,Type> D = mat_t<3,3,Type>(d[0], 0, 0, 0, d[1], 0, 0, 0, d[2]); mat_t<3,3,Type> D_bar = mat_t<3,3,Type>(adj_d[0], 0, 0, 0, adj_d[1], 0, 0, 0, adj_d[2]); Type dyx = d[1] - d[0]; Type dzx = d[2] - d[0]; Type dzy = d[2] - d[1]; if ((dyx < Type(0)) && (dyx > Type(-1e-6))) dyx = -1e-6; if ((dyx > Type(0)) && (dyx < Type(1e-6))) dyx = 1e-6; if ((dzx < Type(0)) && (dzx > Type(-1e-6))) dzx = -1e-6; if ((dzx > Type(0)) && (dzx < Type(1e-6))) dzx = 1e-6; if ((dzy < Type(0)) && (dzy > Type(-1e-6))) dzy = -1e-6; if ((dzy > Type(0)) && (dzy < Type(1e-6))) dzy = 1e-6; Type F01 = Type(1) / dyx; Type F02 = Type(1) / dzx; Type F12 = Type(1) / dzy; mat_t<3,3,Type> F = mat_t<3,3,Type>(0, F01, F02, -F01, 0, F12, -F02, -F12, 0); mat_t<3,3,Type> QT = transpose(Q); adj_A = adj_A + mul(Q, mul(D_bar + cw_mul(F, mul(QT, adj_Q)), QT)); } }
20,561
C
35.522202
147
0.541559
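The header above exposes svd3(), qr3(), and eig3() as thin wrappers over the scalar routines. Below is a minimal host-side sketch of calling svd3(); it assumes the header and its mat.h dependencies compile for the host (with CUDA_CALLABLE expanding to nothing there), and it uses only types and constructors that appear in the source. The example function name and the test matrix are illustrative, not part of the library.

// Sketch: 3x3 SVD of a diagonal matrix via wp::svd3 (assumes svd.h is included
// and compiles for the host).
#include <cstdio>

void svd3_example()
{
    // 9-scalar mat_t constructor, as used for the matrix F in adj_svd3 above
    wp::mat_t<3,3,float> A(2.0f, 0.0f, 0.0f,
                           0.0f, 1.0f, 0.0f,
                           0.0f, 0.0f, 0.5f);

    wp::mat_t<3,3,float> U, V;
    wp::vec_t<3,float> sigma;

    wp::svd3(A, U, sigma, V);   // A ~= U * diag(sigma) * V^T

    // for this diagonal input, sortSingularValues() orders the result as (2, 1, 0.5)
    printf("sigma = (%f, %f, %f)\n", sigma[0], sigma[1], sigma[2]);
}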
NVIDIA/warp/warp/native/quat.h
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once #include "mat.h" namespace wp { template<typename Type> struct quat_t { // zero constructor for adjoint variable initialization inline CUDA_CALLABLE quat_t(Type x=Type(0), Type y=Type(0), Type z=Type(0), Type w=Type(0)) : x(x), y(y), z(z), w(w) {} explicit inline CUDA_CALLABLE quat_t(const vec_t<3,Type>& v, Type w=Type(0)) : x(v[0]), y(v[1]), z(v[2]), w(w) {} template<typename OtherType> explicit inline CUDA_CALLABLE quat_t(const quat_t<OtherType>& other) { x = static_cast<Type>(other.x); y = static_cast<Type>(other.y); z = static_cast<Type>(other.z); w = static_cast<Type>(other.w); } // imaginary part Type x; Type y; Type z; // real part Type w; }; using quat = quat_t<float>; using quath = quat_t<half>; using quatf = quat_t<float>; using quatd = quat_t<double>; template<typename Type> inline CUDA_CALLABLE bool operator==(const quat_t<Type>& a, const quat_t<Type>& b) { return a.x == b.x && a.y == b.y && a.z == b.z && a.w == b.w; } template<typename Type> inline bool CUDA_CALLABLE isfinite(const quat_t<Type>& q) { return isfinite(q.x) && isfinite(q.y) && isfinite(q.z) && isfinite(q.w); } template<typename Type> inline void CUDA_CALLABLE adj_isfinite(const quat_t<Type>& q, quat_t<Type>& adj_q, const bool &adj_ret) { } template<typename Type> inline bool CUDA_CALLABLE isnan(const quat_t<Type>& q) { return isnan(q.x) || isnan(q.y) || isnan(q.z) || isnan(q.w); } template<typename Type> inline void CUDA_CALLABLE adj_isnan(const quat_t<Type>& q, quat_t<Type>& adj_q, const bool &adj_ret) { } template<typename Type> inline bool CUDA_CALLABLE isinf(const quat_t<Type>& q) { return isinf(q.x) || isinf(q.y) || isinf(q.z) || isinf(q.w); } template<typename Type> inline void CUDA_CALLABLE adj_isinf(const quat_t<Type>& q, quat_t<Type>& adj_q, const bool &adj_ret) { } template<typename Type> inline CUDA_CALLABLE quat_t<Type> atomic_add(quat_t<Type> * addr, quat_t<Type> value) { Type x = atomic_add(&(addr -> x), value.x); Type y = atomic_add(&(addr -> y), value.y); Type z = atomic_add(&(addr -> z), value.z); Type w = atomic_add(&(addr -> w), value.w); return quat_t<Type>(x, y, z, w); } template<typename Type> inline CUDA_CALLABLE void adj_quat_t(Type x, Type y, Type z, Type w, Type& adj_x, Type& adj_y, Type& adj_z, Type& adj_w, quat_t<Type> adj_ret) { adj_x += adj_ret.x; adj_y += adj_ret.y; adj_z += adj_ret.z; adj_w += adj_ret.w; } template<typename Type> inline CUDA_CALLABLE void adj_quat_t(const vec_t<3,Type>& v, Type w, vec_t<3,Type>& adj_v, Type& adj_w, quat_t<Type> adj_ret) { adj_v[0] += adj_ret.x; adj_v[1] += adj_ret.y; adj_v[2] += adj_ret.z; adj_w += adj_ret.w; } // casting constructor adjoint template<typename Type, typename OtherType> inline CUDA_CALLABLE void adj_quat_t(const quat_t<OtherType>& other, quat_t<OtherType>& adj_other, const quat_t<Type>& adj_ret) { adj_other.x += static_cast<OtherType>(adj_ret.x); adj_other.y += static_cast<OtherType>(adj_ret.y); adj_other.z += static_cast<OtherType>(adj_ret.z); adj_other.w += static_cast<OtherType>(adj_ret.w); } // forward methods template<typename Type> inline CUDA_CALLABLE quat_t<Type> quat_from_axis_angle(const 
vec_t<3,Type>& axis, Type angle) { Type half = angle*Type(Type(0.5)); Type w = cos(half); Type sin_theta_over_two = sin(half); vec_t<3,Type> v = axis*sin_theta_over_two; return quat_t<Type>(v[0], v[1], v[2], w); } template<typename Type> inline CUDA_CALLABLE void quat_to_axis_angle(const quat_t<Type>& q, vec_t<3,Type>& axis, Type& angle) { vec_t<3,Type> v = vec_t<3,Type>(q.x, q.y, q.z); axis = q.w < Type(0) ? -normalize(v) : normalize(v); angle = Type(2) * atan2(length(v), abs(q.w)); } template<typename Type> inline CUDA_CALLABLE quat_t<Type> quat_rpy(Type roll, Type pitch, Type yaw) { Type cy = cos(yaw * Type(0.5)); Type sy = sin(yaw * Type(0.5)); Type cr = cos(roll * Type(0.5)); Type sr = sin(roll * Type(0.5)); Type cp = cos(pitch * Type(0.5)); Type sp = sin(pitch * Type(0.5)); Type w = (cy * cr * cp + sy * sr * sp); Type x = (cy * sr * cp - sy * cr * sp); Type y = (cy * cr * sp + sy * sr * cp); Type z = (sy * cr * cp - cy * sr * sp); return quat_t<Type>(x, y, z, w); } template<typename Type> inline CUDA_CALLABLE quat_t<Type> quat_inverse(const quat_t<Type>& q) { return quat_t<Type>(-q.x, -q.y, -q.z, q.w); } template<typename Type> inline CUDA_CALLABLE Type dot(const quat_t<Type>& a, const quat_t<Type>& b) { return a.x*b.x + a.y*b.y + a.z*b.z + a.w*b.w; } template<typename Type> inline CUDA_CALLABLE Type tensordot(const quat_t<Type>& a, const quat_t<Type>& b) { // corresponds to `np.tensordot()` with all axes being contracted return dot(a, b); } template<typename Type> inline CUDA_CALLABLE Type length(const quat_t<Type>& q) { return sqrt(dot(q, q)); } template<typename Type> inline CUDA_CALLABLE Type length_sq(const quat_t<Type>& q) { return dot(q, q); } template<typename Type> inline CUDA_CALLABLE quat_t<Type> normalize(const quat_t<Type>& q) { Type l = length(q); if (l > Type(kEps)) { Type inv_l = Type(1)/l; return quat_t<Type>(q.x*inv_l, q.y*inv_l, q.z*inv_l, q.w*inv_l); } else { return quat_t<Type>(Type(0), Type(0), Type(0), Type(1)); } } template<typename Type> inline CUDA_CALLABLE quat_t<Type> add(const quat_t<Type>& a, const quat_t<Type>& b) { return quat_t<Type>(a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w); } template<typename Type> inline CUDA_CALLABLE quat_t<Type> sub(const quat_t<Type>& a, const quat_t<Type>& b) { return quat_t<Type>(a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w);} template<typename Type> inline CUDA_CALLABLE quat_t<Type> mul(const quat_t<Type>& a, const quat_t<Type>& b) { return quat_t<Type>(a.w*b.x + b.w*a.x + a.y*b.z - b.y*a.z, a.w*b.y + b.w*a.y + a.z*b.x - b.z*a.x, a.w*b.z + b.w*a.z + a.x*b.y - b.x*a.y, a.w*b.w - a.x*b.x - a.y*b.y - a.z*b.z); } template<typename Type> inline CUDA_CALLABLE quat_t<Type> mul(const quat_t<Type>& a, Type s) { return quat_t<Type>(a.x*s, a.y*s, a.z*s, a.w*s); } template<typename Type> inline CUDA_CALLABLE quat_t<Type> mul(Type s, const quat_t<Type>& a) { return mul(a, s); } // division template<typename Type> inline CUDA_CALLABLE quat_t<Type> div(quat_t<Type> q, Type s) { return quat_t<Type>(q.x/s, q.y/s, q.z/s, q.w/s); } template<typename Type> inline CUDA_CALLABLE quat_t<Type> div(Type s, quat_t<Type> q) { return quat_t<Type>(s/q.x, s/q.y, s/q.z, s/q.w); } template<typename Type> inline CUDA_CALLABLE quat_t<Type> operator / (quat_t<Type> a, Type s) { return div(a,s); } template<typename Type> inline CUDA_CALLABLE quat_t<Type> operator / (Type s, quat_t<Type> a) { return div(s,a); } template<typename Type> inline CUDA_CALLABLE quat_t<Type> operator*(Type s, const quat_t<Type>& a) { return mul(a, s); } template<typename Type> inline CUDA_CALLABLE 
quat_t<Type> operator*(const quat_t<Type>& a, Type s) { return mul(a, s); } template<typename Type> inline CUDA_CALLABLE vec_t<3,Type> quat_rotate(const quat_t<Type>& q, const vec_t<3,Type>& x) { Type c = (Type(2)*q.w*q.w-Type(1)); Type d = Type(2)*(q.x*x.c[0] + q.y*x.c[1] + q.z*x.c[2]); return vec_t<3,Type>( x.c[0]*c + q.x*d + (q.y * x[2] - q.z * x[1])*q.w*Type(2), x.c[1]*c + q.y*d + (q.z * x[0] - q.x * x[2])*q.w*Type(2), x.c[2]*c + q.z*d + (q.x * x[1] - q.y * x[0])*q.w*Type(2) ); } template<typename Type> inline CUDA_CALLABLE vec_t<3,Type> quat_rotate_inv(const quat_t<Type>& q, const vec_t<3,Type>& x) { Type c = (Type(2)*q.w*q.w-Type(1)); Type d = Type(2)*(q.x*x.c[0] + q.y*x.c[1] + q.z*x.c[2]); return vec_t<3,Type>( x.c[0]*c + q.x*d - (q.y * x[2] - q.z * x[1])*q.w*Type(2), x.c[1]*c + q.y*d - (q.z * x[0] - q.x * x[2])*q.w*Type(2), x.c[2]*c + q.z*d - (q.x * x[1] - q.y * x[0])*q.w*Type(2) ); } template<typename Type> inline CUDA_CALLABLE quat_t<Type> quat_slerp(const quat_t<Type>& q0, const quat_t<Type>& q1, Type t) { vec_t<3,Type> axis; Type angle; quat_to_axis_angle(mul(quat_inverse(q0), q1), axis, angle); return mul(q0, quat_from_axis_angle(axis, t * angle)); } template<typename Type> inline CUDA_CALLABLE mat_t<3,3,Type> quat_to_matrix(const quat_t<Type>& q) { vec_t<3,Type> c1 = quat_rotate(q, vec_t<3,Type>(1.0, 0.0, 0.0)); vec_t<3,Type> c2 = quat_rotate(q, vec_t<3,Type>(0.0, 1.0, 0.0)); vec_t<3,Type> c3 = quat_rotate(q, vec_t<3,Type>(0.0, 0.0, 1.0)); return mat_t<3,3,Type>(c1, c2, c3); } template<typename Type> inline CUDA_CALLABLE quat_t<Type> quat_from_matrix(const mat_t<3,3,Type>& m) { const Type tr = m.data[0][0] + m.data[1][1] + m.data[2][2]; Type x, y, z, w, h = Type(0); if (tr >= Type(0)) { h = sqrt(tr + Type(1)); w = Type(0.5) * h; h = Type(0.5) / h; x = (m.data[2][1] - m.data[1][2]) * h; y = (m.data[0][2] - m.data[2][0]) * h; z = (m.data[1][0] - m.data[0][1]) * h; } else { size_t max_diag = 0; if (m.data[1][1] > m.data[0][0]) { max_diag = 1; } if (m.data[2][2] > m.data[max_diag][max_diag]) { max_diag = 2; } if (max_diag == 0) { h = sqrt((m.data[0][0] - (m.data[1][1] + m.data[2][2])) + Type(1)); x = Type(0.5) * h; h = Type(0.5) / h; y = (m.data[0][1] + m.data[1][0]) * h; z = (m.data[2][0] + m.data[0][2]) * h; w = (m.data[2][1] - m.data[1][2]) * h; } else if (max_diag == 1) { h = sqrt((m.data[1][1] - (m.data[2][2] + m.data[0][0])) + Type(1)); y = Type(0.5) * h; h = Type(0.5) / h; z = (m.data[1][2] + m.data[2][1]) * h; x = (m.data[0][1] + m.data[1][0]) * h; w = (m.data[0][2] - m.data[2][0]) * h; } if (max_diag == 2) { h = sqrt((m.data[2][2] - (m.data[0][0] + m.data[1][1])) + Type(1)); z = Type(0.5) * h; h = Type(0.5) / h; x = (m.data[2][0] + m.data[0][2]) * h; y = (m.data[1][2] + m.data[2][1]) * h; w = (m.data[1][0] - m.data[0][1]) * h; } } return normalize(quat_t<Type>(x, y, z, w)); } template<typename Type> inline CUDA_CALLABLE Type extract(const quat_t<Type>& a, int idx) { #if FP_CHECK if (idx < 0 || idx > 3) { printf("quat_t index %d out of bounds at %s %d", idx, __FILE__, __LINE__); assert(0); } #endif /* * Because quat data is not stored in an array, we index the quaternion by checking all possible idx values. * (&a.x)[idx] would be the preferred access strategy, but this results in undefined behavior in the clang compiler * at optimization level 3. 
*/ if (idx == 0) {return a.x;} else if (idx == 1) {return a.y;} else if (idx == 2) {return a.z;} else {return a.w;} } template<typename Type> CUDA_CALLABLE inline quat_t<Type> lerp(const quat_t<Type>& a, const quat_t<Type>& b, Type t) { return a*(Type(1)-t) + b*t; } template<typename Type> CUDA_CALLABLE inline void adj_lerp(const quat_t<Type>& a, const quat_t<Type>& b, Type t, quat_t<Type>& adj_a, quat_t<Type>& adj_b, Type& adj_t, const quat_t<Type>& adj_ret) { adj_a += adj_ret*(Type(1)-t); adj_b += adj_ret*t; adj_t += tensordot(b, adj_ret) - tensordot(a, adj_ret); } template<typename Type> inline CUDA_CALLABLE void adj_extract(const quat_t<Type>& a, int idx, quat_t<Type>& adj_a, int & adj_idx, Type & adj_ret) { #if FP_CHECK if (idx < 0 || idx > 3) { printf("quat_t index %d out of bounds at %s %d", idx, __FILE__, __LINE__); assert(0); } #endif // See wp::extract(const quat_t<Type>& a, int idx) note if (idx == 0) {adj_a.x += adj_ret;} else if (idx == 1) {adj_a.y += adj_ret;} else if (idx == 2) {adj_a.z += adj_ret;} else {adj_a.w += adj_ret;} } // backward methods template<typename Type> inline CUDA_CALLABLE void adj_quat_from_axis_angle(const vec_t<3,Type>& axis, Type angle, vec_t<3,Type>& adj_axis, Type& adj_angle, const quat_t<Type>& adj_ret) { vec_t<3,Type> v = vec_t<3,Type>(adj_ret.x, adj_ret.y, adj_ret.z); Type s = sin(angle*Type(0.5)); Type c = cos(angle*Type(0.5)); quat_t<Type> dqda = quat_t<Type>(axis[0]*c, axis[1]*c, axis[2]*c, -s)*Type(0.5); adj_axis += v*s; adj_angle += dot(dqda, adj_ret); } template<typename Type> inline CUDA_CALLABLE void adj_quat_to_axis_angle(const quat_t<Type>& q, vec_t<3,Type>& axis, Type& angle, quat_t<Type>& adj_q, const vec_t<3,Type>& adj_axis, const Type& adj_angle) { Type l = length(vec_t<3,Type>(q.x, q.y, q.z)); Type ax_qx = Type(0); Type ax_qy = Type(0); Type ax_qz = Type(0); Type ay_qx = Type(0); Type ay_qy = Type(0); Type ay_qz = Type(0); Type az_qx = Type(0); Type az_qy = Type(0); Type az_qz = Type(0); Type t_qx = Type(0); Type t_qy = Type(0); Type t_qz = Type(0); Type t_qw = Type(0); Type flip = q.w < Type(0) ? 
-1.0 : 1.0; if (l > Type(0)) { Type l_sq = l*l; Type l_inv = Type(1) / l; Type l_inv_sq = l_inv * l_inv; Type l_inv_cu = l_inv_sq * l_inv; Type C = flip * l_inv_cu; ax_qx = C * (q.y*q.y + q.z*q.z); ax_qy = -C * q.x*q.y; ax_qz = -C * q.x*q.z; ay_qx = -C * q.y*q.x; ay_qy = C * (q.x*q.x + q.z*q.z); ay_qz = -C * q.y*q.z; az_qx = -C * q.z*q.x; az_qy = -C * q.z*q.y; az_qz = C * (q.x*q.x + q.y*q.y); Type D = Type(2) * flip / (l_sq + q.w*q.w); t_qx = D * l_inv * q.x * q.w; t_qy = D * l_inv * q.y * q.w; t_qz = D * l_inv * q.z * q.w; t_qw = -D * l; } else { if (abs(q.w) > Type(kEps)) { Type t_qx = Type(2) / (sqrt(Type(3)) * abs(q.w)); Type t_qy = Type(2) / (sqrt(Type(3)) * abs(q.w)); Type t_qz = Type(2) / (sqrt(Type(3)) * abs(q.w)); } // o/w we have a null quat_t which cannot backpropagate } adj_q.x += ax_qx * adj_axis[0] + ay_qx * adj_axis[1] + az_qx * adj_axis[2] + t_qx * adj_angle; adj_q.y += ax_qy * adj_axis[0] + ay_qy * adj_axis[1] + az_qy * adj_axis[2] + t_qy * adj_angle; adj_q.z += ax_qz * adj_axis[0] + ay_qz * adj_axis[1] + az_qz * adj_axis[2] + t_qz * adj_angle; adj_q.w += t_qw * adj_angle; } template<typename Type> inline CUDA_CALLABLE void adj_quat_rpy(Type roll, Type pitch, Type yaw, Type& adj_roll, Type& adj_pitch, Type& adj_yaw, const quat_t<Type>& adj_ret) { Type cy = cos(yaw * Type(0.5)); Type sy = sin(yaw * Type(0.5)); Type cr = cos(roll * Type(0.5)); Type sr = sin(roll * Type(0.5)); Type cp = cos(pitch * Type(0.5)); Type sp = sin(pitch * Type(0.5)); Type w = (cy * cr * cp + sy * sr * sp); Type x = (cy * sr * cp - sy * cr * sp); Type y = (cy * cr * sp + sy * sr * cp); Type z = (sy * cr * cp - cy * sr * sp); Type dx_dr = Type(0.5) * w; Type dx_dp = -Type(0.5) * cy * sr * sp - Type(0.5) * sy * cr * cp; Type dx_dy = -Type(0.5) * y; Type dy_dr = Type(0.5) * z; Type dy_dp = Type(0.5) * cy * cr * cp - Type(0.5) * sy * sr * sp; Type dy_dy = Type(0.5) * x; Type dz_dr = -Type(0.5) * y; Type dz_dp = -Type(0.5) * sy * cr * sp - Type(0.5) * cy * sr * cp; Type dz_dy = Type(0.5) * w; Type dw_dr = -Type(0.5) * x; Type dw_dp = -Type(0.5) * cy * cr * sp + Type(0.5) * sy * sr * cp; Type dw_dy = -Type(0.5) * z; adj_roll += dot(quat_t<Type>(dx_dr, dy_dr, dz_dr, dw_dr), adj_ret); adj_pitch += dot(quat_t<Type>(dx_dp, dy_dp, dz_dp, dw_dp), adj_ret); adj_yaw += dot(quat_t<Type>(dx_dy, dy_dy, dz_dy, dw_dy), adj_ret); } template<typename Type> inline CUDA_CALLABLE void adj_dot(const quat_t<Type>& a, const quat_t<Type>& b, quat_t<Type>& adj_a, quat_t<Type>& adj_b, const Type adj_ret) { adj_a += b*adj_ret; adj_b += a*adj_ret; } template<typename Type> inline CUDA_CALLABLE void tensordot(const quat_t<Type>& a, const quat_t<Type>& b, quat_t<Type>& adj_a, quat_t<Type>& adj_b, const Type adj_ret) { adj_dot(a, b, adj_a, adj_b, adj_ret); } template<typename Type> inline CUDA_CALLABLE void adj_length(const quat_t<Type>& a, Type ret, quat_t<Type>& adj_a, const Type adj_ret) { if (ret > Type(kEps)) { Type inv_l = Type(1)/ret; adj_a += quat_t<Type>(a.x*inv_l, a.y*inv_l, a.z*inv_l, a.w*inv_l) * adj_ret; } } template<typename Type> inline CUDA_CALLABLE void adj_length_sq(const quat_t<Type>& a, quat_t<Type>& adj_a, const Type adj_ret) { adj_a += Type(2)*a*adj_ret; } template<typename Type> inline CUDA_CALLABLE void adj_normalize(const quat_t<Type>& q, quat_t<Type>& adj_q, const quat_t<Type>& adj_ret) { Type l = length(q); if (l > Type(kEps)) { Type l_inv = Type(1)/l; adj_q += adj_ret*l_inv - q*(l_inv*l_inv*l_inv*dot(q, adj_ret)); } } template<typename Type> inline CUDA_CALLABLE void adj_quat_inverse(const quat_t<Type>& q, 
quat_t<Type>& adj_q, const quat_t<Type>& adj_ret) { adj_q.x -= adj_ret.x; adj_q.y -= adj_ret.y; adj_q.z -= adj_ret.z; adj_q.w += adj_ret.w; } template<typename Type> inline CUDA_CALLABLE void adj_add(const quat_t<Type>& a, const quat_t<Type>& b, quat_t<Type>& adj_a, quat_t<Type>& adj_b, const quat_t<Type>& adj_ret) { adj_a += adj_ret; adj_b += adj_ret; } template<typename Type> inline CUDA_CALLABLE void adj_sub(const quat_t<Type>& a, const quat_t<Type>& b, quat_t<Type>& adj_a, quat_t<Type>& adj_b, const quat_t<Type>& adj_ret) { adj_a += adj_ret; adj_b -= adj_ret; } template<typename Type> inline CUDA_CALLABLE void adj_mul(const quat_t<Type>& a, const quat_t<Type>& b, quat_t<Type>& adj_a, quat_t<Type>& adj_b, const quat_t<Type>& adj_ret) { // shorthand const quat_t<Type>& r = adj_ret; adj_a += quat_t<Type>(b.w*r.x - b.x*r.w + b.y*r.z - b.z*r.y, b.w*r.y - b.y*r.w - b.x*r.z + b.z*r.x, b.w*r.z + b.x*r.y - b.y*r.x - b.z*r.w, b.w*r.w + b.x*r.x + b.y*r.y + b.z*r.z); adj_b += quat_t<Type>(a.w*r.x - a.x*r.w - a.y*r.z + a.z*r.y, a.w*r.y - a.y*r.w + a.x*r.z - a.z*r.x, a.w*r.z - a.x*r.y + a.y*r.x - a.z*r.w, a.w*r.w + a.x*r.x + a.y*r.y + a.z*r.z); } template<typename Type> inline CUDA_CALLABLE void adj_mul(const quat_t<Type>& a, Type s, quat_t<Type>& adj_a, Type& adj_s, const quat_t<Type>& adj_ret) { adj_a += adj_ret*s; adj_s += dot(a, adj_ret); } template<typename Type> inline CUDA_CALLABLE void adj_mul(Type s, const quat_t<Type>& a, Type& adj_s, quat_t<Type>& adj_a, const quat_t<Type>& adj_ret) { adj_mul(a, s, adj_a, adj_s, adj_ret); } template<typename Type> inline CUDA_CALLABLE void adj_div(quat_t<Type> a, Type s, quat_t<Type>& adj_a, Type& adj_s, const quat_t<Type>& adj_ret) { adj_s -= dot(a, adj_ret)/ (s * s); // - a / s^2 adj_a += adj_ret / s; } template<typename Type> inline CUDA_CALLABLE void adj_div(Type s, quat_t<Type> a, Type& adj_s, quat_t<Type>& adj_a, const quat_t<Type>& adj_ret) { adj_s -= dot(a, adj_ret)/ (s * s); // - a / s^2 adj_a += s / adj_ret; } template<typename Type> inline CUDA_CALLABLE void adj_quat_rotate(const quat_t<Type>& q, const vec_t<3,Type>& p, quat_t<Type>& adj_q, vec_t<3,Type>& adj_p, const vec_t<3,Type>& adj_ret) { { Type t2 = p[2]*q.z*Type(2); Type t3 = p[1]*q.w*Type(2); Type t4 = p[0]*q.w*Type(2); Type t5 = p[0]*q.x*Type(2); Type t6 = p[1]*q.y*Type(2); Type t7 = p[2]*q.y*Type(2); Type t8 = p[0]*q.z*Type(2); Type t9 = p[0]*q.y*Type(2); Type t10 = p[1]*q.x*Type(2); adj_q.x += adj_ret[2]*(t3+t8)+adj_ret[0]*(t2+t6+p[0]*q.x*Type(4))+adj_ret[1]*(t9-p[2]*q.w*Type(2)); adj_q.y += adj_ret[1]*(t2+t5+p[1]*q.y*Type(4))+adj_ret[0]*(t10+p[2]*q.w*Type(2))-adj_ret[2]*(t4-p[1]*q.z*Type(2)); adj_q.z += adj_ret[1]*(t4+t7)+adj_ret[2]*(t5+t6+p[2]*q.z*Type(4))-adj_ret[0]*(t3-p[2]*q.x*Type(2)); adj_q.w += adj_ret[0]*(t7+p[0]*q.w*Type(4)-p[1]*q.z*Type(2))+adj_ret[1]*(t8+p[1]*q.w*Type(4)-p[2]*q.x*Type(2))+adj_ret[2]*(-t9+t10+p[2]*q.w*Type(4)); } { Type t2 = q.w*q.w; Type t3 = t2*Type(2); Type t4 = q.w*q.z*Type(2); Type t5 = q.x*q.y*Type(2); Type t6 = q.w*q.y*Type(2); Type t7 = q.w*q.x*Type(2); Type t8 = q.y*q.z*Type(2); adj_p[0] += adj_ret[1]*(t4+t5)+adj_ret[0]*(t3+(q.x*q.x)*Type(2)-Type(1))-adj_ret[2]*(t6-q.x*q.z*Type(2)); adj_p[1] += adj_ret[2]*(t7+t8)-adj_ret[0]*(t4-t5)+adj_ret[1]*(t3+(q.y*q.y)*Type(2)-Type(1)); adj_p[2] += -adj_ret[1]*(t7-t8)+adj_ret[2]*(t3+(q.z*q.z)*Type(2)-Type(1))+adj_ret[0]*(t6+q.x*q.z*Type(2)); } } template<typename Type> inline CUDA_CALLABLE void adj_quat_rotate_inv(const quat_t<Type>& q, const vec_t<3,Type>& p, quat_t<Type>& adj_q, vec_t<3,Type>& adj_p, const 
vec_t<3,Type>& adj_ret) { const vec_t<3,Type>& r = adj_ret; { Type t2 = p[2]*q.w*Type(2); Type t3 = p[2]*q.z*Type(2); Type t4 = p[1]*q.w*Type(2); Type t5 = p[0]*q.w*Type(2); Type t6 = p[0]*q.x*Type(2); Type t7 = p[1]*q.y*Type(2); Type t8 = p[1]*q.z*Type(2); Type t9 = p[2]*q.x*Type(2); Type t10 = p[0]*q.y*Type(2); adj_q.x += r[1]*(t2+t10)+r[0]*(t3+t7+p[0]*q.x*Type(4))-r[2]*(t4-p[0]*q.z*Type(2)); adj_q.y += r[2]*(t5+t8)+r[1]*(t3+t6+p[1]*q.y*Type(4))-r[0]*(t2-p[1]*q.x*Type(2)); adj_q.z += r[0]*(t4+t9)+r[2]*(t6+t7+p[2]*q.z*Type(4))-r[1]*(t5-p[2]*q.y*Type(2)); adj_q.w += r[0]*(t8+p[0]*q.w*Type(4)-p[2]*q.y*Type(2))+r[1]*(t9+p[1]*q.w*Type(4)-p[0]*q.z*Type(2))+r[2]*(t10-p[1]*q.x*Type(2)+p[2]*q.w*Type(4)); } { Type t2 = q.w*q.w; Type t3 = t2*Type(2); Type t4 = q.w*q.z*Type(2); Type t5 = q.w*q.y*Type(2); Type t6 = q.x*q.z*Type(2); Type t7 = q.w*q.x*Type(2); adj_p[0] += r[2]*(t5+t6)+r[0]*(t3+(q.x*q.x)*Type(2)-Type(1))-r[1]*(t4-q.x*q.y*Type(2)); adj_p[1] += r[1]*(t3+(q.y*q.y)*Type(2)-Type(1))+r[0]*(t4+q.x*q.y*Type(2))-r[2]*(t7-q.y*q.z*Type(2)); adj_p[2] += -r[0]*(t5-t6)+r[2]*(t3+(q.z*q.z)*Type(2)-Type(1))+r[1]*(t7+q.y*q.z*Type(2)); } } template<typename Type> inline CUDA_CALLABLE void adj_quat_slerp(const quat_t<Type>& q0, const quat_t<Type>& q1, Type t, quat_t<Type>& ret, quat_t<Type>& adj_q0, quat_t<Type>& adj_q1, Type& adj_t, const quat_t<Type>& adj_ret) { vec_t<3,Type> axis; Type angle; quat_t<Type> q0_inv = quat_inverse(q0); quat_t<Type> q_inc = mul(q0_inv, q1); quat_to_axis_angle(q_inc, axis, angle); quat_t<Type> qt = quat_from_axis_angle(axis, angle * t); angle = angle * 0.5; // adj_t adj_t += dot(mul(ret, quat_t<Type>(angle*axis[0], angle*axis[1], angle*axis[2], Type(0))), adj_ret); // adj_q0 quat_t<Type> q_inc_x_q0; quat_t<Type> q_inc_y_q0; quat_t<Type> q_inc_z_q0; quat_t<Type> q_inc_w_q0; quat_t<Type> q_inc_x_q1; quat_t<Type> q_inc_y_q1; quat_t<Type> q_inc_z_q1; quat_t<Type> q_inc_w_q1; adj_mul(q0_inv, q1, q_inc_x_q0, q_inc_x_q1, quat_t<Type>(1.f, Type(0), Type(0), Type(0))); adj_mul(q0_inv, q1, q_inc_y_q0, q_inc_y_q1, quat_t<Type>(Type(0), 1.f, Type(0), Type(0))); adj_mul(q0_inv, q1, q_inc_z_q0, q_inc_z_q1, quat_t<Type>(Type(0), Type(0), 1.f, Type(0))); adj_mul(q0_inv, q1, q_inc_w_q0, q_inc_w_q1, quat_t<Type>(Type(0), Type(0), Type(0), 1.f)); quat_t<Type> a_x_q_inc; quat_t<Type> a_y_q_inc; quat_t<Type> a_z_q_inc; quat_t<Type> t_q_inc; adj_quat_to_axis_angle(q_inc, axis, angle, a_x_q_inc, vec_t<3,Type>(1.f, Type(0), Type(0)), Type(0)); adj_quat_to_axis_angle(q_inc, axis, angle, a_y_q_inc, vec_t<3,Type>(Type(0), 1.f, Type(0)), Type(0)); adj_quat_to_axis_angle(q_inc, axis, angle, a_z_q_inc, vec_t<3,Type>(Type(0), Type(0), 1.f), Type(0)); adj_quat_to_axis_angle(q_inc, axis, angle, t_q_inc, vec_t<3,Type>(Type(0), Type(0), Type(0)), Type(1)); Type cs = cos(angle*t); Type sn = sin(angle*t); quat_t<Type> q_inc_q0_x = quat_t<Type>(-q_inc_x_q0.x, -q_inc_y_q0.x, -q_inc_z_q0.x, -q_inc_w_q0.x); quat_t<Type> q_inc_q0_y = quat_t<Type>(-q_inc_x_q0.y, -q_inc_y_q0.y, -q_inc_z_q0.y, -q_inc_w_q0.y); quat_t<Type> q_inc_q0_z = quat_t<Type>(-q_inc_x_q0.z, -q_inc_y_q0.z, -q_inc_z_q0.z, -q_inc_w_q0.z); quat_t<Type> q_inc_q0_w = quat_t<Type>(q_inc_x_q0.w, q_inc_y_q0.w, q_inc_z_q0.w, q_inc_w_q0.w); Type a_x_q0_x = dot(a_x_q_inc, q_inc_q0_x); Type a_x_q0_y = dot(a_x_q_inc, q_inc_q0_y); Type a_x_q0_z = dot(a_x_q_inc, q_inc_q0_z); Type a_x_q0_w = dot(a_x_q_inc, q_inc_q0_w); Type a_y_q0_x = dot(a_y_q_inc, q_inc_q0_x); Type a_y_q0_y = dot(a_y_q_inc, q_inc_q0_y); Type a_y_q0_z = dot(a_y_q_inc, q_inc_q0_z); Type a_y_q0_w = 
dot(a_y_q_inc, q_inc_q0_w); Type a_z_q0_x = dot(a_z_q_inc, q_inc_q0_x); Type a_z_q0_y = dot(a_z_q_inc, q_inc_q0_y); Type a_z_q0_z = dot(a_z_q_inc, q_inc_q0_z); Type a_z_q0_w = dot(a_z_q_inc, q_inc_q0_w); Type t_q0_x = dot(t_q_inc, q_inc_q0_x); Type t_q0_y = dot(t_q_inc, q_inc_q0_y); Type t_q0_z = dot(t_q_inc, q_inc_q0_z); Type t_q0_w = dot(t_q_inc, q_inc_q0_w); quat_t<Type> q_s_q0_x = mul(quat_t<Type>(1.f, Type(0), Type(0), Type(0)), qt) + mul(q0, quat_t<Type>( 0.5 * t * axis[0] * t_q0_x * cs + a_x_q0_x * sn, 0.5 * t * axis[1] * t_q0_x * cs + a_y_q0_x * sn, 0.5 * t * axis[2] * t_q0_x * cs + a_z_q0_x * sn, -0.5 * t * t_q0_x * sn)); quat_t<Type> q_s_q0_y = mul(quat_t<Type>(Type(0), 1.f, Type(0), Type(0)), qt) + mul(q0, quat_t<Type>( 0.5 * t * axis[0] * t_q0_y * cs + a_x_q0_y * sn, 0.5 * t * axis[1] * t_q0_y * cs + a_y_q0_y * sn, 0.5 * t * axis[2] * t_q0_y * cs + a_z_q0_y * sn, -0.5 * t * t_q0_y * sn)); quat_t<Type> q_s_q0_z = mul(quat_t<Type>(Type(0), Type(0), 1.f, Type(0)), qt) + mul(q0, quat_t<Type>( 0.5 * t * axis[0] * t_q0_z * cs + a_x_q0_z * sn, 0.5 * t * axis[1] * t_q0_z * cs + a_y_q0_z * sn, 0.5 * t * axis[2] * t_q0_z * cs + a_z_q0_z * sn, -0.5 * t * t_q0_z * sn)); quat_t<Type> q_s_q0_w = mul(quat_t<Type>(Type(0), Type(0), Type(0), 1.f), qt) + mul(q0, quat_t<Type>( 0.5 * t * axis[0] * t_q0_w * cs + a_x_q0_w * sn, 0.5 * t * axis[1] * t_q0_w * cs + a_y_q0_w * sn, 0.5 * t * axis[2] * t_q0_w * cs + a_z_q0_w * sn, -0.5 * t * t_q0_w * sn)); adj_q0.x += dot(q_s_q0_x, adj_ret); adj_q0.y += dot(q_s_q0_y, adj_ret); adj_q0.z += dot(q_s_q0_z, adj_ret); adj_q0.w += dot(q_s_q0_w, adj_ret); // adj_q1 quat_t<Type> q_inc_q1_x = quat_t<Type>(q_inc_x_q1.x, q_inc_y_q1.x, q_inc_z_q1.x, q_inc_w_q1.x); quat_t<Type> q_inc_q1_y = quat_t<Type>(q_inc_x_q1.y, q_inc_y_q1.y, q_inc_z_q1.y, q_inc_w_q1.y); quat_t<Type> q_inc_q1_z = quat_t<Type>(q_inc_x_q1.z, q_inc_y_q1.z, q_inc_z_q1.z, q_inc_w_q1.z); quat_t<Type> q_inc_q1_w = quat_t<Type>(q_inc_x_q1.w, q_inc_y_q1.w, q_inc_z_q1.w, q_inc_w_q1.w); Type a_x_q1_x = dot(a_x_q_inc, q_inc_q1_x); Type a_x_q1_y = dot(a_x_q_inc, q_inc_q1_y); Type a_x_q1_z = dot(a_x_q_inc, q_inc_q1_z); Type a_x_q1_w = dot(a_x_q_inc, q_inc_q1_w); Type a_y_q1_x = dot(a_y_q_inc, q_inc_q1_x); Type a_y_q1_y = dot(a_y_q_inc, q_inc_q1_y); Type a_y_q1_z = dot(a_y_q_inc, q_inc_q1_z); Type a_y_q1_w = dot(a_y_q_inc, q_inc_q1_w); Type a_z_q1_x = dot(a_z_q_inc, q_inc_q1_x); Type a_z_q1_y = dot(a_z_q_inc, q_inc_q1_y); Type a_z_q1_z = dot(a_z_q_inc, q_inc_q1_z); Type a_z_q1_w = dot(a_z_q_inc, q_inc_q1_w); Type t_q1_x = dot(t_q_inc, q_inc_q1_x); Type t_q1_y = dot(t_q_inc, q_inc_q1_y); Type t_q1_z = dot(t_q_inc, q_inc_q1_z); Type t_q1_w = dot(t_q_inc, q_inc_q1_w); quat_t<Type> q_s_q1_x = mul(q0, quat_t<Type>( 0.5 * t * axis[0] * t_q1_x * cs + a_x_q1_x * sn, 0.5 * t * axis[1] * t_q1_x * cs + a_y_q1_x * sn, 0.5 * t * axis[2] * t_q1_x * cs + a_z_q1_x * sn, -0.5 * t * t_q1_x * sn)); quat_t<Type> q_s_q1_y = mul(q0, quat_t<Type>( 0.5 * t * axis[0] * t_q1_y * cs + a_x_q1_y * sn, 0.5 * t * axis[1] * t_q1_y * cs + a_y_q1_y * sn, 0.5 * t * axis[2] * t_q1_y * cs + a_z_q1_y * sn, -0.5 * t * t_q1_y * sn)); quat_t<Type> q_s_q1_z = mul(q0, quat_t<Type>( 0.5 * t * axis[0] * t_q1_z * cs + a_x_q1_z * sn, 0.5 * t * axis[1] * t_q1_z * cs + a_y_q1_z * sn, 0.5 * t * axis[2] * t_q1_z * cs + a_z_q1_z * sn, -0.5 * t * t_q1_z * sn)); quat_t<Type> q_s_q1_w = mul(q0, quat_t<Type>( 0.5 * t * axis[0] * t_q1_w * cs + a_x_q1_w * sn, 0.5 * t * axis[1] * t_q1_w * cs + a_y_q1_w * sn, 0.5 * t * axis[2] * t_q1_w * cs + a_z_q1_w * sn, -0.5 * t * 
t_q1_w * sn)); adj_q1.x += dot(q_s_q1_x, adj_ret); adj_q1.y += dot(q_s_q1_y, adj_ret); adj_q1.z += dot(q_s_q1_z, adj_ret); adj_q1.w += dot(q_s_q1_w, adj_ret); } template<typename Type> inline CUDA_CALLABLE void adj_quat_to_matrix(const quat_t<Type>& q, quat_t<Type>& adj_q, mat_t<3,3,Type>& adj_ret) { // we don't care about adjoint w.r.t. constant identity matrix vec_t<3,Type> t; adj_quat_rotate(q, vec_t<3,Type>(1.0, 0.0, 0.0), adj_q, t, adj_ret.get_col(0)); adj_quat_rotate(q, vec_t<3,Type>(0.0, 1.0, 0.0), adj_q, t, adj_ret.get_col(1)); adj_quat_rotate(q, vec_t<3,Type>(0.0, 0.0, 1.0), adj_q, t, adj_ret.get_col(2)); } template<typename Type> inline CUDA_CALLABLE void adj_quat_from_matrix(const mat_t<3,3,Type>& m, mat_t<3,3,Type>& adj_m, const quat_t<Type>& adj_ret) { const Type tr = m.data[0][0] + m.data[1][1] + m.data[2][2]; Type x, y, z, w, h = Type(0); Type dx_dm00 = Type(0), dx_dm01 = Type(0), dx_dm02 = Type(0); Type dx_dm10 = Type(0), dx_dm11 = Type(0), dx_dm12 = Type(0); Type dx_dm20 = Type(0), dx_dm21 = Type(0), dx_dm22 = Type(0); Type dy_dm00 = Type(0), dy_dm01 = Type(0), dy_dm02 = Type(0); Type dy_dm10 = Type(0), dy_dm11 = Type(0), dy_dm12 = Type(0); Type dy_dm20 = Type(0), dy_dm21 = Type(0), dy_dm22 = Type(0); Type dz_dm00 = Type(0), dz_dm01 = Type(0), dz_dm02 = Type(0); Type dz_dm10 = Type(0), dz_dm11 = Type(0), dz_dm12 = Type(0); Type dz_dm20 = Type(0), dz_dm21 = Type(0), dz_dm22 = Type(0); Type dw_dm00 = Type(0), dw_dm01 = Type(0), dw_dm02 = Type(0); Type dw_dm10 = Type(0), dw_dm11 = Type(0), dw_dm12 = Type(0); Type dw_dm20 = Type(0), dw_dm21 = Type(0), dw_dm22 = Type(0); if (tr >= Type(0)) { h = sqrt(tr + Type(1)); w = Type(0.5) * h; h = Type(0.5) / h; x = (m.data[2][1] - m.data[1][2]) * h; y = (m.data[0][2] - m.data[2][0]) * h; z = (m.data[1][0] - m.data[0][1]) * h; dw_dm00 = Type(0.5) * h; dw_dm11 = Type(0.5) * h; dw_dm22 = Type(0.5) * h; dx_dm21 = h; dx_dm12 = -h; dx_dm00 = Type(2) * h*h*h * (m.data[1][2] - m.data[2][1]); dx_dm11 = Type(2) * h*h*h * (m.data[1][2] - m.data[2][1]); dx_dm22 = Type(2) * h*h*h * (m.data[1][2] - m.data[2][1]); dy_dm02 = h; dy_dm20 = -h; dy_dm00 = Type(2) * h*h*h * (m.data[2][0] - m.data[0][2]); dy_dm11 = Type(2) * h*h*h * (m.data[2][0] - m.data[0][2]); dy_dm22 = Type(2) * h*h*h * (m.data[2][0] - m.data[0][2]); dz_dm10 = h; dz_dm01 = -h; dz_dm00 = Type(2) * h*h*h * (m.data[0][1] - m.data[1][0]); dz_dm11 = Type(2) * h*h*h * (m.data[0][1] - m.data[1][0]); dz_dm22 = Type(2) * h*h*h * (m.data[0][1] - m.data[1][0]); } else { size_t max_diag = 0; if (m.data[1][1] > m.data[0][0]) { max_diag = 1; } if (m.data[2][2] > m.data[max_diag][max_diag]) { max_diag = 2; } if (max_diag == 0) { h = sqrt((m.data[0][0] - (m.data[1][1] + m.data[2][2])) + Type(1)); x = Type(0.5) * h; h = Type(0.5) / h; y = (m.data[0][1] + m.data[1][0]) * h; z = (m.data[2][0] + m.data[0][2]) * h; w = (m.data[2][1] - m.data[1][2]) * h; dx_dm00 = Type(0.5) * h; dx_dm11 = -Type(0.5) * h; dx_dm22 = -Type(0.5) * h; dy_dm01 = h; dy_dm10 = h; dy_dm00 = -Type(2) * h*h*h * (m.data[0][1] + m.data[1][0]); dy_dm11 = Type(2) * h*h*h * (m.data[0][1] + m.data[1][0]); dy_dm22 = Type(2) * h*h*h * (m.data[0][1] + m.data[1][0]); dz_dm20 = h; dz_dm02 = h; dz_dm00 = -Type(2) * h*h*h * (m.data[2][0] + m.data[0][2]); dz_dm11 = Type(2) * h*h*h * (m.data[2][0] + m.data[0][2]); dz_dm22 = Type(2) * h*h*h * (m.data[2][0] + m.data[0][2]); dw_dm21 = h; dw_dm12 = -h; dw_dm00 = Type(2) * h*h*h * (m.data[1][2] - m.data[2][1]); dw_dm11 = Type(2) * h*h*h * (m.data[2][1] - m.data[1][2]); dw_dm22 = Type(2) * h*h*h * 
(m.data[2][1] - m.data[1][2]); } else if (max_diag == 1) { h = sqrt((m.data[1][1] - (m.data[2][2] + m.data[0][0])) + Type(1)); y = Type(0.5) * h; h = Type(0.5) / h; z = (m.data[1][2] + m.data[2][1]) * h; x = (m.data[0][1] + m.data[1][0]) * h; w = (m.data[0][2] - m.data[2][0]) * h; dy_dm00 = -Type(0.5) * h; dy_dm11 = Type(0.5) * h; dy_dm22 = -Type(0.5) * h; dz_dm12 = h; dz_dm21 = h; dz_dm00 = Type(2) * h*h*h * (m.data[1][2] + m.data[2][1]); dz_dm11 = -Type(2) * h*h*h * (m.data[1][2] + m.data[2][1]); dz_dm22 = Type(2) * h*h*h * (m.data[1][2] + m.data[2][1]); dx_dm01 = h; dx_dm10 = h; dx_dm00 = Type(2) * h*h*h * (m.data[0][1] + m.data[1][0]); dx_dm11 = -Type(2) * h*h*h * (m.data[0][1] + m.data[1][0]); dx_dm22 = Type(2) * h*h*h * (m.data[0][1] + m.data[1][0]); dw_dm02 = h; dw_dm20 = -h; dw_dm00 = Type(2) * h*h*h * (m.data[0][2] - m.data[2][0]); dw_dm11 = Type(2) * h*h*h * (m.data[2][0] - m.data[0][2]); dw_dm22 = Type(2) * h*h*h * (m.data[0][2] - m.data[2][0]); } if (max_diag == 2) { h = sqrt((m.data[2][2] - (m.data[0][0] + m.data[1][1])) + Type(1)); z = Type(0.5) * h; h = Type(0.5) / h; x = (m.data[2][0] + m.data[0][2]) * h; y = (m.data[1][2] + m.data[2][1]) * h; w = (m.data[1][0] - m.data[0][1]) * h; dz_dm00 = -Type(0.5) * h; dz_dm11 = -Type(0.5) * h; dz_dm22 = Type(0.5) * h; dx_dm20 = h; dx_dm02 = h; dx_dm00 = Type(2) * h*h*h * (m.data[2][0] + m.data[0][2]); dx_dm11 = Type(2) * h*h*h * (m.data[2][0] + m.data[0][2]); dx_dm22 = -Type(2) * h*h*h * (m.data[2][0] + m.data[0][2]); dy_dm12 = h; dy_dm21 = h; dy_dm00 = Type(2) * h*h*h * (m.data[1][2] + m.data[2][1]); dy_dm11 = Type(2) * h*h*h * (m.data[1][2] + m.data[2][1]); dy_dm22 = -Type(2) * h*h*h * (m.data[1][2] + m.data[2][1]); dw_dm10 = h; dw_dm01 = -h; dw_dm00 = Type(2) * h*h*h * (m.data[1][0] - m.data[0][1]); dw_dm11 = Type(2) * h*h*h * (m.data[1][0] - m.data[0][1]); dw_dm22 = Type(2) * h*h*h * (m.data[0][1] - m.data[1][0]); } } quat_t<Type> dq_dm00 = quat_t<Type>(dx_dm00, dy_dm00, dz_dm00, dw_dm00); quat_t<Type> dq_dm01 = quat_t<Type>(dx_dm01, dy_dm01, dz_dm01, dw_dm01); quat_t<Type> dq_dm02 = quat_t<Type>(dx_dm02, dy_dm02, dz_dm02, dw_dm02); quat_t<Type> dq_dm10 = quat_t<Type>(dx_dm10, dy_dm10, dz_dm10, dw_dm10); quat_t<Type> dq_dm11 = quat_t<Type>(dx_dm11, dy_dm11, dz_dm11, dw_dm11); quat_t<Type> dq_dm12 = quat_t<Type>(dx_dm12, dy_dm12, dz_dm12, dw_dm12); quat_t<Type> dq_dm20 = quat_t<Type>(dx_dm20, dy_dm20, dz_dm20, dw_dm20); quat_t<Type> dq_dm21 = quat_t<Type>(dx_dm21, dy_dm21, dz_dm21, dw_dm21); quat_t<Type> dq_dm22 = quat_t<Type>(dx_dm22, dy_dm22, dz_dm22, dw_dm22); quat_t<Type> adj_q; adj_normalize(quat_t<Type>(x, y, z, w), adj_q, adj_ret); adj_m.data[0][0] += dot(dq_dm00, adj_q); adj_m.data[0][1] += dot(dq_dm01, adj_q); adj_m.data[0][2] += dot(dq_dm02, adj_q); adj_m.data[1][0] += dot(dq_dm10, adj_q); adj_m.data[1][1] += dot(dq_dm11, adj_q); adj_m.data[1][2] += dot(dq_dm12, adj_q); adj_m.data[2][0] += dot(dq_dm20, adj_q); adj_m.data[2][1] += dot(dq_dm21, adj_q); adj_m.data[2][2] += dot(dq_dm22, adj_q); } template<typename Type> inline CUDA_CALLABLE void adj_mat_t(const vec_t<3,Type>& pos, const quat_t<Type>& rot, const vec_t<3,Type>& scale, vec_t<3,Type>& adj_pos, quat_t<Type>& adj_rot, vec_t<3,Type>& adj_scale, const mat_t<4,4,Type>& adj_ret) { mat_t<3,3,Type> R = quat_to_matrix(rot); mat_t<3,3,Type> adj_R(0); adj_pos[0] += adj_ret.data[0][3]; adj_pos[1] += adj_ret.data[1][3]; adj_pos[2] += adj_ret.data[2][3]; adj_mul(R.data[0][0], scale[0], adj_R.data[0][0], adj_scale[0], adj_ret.data[0][0]); adj_mul(R.data[1][0], scale[0], 
adj_R.data[1][0], adj_scale[0], adj_ret.data[1][0]); adj_mul(R.data[2][0], scale[0], adj_R.data[2][0], adj_scale[0], adj_ret.data[2][0]); adj_mul(R.data[0][1], scale[1], adj_R.data[0][1], adj_scale[1], adj_ret.data[0][1]); adj_mul(R.data[1][1], scale[1], adj_R.data[1][1], adj_scale[1], adj_ret.data[1][1]); adj_mul(R.data[2][1], scale[1], adj_R.data[2][1], adj_scale[1], adj_ret.data[2][1]); adj_mul(R.data[0][2], scale[2], adj_R.data[0][2], adj_scale[2], adj_ret.data[0][2]); adj_mul(R.data[1][2], scale[2], adj_R.data[1][2], adj_scale[2], adj_ret.data[1][2]); adj_mul(R.data[2][2], scale[2], adj_R.data[2][2], adj_scale[2], adj_ret.data[2][2]); adj_quat_to_matrix(rot, adj_rot, adj_R); } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE mat_t<Rows,Cols,Type>::mat_t(const vec_t<3,Type>& pos, const quat_t<Type>& rot, const vec_t<3,Type>& scale) { mat_t<3,3,Type> R = quat_to_matrix(rot); data[0][0] = R.data[0][0]*scale[0]; data[1][0] = R.data[1][0]*scale[0]; data[2][0] = R.data[2][0]*scale[0]; data[3][0] = Type(0); data[0][1] = R.data[0][1]*scale[1]; data[1][1] = R.data[1][1]*scale[1]; data[2][1] = R.data[2][1]*scale[1]; data[3][1] = Type(0); data[0][2] = R.data[0][2]*scale[2]; data[1][2] = R.data[1][2]*scale[2]; data[2][2] = R.data[2][2]*scale[2]; data[3][2] = Type(0); data[0][3] = pos[0]; data[1][3] = pos[1]; data[2][3] = pos[2]; data[3][3] = Type(1); } template<typename Type=float32> inline CUDA_CALLABLE quat_t<Type> quat_identity() { return quat_t<Type>(Type(0), Type(0), Type(0), Type(1)); } } // namespace wp
39,920
C
34.867924
201
0.519063
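A short usage sketch for the quaternion helpers above (quat_from_axis_angle, quat_rotate, quat_slerp, quat_identity). As with the previous example, this assumes the header compiles for the host; the chosen values and the function name are illustrative only.

// Sketch: build a rotation, apply it to a vector, and interpolate toward it.
#include <cstdio>

void quat_example()
{
    // quarter-turn about the Z axis
    wp::vec_t<3,float> axis(0.0f, 0.0f, 1.0f);
    wp::quat_t<float> q = wp::quat_from_axis_angle(axis, 1.5707963f);

    // rotates (1,0,0) to approximately (0,1,0)
    wp::vec_t<3,float> v = wp::quat_rotate(q, wp::vec_t<3,float>(1.0f, 0.0f, 0.0f));

    // halfway between the identity and q: a 45-degree rotation about Z
    wp::quat_t<float> h = wp::quat_slerp(wp::quat_identity<float>(), q, 0.5f);

    printf("v = (%f, %f, %f)  h = (%f, %f, %f, %f)\n",
           v[0], v[1], v[2], h.x, h.y, h.z, h.w);
}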
NVIDIA/warp/warp/native/array.h
#pragma once #include "builtin.h" namespace wp { #if FP_CHECK #define FP_ASSERT_FWD(value) \ print(value); \ printf(")\n"); \ assert(0); \ #define FP_ASSERT_ADJ(value, adj_value) \ print(value); \ printf(", "); \ print(adj_value); \ printf(")\n"); \ assert(0); \ #define FP_VERIFY_FWD(value) \ if (!isfinite(value)) { \ printf("%s:%d - %s(addr", __FILE__, __LINE__, __FUNCTION__); \ FP_ASSERT_FWD(value) \ } \ #define FP_VERIFY_FWD_1(value) \ if (!isfinite(value)) { \ printf("%s:%d - %s(arr, %d) ", __FILE__, __LINE__, __FUNCTION__, i); \ FP_ASSERT_FWD(value) \ } \ #define FP_VERIFY_FWD_2(value) \ if (!isfinite(value)) { \ printf("%s:%d - %s(arr, %d, %d) ", __FILE__, __LINE__, __FUNCTION__, i, j); \ FP_ASSERT_FWD(value) \ } \ #define FP_VERIFY_FWD_3(value) \ if (!isfinite(value)) { \ printf("%s:%d - %s(arr, %d, %d, %d) ", __FILE__, __LINE__, __FUNCTION__, i, j, k); \ FP_ASSERT_FWD(value) \ } \ #define FP_VERIFY_FWD_4(value) \ if (!isfinite(value)) { \ printf("%s:%d - %s(arr, %d, %d, %d, %d) ", __FILE__, __LINE__, __FUNCTION__, i, j, k, l); \ FP_ASSERT_FWD(value) \ } \ #define FP_VERIFY_ADJ(value, adj_value) \ if (!isfinite(value) || !isfinite(adj_value)) \ { \ printf("%s:%d - %s(addr", __FILE__, __LINE__, __FUNCTION__); \ FP_ASSERT_ADJ(value, adj_value); \ } \ #define FP_VERIFY_ADJ_1(value, adj_value) \ if (!isfinite(value) || !isfinite(adj_value)) \ { \ printf("%s:%d - %s(arr, %d) ", __FILE__, __LINE__, __FUNCTION__, i); \ FP_ASSERT_ADJ(value, adj_value); \ } \ #define FP_VERIFY_ADJ_2(value, adj_value) \ if (!isfinite(value) || !isfinite(adj_value)) \ { \ printf("%s:%d - %s(arr, %d, %d) ", __FILE__, __LINE__, __FUNCTION__, i, j); \ FP_ASSERT_ADJ(value, adj_value); \ } \ #define FP_VERIFY_ADJ_3(value, adj_value) \ if (!isfinite(value) || !isfinite(adj_value)) \ { \ printf("%s:%d - %s(arr, %d, %d, %d) ", __FILE__, __LINE__, __FUNCTION__, i, j, k); \ FP_ASSERT_ADJ(value, adj_value); \ } \ #define FP_VERIFY_ADJ_4(value, adj_value) \ if (!isfinite(value) || !isfinite(adj_value)) \ { \ printf("%s:%d - %s(arr, %d, %d, %d, %d) ", __FILE__, __LINE__, __FUNCTION__, i, j, k, l); \ FP_ASSERT_ADJ(value, adj_value); \ } \ #else #define FP_VERIFY_FWD(value) {} #define FP_VERIFY_FWD_1(value) {} #define FP_VERIFY_FWD_2(value) {} #define FP_VERIFY_FWD_3(value) {} #define FP_VERIFY_FWD_4(value) {} #define FP_VERIFY_ADJ(value, adj_value) {} #define FP_VERIFY_ADJ_1(value, adj_value) {} #define FP_VERIFY_ADJ_2(value, adj_value) {} #define FP_VERIFY_ADJ_3(value, adj_value) {} #define FP_VERIFY_ADJ_4(value, adj_value) {} #endif // WP_FP_CHECK const int ARRAY_MAX_DIMS = 4; // must match constant in types.py // must match constants in types.py const int ARRAY_TYPE_REGULAR = 0; const int ARRAY_TYPE_INDEXED = 1; const int ARRAY_TYPE_FABRIC = 2; const int ARRAY_TYPE_FABRIC_INDEXED = 3; struct shape_t { int dims[ARRAY_MAX_DIMS]; CUDA_CALLABLE inline shape_t() : dims() {} CUDA_CALLABLE inline int operator[](int i) const { assert(i < ARRAY_MAX_DIMS); return dims[i]; } CUDA_CALLABLE inline int& operator[](int i) { assert(i < ARRAY_MAX_DIMS); return dims[i]; } }; CUDA_CALLABLE inline int extract(const shape_t& s, int i) { return s.dims[i]; } CUDA_CALLABLE inline void adj_extract(const shape_t& s, int i, const shape_t& adj_s, int adj_i, int adj_ret) {} inline CUDA_CALLABLE void print(shape_t s) { // todo: only print valid dims, currently shape has a fixed size // but we don't know how many dims are valid (e.g.: 1d, 2d, etc) // should probably store ndim with shape printf("(%d, %d, %d, %d)\n", s.dims[0], s.dims[1], s.dims[2], 
s.dims[3]); } inline CUDA_CALLABLE void adj_print(shape_t s, shape_t& shape_t) {} template <typename T> struct array_t { CUDA_CALLABLE inline array_t() : data(nullptr), grad(nullptr), shape(), strides(), ndim(0) {} CUDA_CALLABLE array_t(T* data, int size, T* grad=nullptr) : data(data), grad(grad) { // constructor for 1d array shape.dims[0] = size; shape.dims[1] = 0; shape.dims[2] = 0; shape.dims[3] = 0; ndim = 1; strides[0] = sizeof(T); strides[1] = 0; strides[2] = 0; strides[3] = 0; } CUDA_CALLABLE array_t(T* data, int dim0, int dim1, T* grad=nullptr) : data(data), grad(grad) { // constructor for 2d array shape.dims[0] = dim0; shape.dims[1] = dim1; shape.dims[2] = 0; shape.dims[3] = 0; ndim = 2; strides[0] = dim1 * sizeof(T); strides[1] = sizeof(T); strides[2] = 0; strides[3] = 0; } CUDA_CALLABLE array_t(T* data, int dim0, int dim1, int dim2, T* grad=nullptr) : data(data), grad(grad) { // constructor for 3d array shape.dims[0] = dim0; shape.dims[1] = dim1; shape.dims[2] = dim2; shape.dims[3] = 0; ndim = 3; strides[0] = dim1 * dim2 * sizeof(T); strides[1] = dim2 * sizeof(T); strides[2] = sizeof(T); strides[3] = 0; } CUDA_CALLABLE array_t(T* data, int dim0, int dim1, int dim2, int dim3, T* grad=nullptr) : data(data), grad(grad) { // constructor for 4d array shape.dims[0] = dim0; shape.dims[1] = dim1; shape.dims[2] = dim2; shape.dims[3] = dim3; ndim = 4; strides[0] = dim1 * dim2 * dim3 * sizeof(T); strides[1] = dim2 * dim3 * sizeof(T); strides[2] = dim3 * sizeof(T); strides[3] = sizeof(T); } CUDA_CALLABLE inline bool empty() const { return !data; } T* data; T* grad; shape_t shape; int strides[ARRAY_MAX_DIMS]; int ndim; CUDA_CALLABLE inline operator T*() const { return data; } }; // TODO: // - templated index type? // - templated dimensionality? (also for array_t to save space when passing arrays to kernels) template <typename T> struct indexedarray_t { CUDA_CALLABLE inline indexedarray_t() : arr(), indices(), shape() {} CUDA_CALLABLE inline bool empty() const { return !arr.data; } array_t<T> arr; int* indices[ARRAY_MAX_DIMS]; // index array per dimension (can be NULL) shape_t shape; // element count per dimension (num. 
indices if indexed, array dim if not) }; // return stride (in bytes) of the given index template <typename T> CUDA_CALLABLE inline size_t stride(const array_t<T>& a, int dim) { return size_t(a.strides[dim]); } template <typename T> CUDA_CALLABLE inline T* data_at_byte_offset(const array_t<T>& a, size_t byte_offset) { return reinterpret_cast<T*>(reinterpret_cast<char*>(a.data) + byte_offset); } template <typename T> CUDA_CALLABLE inline T* grad_at_byte_offset(const array_t<T>& a, size_t byte_offset) { return reinterpret_cast<T*>(reinterpret_cast<char*>(a.grad) + byte_offset); } template <typename T> CUDA_CALLABLE inline size_t byte_offset(const array_t<T>& arr, int i) { assert(i >= 0 && i < arr.shape[0]); return i*stride(arr, 0); } template <typename T> CUDA_CALLABLE inline size_t byte_offset(const array_t<T>& arr, int i, int j) { assert(i >= 0 && i < arr.shape[0]); assert(j >= 0 && j < arr.shape[1]); return i*stride(arr, 0) + j*stride(arr, 1); } template <typename T> CUDA_CALLABLE inline size_t byte_offset(const array_t<T>& arr, int i, int j, int k) { assert(i >= 0 && i < arr.shape[0]); assert(j >= 0 && j < arr.shape[1]); assert(k >= 0 && k < arr.shape[2]); return i*stride(arr, 0) + j*stride(arr, 1) + k*stride(arr, 2); } template <typename T> CUDA_CALLABLE inline size_t byte_offset(const array_t<T>& arr, int i, int j, int k, int l) { assert(i >= 0 && i < arr.shape[0]); assert(j >= 0 && j < arr.shape[1]); assert(k >= 0 && k < arr.shape[2]); assert(l >= 0 && l < arr.shape[3]); return i*stride(arr, 0) + j*stride(arr, 1) + k*stride(arr, 2) + l*stride(arr, 3); } template <typename T> CUDA_CALLABLE inline T& index(const array_t<T>& arr, int i) { assert(arr.ndim == 1); T& result = *data_at_byte_offset(arr, byte_offset(arr, i)); FP_VERIFY_FWD_1(result) return result; } template <typename T> CUDA_CALLABLE inline T& index(const array_t<T>& arr, int i, int j) { assert(arr.ndim == 2); T& result = *data_at_byte_offset(arr, byte_offset(arr, i, j)); FP_VERIFY_FWD_2(result) return result; } template <typename T> CUDA_CALLABLE inline T& index(const array_t<T>& arr, int i, int j, int k) { assert(arr.ndim == 3); T& result = *data_at_byte_offset(arr, byte_offset(arr, i, j, k)); FP_VERIFY_FWD_3(result) return result; } template <typename T> CUDA_CALLABLE inline T& index(const array_t<T>& arr, int i, int j, int k, int l) { assert(arr.ndim == 4); T& result = *data_at_byte_offset(arr, byte_offset(arr, i, j, k, l)); FP_VERIFY_FWD_4(result) return result; } template <typename T> CUDA_CALLABLE inline T& index_grad(const array_t<T>& arr, int i) { T& result = *grad_at_byte_offset(arr, byte_offset(arr, i)); FP_VERIFY_FWD_1(result) return result; } template <typename T> CUDA_CALLABLE inline T& index_grad(const array_t<T>& arr, int i, int j) { T& result = *grad_at_byte_offset(arr, byte_offset(arr, i, j)); FP_VERIFY_FWD_2(result) return result; } template <typename T> CUDA_CALLABLE inline T& index_grad(const array_t<T>& arr, int i, int j, int k) { T& result = *grad_at_byte_offset(arr, byte_offset(arr, i, j, k)); FP_VERIFY_FWD_3(result) return result; } template <typename T> CUDA_CALLABLE inline T& index_grad(const array_t<T>& arr, int i, int j, int k, int l) { T& result = *grad_at_byte_offset(arr, byte_offset(arr, i, j, k, l)); FP_VERIFY_FWD_4(result) return result; } template <typename T> CUDA_CALLABLE inline T& index(const indexedarray_t<T>& iarr, int i) { assert(iarr.arr.ndim == 1); assert(i >= 0 && i < iarr.shape[0]); if (iarr.indices[0]) { i = iarr.indices[0][i]; assert(i >= 0 && i < iarr.arr.shape[0]); } T& result = 
*data_at_byte_offset(iarr.arr, byte_offset(iarr.arr, i)); FP_VERIFY_FWD_1(result) return result; } template <typename T> CUDA_CALLABLE inline T& index(const indexedarray_t<T>& iarr, int i, int j) { assert(iarr.arr.ndim == 2); assert(i >= 0 && i < iarr.shape[0]); assert(j >= 0 && j < iarr.shape[1]); if (iarr.indices[0]) { i = iarr.indices[0][i]; assert(i >= 0 && i < iarr.arr.shape[0]); } if (iarr.indices[1]) { j = iarr.indices[1][j]; assert(j >= 0 && j < iarr.arr.shape[1]); } T& result = *data_at_byte_offset(iarr.arr, byte_offset(iarr.arr, i, j)); FP_VERIFY_FWD_1(result) return result; } template <typename T> CUDA_CALLABLE inline T& index(const indexedarray_t<T>& iarr, int i, int j, int k) { assert(iarr.arr.ndim == 3); assert(i >= 0 && i < iarr.shape[0]); assert(j >= 0 && j < iarr.shape[1]); assert(k >= 0 && k < iarr.shape[2]); if (iarr.indices[0]) { i = iarr.indices[0][i]; assert(i >= 0 && i < iarr.arr.shape[0]); } if (iarr.indices[1]) { j = iarr.indices[1][j]; assert(j >= 0 && j < iarr.arr.shape[1]); } if (iarr.indices[2]) { k = iarr.indices[2][k]; assert(k >= 0 && k < iarr.arr.shape[2]); } T& result = *data_at_byte_offset(iarr.arr, byte_offset(iarr.arr, i, j, k)); FP_VERIFY_FWD_1(result) return result; } template <typename T> CUDA_CALLABLE inline T& index(const indexedarray_t<T>& iarr, int i, int j, int k, int l) { assert(iarr.arr.ndim == 4); assert(i >= 0 && i < iarr.shape[0]); assert(j >= 0 && j < iarr.shape[1]); assert(k >= 0 && k < iarr.shape[2]); assert(l >= 0 && l < iarr.shape[3]); if (iarr.indices[0]) { i = iarr.indices[0][i]; assert(i >= 0 && i < iarr.arr.shape[0]); } if (iarr.indices[1]) { j = iarr.indices[1][j]; assert(j >= 0 && j < iarr.arr.shape[1]); } if (iarr.indices[2]) { k = iarr.indices[2][k]; assert(k >= 0 && k < iarr.arr.shape[2]); } if (iarr.indices[3]) { l = iarr.indices[3][l]; assert(l >= 0 && l < iarr.arr.shape[3]); } T& result = *data_at_byte_offset(iarr.arr, byte_offset(iarr.arr, i, j, k, l)); FP_VERIFY_FWD_1(result) return result; } template <typename T> CUDA_CALLABLE inline array_t<T> view(array_t<T>& src, int i) { assert(src.ndim > 1); assert(i >= 0 && i < src.shape[0]); array_t<T> a; a.data = data_at_byte_offset(src, byte_offset(src, i)); a.shape[0] = src.shape[1]; a.shape[1] = src.shape[2]; a.shape[2] = src.shape[3]; a.strides[0] = src.strides[1]; a.strides[1] = src.strides[2]; a.strides[2] = src.strides[3]; a.ndim = src.ndim-1; return a; } template <typename T> CUDA_CALLABLE inline array_t<T> view(array_t<T>& src, int i, int j) { assert(src.ndim > 2); assert(i >= 0 && i < src.shape[0]); assert(j >= 0 && j < src.shape[1]); array_t<T> a; a.data = data_at_byte_offset(src, byte_offset(src, i, j)); a.shape[0] = src.shape[2]; a.shape[1] = src.shape[3]; a.strides[0] = src.strides[2]; a.strides[1] = src.strides[3]; a.ndim = src.ndim-2; return a; } template <typename T> CUDA_CALLABLE inline array_t<T> view(array_t<T>& src, int i, int j, int k) { assert(src.ndim > 3); assert(i >= 0 && i < src.shape[0]); assert(j >= 0 && j < src.shape[1]); assert(k >= 0 && k < src.shape[2]); array_t<T> a; a.data = data_at_byte_offset(src, byte_offset(src, i, j, k)); a.shape[0] = src.shape[3]; a.strides[0] = src.strides[3]; a.ndim = src.ndim-3; return a; } template <typename T> CUDA_CALLABLE inline indexedarray_t<T> view(indexedarray_t<T>& src, int i) { assert(src.arr.ndim > 1); if (src.indices[0]) { assert(i >= 0 && i < src.shape[0]); i = src.indices[0][i]; } indexedarray_t<T> a; a.arr = view(src.arr, i); a.indices[0] = src.indices[1]; a.indices[1] = src.indices[2]; a.indices[2] = 
src.indices[3]; a.shape[0] = src.shape[1]; a.shape[1] = src.shape[2]; a.shape[2] = src.shape[3]; return a; } template <typename T> CUDA_CALLABLE inline indexedarray_t<T> view(indexedarray_t<T>& src, int i, int j) { assert(src.arr.ndim > 2); if (src.indices[0]) { assert(i >= 0 && i < src.shape[0]); i = src.indices[0][i]; } if (src.indices[1]) { assert(j >= 0 && j < src.shape[1]); j = src.indices[1][j]; } indexedarray_t<T> a; a.arr = view(src.arr, i, j); a.indices[0] = src.indices[2]; a.indices[1] = src.indices[3]; a.shape[0] = src.shape[2]; a.shape[1] = src.shape[3]; return a; } template <typename T> CUDA_CALLABLE inline indexedarray_t<T> view(indexedarray_t<T>& src, int i, int j, int k) { assert(src.arr.ndim > 3); if (src.indices[0]) { assert(i >= 0 && i < src.shape[0]); i = src.indices[0][i]; } if (src.indices[1]) { assert(j >= 0 && j < src.shape[1]); j = src.indices[1][j]; } if (src.indices[2]) { assert(k >= 0 && k < src.shape[2]); k = src.indices[2][k]; } indexedarray_t<T> a; a.arr = view(src.arr, i, j, k); a.indices[0] = src.indices[3]; a.shape[0] = src.shape[3]; return a; } template<template<typename> class A1, template<typename> class A2, template<typename> class A3, typename T> inline CUDA_CALLABLE void adj_view(A1<T>& src, int i, A2<T>& adj_src, int adj_i, A3<T> adj_ret) {} template<template<typename> class A1, template<typename> class A2, template<typename> class A3, typename T> inline CUDA_CALLABLE void adj_view(A1<T>& src, int i, int j, A2<T>& adj_src, int adj_i, int adj_j, A3<T> adj_ret) {} template<template<typename> class A1, template<typename> class A2, template<typename> class A3, typename T> inline CUDA_CALLABLE void adj_view(A1<T>& src, int i, int j, int k, A2<T>& adj_src, int adj_i, int adj_j, int adj_k, A3<T> adj_ret) {} // TODO: lower_bound() for indexed arrays? 
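// lower_bound() below binary-searches a 1d array and returns the index of the
// first element in [arr_begin, arr_end) that is not less than `value`; unlike
// std::lower_bound it never returns arr_end -- if every element compares less,
// the result is arr_end - 1. The two-argument overload searches the whole array.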
template <typename T> CUDA_CALLABLE inline int lower_bound(const array_t<T>& arr, int arr_begin, int arr_end, T value) { assert(arr.ndim == 1); int lower = arr_begin; int upper = arr_end - 1; while(lower < upper) { int mid = lower + (upper - lower) / 2; if (arr[mid] < value) { lower = mid + 1; } else { upper = mid; } } return lower; } template <typename T> CUDA_CALLABLE inline int lower_bound(const array_t<T>& arr, T value) { return lower_bound(arr, 0, arr.shape[0], value); } template <typename T> inline CUDA_CALLABLE void adj_lower_bound(const array_t<T>& arr, T value, array_t<T> adj_arr, T adj_value, int adj_ret) {} template <typename T> inline CUDA_CALLABLE void adj_lower_bound(const array_t<T>& arr, int arr_begin, int arr_end, T value, array_t<T> adj_arr, int adj_arr_begin, int adj_arr_end, T adj_value, int adj_ret) {} template<template<typename> class A, typename T> inline CUDA_CALLABLE T atomic_add(const A<T>& buf, int i, T value) { return atomic_add(&index(buf, i), value); } template<template<typename> class A, typename T> inline CUDA_CALLABLE T atomic_add(const A<T>& buf, int i, int j, T value) { return atomic_add(&index(buf, i, j), value); } template<template<typename> class A, typename T> inline CUDA_CALLABLE T atomic_add(const A<T>& buf, int i, int j, int k, T value) { return atomic_add(&index(buf, i, j, k), value); } template<template<typename> class A, typename T> inline CUDA_CALLABLE T atomic_add(const A<T>& buf, int i, int j, int k, int l, T value) { return atomic_add(&index(buf, i, j, k, l), value); } template<template<typename> class A, typename T> inline CUDA_CALLABLE T atomic_sub(const A<T>& buf, int i, T value) { return atomic_add(&index(buf, i), -value); } template<template<typename> class A, typename T> inline CUDA_CALLABLE T atomic_sub(const A<T>& buf, int i, int j, T value) { return atomic_add(&index(buf, i, j), -value); } template<template<typename> class A, typename T> inline CUDA_CALLABLE T atomic_sub(const A<T>& buf, int i, int j, int k, T value) { return atomic_add(&index(buf, i, j, k), -value); } template<template<typename> class A, typename T> inline CUDA_CALLABLE T atomic_sub(const A<T>& buf, int i, int j, int k, int l, T value) { return atomic_add(&index(buf, i, j, k, l), -value); } template<template<typename> class A, typename T> inline CUDA_CALLABLE T atomic_min(const A<T>& buf, int i, T value) { return atomic_min(&index(buf, i), value); } template<template<typename> class A, typename T> inline CUDA_CALLABLE T atomic_min(const A<T>& buf, int i, int j, T value) { return atomic_min(&index(buf, i, j), value); } template<template<typename> class A, typename T> inline CUDA_CALLABLE T atomic_min(const A<T>& buf, int i, int j, int k, T value) { return atomic_min(&index(buf, i, j, k), value); } template<template<typename> class A, typename T> inline CUDA_CALLABLE T atomic_min(const A<T>& buf, int i, int j, int k, int l, T value) { return atomic_min(&index(buf, i, j, k, l), value); } template<template<typename> class A, typename T> inline CUDA_CALLABLE T atomic_max(const A<T>& buf, int i, T value) { return atomic_max(&index(buf, i), value); } template<template<typename> class A, typename T> inline CUDA_CALLABLE T atomic_max(const A<T>& buf, int i, int j, T value) { return atomic_max(&index(buf, i, j), value); } template<template<typename> class A, typename T> inline CUDA_CALLABLE T atomic_max(const A<T>& buf, int i, int j, int k, T value) { return atomic_max(&index(buf, i, j, k), value); } template<template<typename> class A, typename T> inline CUDA_CALLABLE T 
atomic_max(const A<T>& buf, int i, int j, int k, int l, T value) { return atomic_max(&index(buf, i, j, k, l), value); } template<template<typename> class A, typename T> inline CUDA_CALLABLE T* address(const A<T>& buf, int i) { return &index(buf, i); } template<template<typename> class A, typename T> inline CUDA_CALLABLE T* address(const A<T>& buf, int i, int j) { return &index(buf, i, j); } template<template<typename> class A, typename T> inline CUDA_CALLABLE T* address(const A<T>& buf, int i, int j, int k) { return &index(buf, i, j, k); } template<template<typename> class A, typename T> inline CUDA_CALLABLE T* address(const A<T>& buf, int i, int j, int k, int l) { return &index(buf, i, j, k, l); } template<template<typename> class A, typename T> inline CUDA_CALLABLE void array_store(const A<T>& buf, int i, T value) { FP_VERIFY_FWD_1(value) index(buf, i) = value; } template<template<typename> class A, typename T> inline CUDA_CALLABLE void array_store(const A<T>& buf, int i, int j, T value) { FP_VERIFY_FWD_2(value) index(buf, i, j) = value; } template<template<typename> class A, typename T> inline CUDA_CALLABLE void array_store(const A<T>& buf, int i, int j, int k, T value) { FP_VERIFY_FWD_3(value) index(buf, i, j, k) = value; } template<template<typename> class A, typename T> inline CUDA_CALLABLE void array_store(const A<T>& buf, int i, int j, int k, int l, T value) { FP_VERIFY_FWD_4(value) index(buf, i, j, k, l) = value; } template<typename T> inline CUDA_CALLABLE void store(T* address, T value) { FP_VERIFY_FWD(value) *address = value; } template<typename T> inline CUDA_CALLABLE T load(T* address) { T value = *address; FP_VERIFY_FWD(value) return value; } // select operator to check for array being null template <typename T1, typename T2> CUDA_CALLABLE inline T2 select(const array_t<T1>& arr, const T2& a, const T2& b) { return arr.data?b:a; } template <typename T1, typename T2> CUDA_CALLABLE inline void adj_select(const array_t<T1>& arr, const T2& a, const T2& b, const array_t<T1>& adj_cond, T2& adj_a, T2& adj_b, const T2& adj_ret) { if (arr.data) adj_b += adj_ret; else adj_a += adj_ret; } // stub for the case where we have an nested array inside a struct and // atomic add the whole struct onto an array (e.g.: during backwards pass) template <typename T> CUDA_CALLABLE inline void atomic_add(array_t<T>*, array_t<T>) {} // for float and vector types this is just an alias for an atomic add template <typename T> CUDA_CALLABLE inline void adj_atomic_add(T* buf, T value) { atomic_add(buf, value); } // for integral types we do not accumulate gradients CUDA_CALLABLE inline void adj_atomic_add(int8* buf, int8 value) { } CUDA_CALLABLE inline void adj_atomic_add(uint8* buf, uint8 value) { } CUDA_CALLABLE inline void adj_atomic_add(int16* buf, int16 value) { } CUDA_CALLABLE inline void adj_atomic_add(uint16* buf, uint16 value) { } CUDA_CALLABLE inline void adj_atomic_add(int32* buf, int32 value) { } CUDA_CALLABLE inline void adj_atomic_add(uint32* buf, uint32 value) { } CUDA_CALLABLE inline void adj_atomic_add(int64* buf, int64 value) { } CUDA_CALLABLE inline void adj_atomic_add(uint64* buf, uint64 value) { } CUDA_CALLABLE inline void adj_atomic_add(bool* buf, bool value) { } // only generate gradients for T types template<typename T> inline CUDA_CALLABLE void adj_address(const array_t<T>& buf, int i, const array_t<T>& adj_buf, int& adj_i, const T& adj_output) { if (adj_buf.data) adj_atomic_add(&index(adj_buf, i), adj_output); else if (buf.grad) adj_atomic_add(&index_grad(buf, i), adj_output); } 
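// [Editorial note] select(arr, a, b) above evaluates to `a` when arr.data is null and to
// `b` when the array is valid, and adj_select() accumulates the incoming gradient into the
// branch that was taken. The adj_address()/adj_array_store()/adj_atomic_*() overloads that
// follow all share the same dispatch pattern: they use the explicit adjoint array when
// adj_buf.data is set, and otherwise fall back to the gradient buffer attached to the
// primal array (buf.grad), if any.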
template<typename T> inline CUDA_CALLABLE void adj_address(const array_t<T>& buf, int i, int j, const array_t<T>& adj_buf, int& adj_i, int& adj_j, const T& adj_output) { if (adj_buf.data) adj_atomic_add(&index(adj_buf, i, j), adj_output); else if (buf.grad) adj_atomic_add(&index_grad(buf, i, j), adj_output); } template<typename T> inline CUDA_CALLABLE void adj_address(const array_t<T>& buf, int i, int j, int k, const array_t<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, const T& adj_output) { if (adj_buf.data) adj_atomic_add(&index(adj_buf, i, j, k), adj_output); else if (buf.grad) adj_atomic_add(&index_grad(buf, i, j, k), adj_output); } template<typename T> inline CUDA_CALLABLE void adj_address(const array_t<T>& buf, int i, int j, int k, int l, const array_t<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, const T& adj_output) { if (adj_buf.data) adj_atomic_add(&index(adj_buf, i, j, k, l), adj_output); else if (buf.grad) adj_atomic_add(&index_grad(buf, i, j, k, l), adj_output); } template<typename T> inline CUDA_CALLABLE void adj_array_store(const array_t<T>& buf, int i, T value, const array_t<T>& adj_buf, int& adj_i, T& adj_value) { if (adj_buf.data) adj_value += index(adj_buf, i); else if (buf.grad) adj_value += index_grad(buf, i); FP_VERIFY_ADJ_1(value, adj_value) } template<typename T> inline CUDA_CALLABLE void adj_array_store(const array_t<T>& buf, int i, int j, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value) { if (adj_buf.data) adj_value += index(adj_buf, i, j); else if (buf.grad) adj_value += index_grad(buf, i, j); FP_VERIFY_ADJ_2(value, adj_value) } template<typename T> inline CUDA_CALLABLE void adj_array_store(const array_t<T>& buf, int i, int j, int k, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, T& adj_value) { if (adj_buf.data) adj_value += index(adj_buf, i, j, k); else if (buf.grad) adj_value += index_grad(buf, i, j, k); FP_VERIFY_ADJ_3(value, adj_value) } template<typename T> inline CUDA_CALLABLE void adj_array_store(const array_t<T>& buf, int i, int j, int k, int l, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value) { if (adj_buf.data) adj_value += index(adj_buf, i, j, k, l); else if (buf.grad) adj_value += index_grad(buf, i, j, k, l); FP_VERIFY_ADJ_4(value, adj_value) } template<typename T> inline CUDA_CALLABLE void adj_store(const T* address, T value, const T& adj_address, T& adj_value) { // nop; generic store() operations are not differentiable, only array_store() is FP_VERIFY_ADJ(value, adj_value) } template<typename T> inline CUDA_CALLABLE void adj_load(const T* address, const T& adj_address, T& adj_value) { // nop; generic load() operations are not differentiable } template<typename T> inline CUDA_CALLABLE void adj_atomic_add(const array_t<T>& buf, int i, T value, const array_t<T>& adj_buf, int& adj_i, T& adj_value, const T& adj_ret) { if (adj_buf.data) adj_value += index(adj_buf, i); else if (buf.grad) adj_value += index_grad(buf, i); FP_VERIFY_ADJ_1(value, adj_value) } template<typename T> inline CUDA_CALLABLE void adj_atomic_add(const array_t<T>& buf, int i, int j, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value, const T& adj_ret) { if (adj_buf.data) adj_value += index(adj_buf, i, j); else if (buf.grad) adj_value += index_grad(buf, i, j); FP_VERIFY_ADJ_2(value, adj_value) } template<typename T> inline CUDA_CALLABLE void adj_atomic_add(const array_t<T>& buf, int i, int j, int k, T value, const array_t<T>& adj_buf, int& adj_i, 
int& adj_j, int& adj_k, T& adj_value, const T& adj_ret) { if (adj_buf.data) adj_value += index(adj_buf, i, j, k); else if (buf.grad) adj_value += index_grad(buf, i, j, k); FP_VERIFY_ADJ_3(value, adj_value) } template<typename T> inline CUDA_CALLABLE void adj_atomic_add(const array_t<T>& buf, int i, int j, int k, int l, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value, const T& adj_ret) { if (adj_buf.data) adj_value += index(adj_buf, i, j, k, l); else if (buf.grad) adj_value += index_grad(buf, i, j, k, l); FP_VERIFY_ADJ_4(value, adj_value) } template<typename T> inline CUDA_CALLABLE void adj_atomic_sub(const array_t<T>& buf, int i, T value, const array_t<T>& adj_buf, int& adj_i, T& adj_value, const T& adj_ret) { if (adj_buf.data) adj_value -= index(adj_buf, i); else if (buf.grad) adj_value -= index_grad(buf, i); FP_VERIFY_ADJ_1(value, adj_value) } template<typename T> inline CUDA_CALLABLE void adj_atomic_sub(const array_t<T>& buf, int i, int j, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value, const T& adj_ret) { if (adj_buf.data) adj_value -= index(adj_buf, i, j); else if (buf.grad) adj_value -= index_grad(buf, i, j); FP_VERIFY_ADJ_2(value, adj_value) } template<typename T> inline CUDA_CALLABLE void adj_atomic_sub(const array_t<T>& buf, int i, int j, int k, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, T& adj_value, const T& adj_ret) { if (adj_buf.data) adj_value -= index(adj_buf, i, j, k); else if (buf.grad) adj_value -= index_grad(buf, i, j, k); FP_VERIFY_ADJ_3(value, adj_value) } template<typename T> inline CUDA_CALLABLE void adj_atomic_sub(const array_t<T>& buf, int i, int j, int k, int l, T value, const array_t<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value, const T& adj_ret) { if (adj_buf.data) adj_value -= index(adj_buf, i, j, k, l); else if (buf.grad) adj_value -= index_grad(buf, i, j, k, l); FP_VERIFY_ADJ_4(value, adj_value) } // generic array types that do not support gradient computation (indexedarray, etc.) 
template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_address(const A1<T>& buf, int i, const A2<T>& adj_buf, int& adj_i, const T& adj_output) {} template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_address(const A1<T>& buf, int i, int j, const A2<T>& adj_buf, int& adj_i, int& adj_j, const T& adj_output) {} template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_address(const A1<T>& buf, int i, int j, int k, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, const T& adj_output) {} template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_address(const A1<T>& buf, int i, int j, int k, int l, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, const T& adj_output) {} template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_array_store(const A1<T>& buf, int i, T value, const A2<T>& adj_buf, int& adj_i, T& adj_value) {} template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_array_store(const A1<T>& buf, int i, int j, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value) {} template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_array_store(const A1<T>& buf, int i, int j, int k, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, T& adj_value) {} template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_array_store(const A1<T>& buf, int i, int j, int k, int l, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value) {} template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_atomic_add(const A1<T>& buf, int i, T value, const A2<T>& adj_buf, int& adj_i, T& adj_value, const T& adj_ret) {} template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_atomic_add(const A1<T>& buf, int i, int j, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value, const T& adj_ret) {} template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_atomic_add(const A1<T>& buf, int i, int j, int k, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, T& adj_value, const T& adj_ret) {} template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_atomic_add(const A1<T>& buf, int i, int j, int k, int l, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value, const T& adj_ret) {} template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_atomic_sub(const A1<T>& buf, int i, T value, const A2<T>& adj_buf, int& adj_i, T& adj_value, const T& adj_ret) {} template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_atomic_sub(const A1<T>& buf, int i, int j, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value, const T& adj_ret) {} template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_atomic_sub(const A1<T>& buf, int i, int j, int k, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, T& 
adj_value, const T& adj_ret) {} template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_atomic_sub(const A1<T>& buf, int i, int j, int k, int l, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value, const T& adj_ret) {} // generic handler for scalar values template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_atomic_min(const A1<T>& buf, int i, T value, const A2<T>& adj_buf, int& adj_i, T& adj_value, const T& adj_ret) { if (adj_buf.data) adj_atomic_minmax(&index(buf, i), &index(adj_buf, i), value, adj_value); else if (buf.grad) adj_atomic_minmax(&index(buf, i), &index_grad(buf, i), value, adj_value); FP_VERIFY_ADJ_1(value, adj_value) } template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_atomic_min(const A1<T>& buf, int i, int j, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value, const T& adj_ret) { if (adj_buf.data) adj_atomic_minmax(&index(buf, i, j), &index(adj_buf, i, j), value, adj_value); else if (buf.grad) adj_atomic_minmax(&index(buf, i, j), &index_grad(buf, i, j), value, adj_value); FP_VERIFY_ADJ_2(value, adj_value) } template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_atomic_min(const A1<T>& buf, int i, int j, int k, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, T& adj_value, const T& adj_ret) { if (adj_buf.data) adj_atomic_minmax(&index(buf, i, j, k), &index(adj_buf, i, j, k), value, adj_value); else if (buf.grad) adj_atomic_minmax(&index(buf, i, j, k), &index_grad(buf, i, j, k), value, adj_value); FP_VERIFY_ADJ_3(value, adj_value) } template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_atomic_min(const A1<T>& buf, int i, int j, int k, int l, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value, const T& adj_ret) { if (adj_buf.data) adj_atomic_minmax(&index(buf, i, j, k, l), &index(adj_buf, i, j, k, l), value, adj_value); else if (buf.grad) adj_atomic_minmax(&index(buf, i, j, k, l), &index_grad(buf, i, j, k, l), value, adj_value); FP_VERIFY_ADJ_4(value, adj_value) } template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_atomic_max(const A1<T>& buf, int i, T value, const A2<T>& adj_buf, int& adj_i, T& adj_value, const T& adj_ret) { if (adj_buf.data) adj_atomic_minmax(&index(buf, i), &index(adj_buf, i), value, adj_value); else if (buf.grad) adj_atomic_minmax(&index(buf, i), &index_grad(buf, i), value, adj_value); FP_VERIFY_ADJ_1(value, adj_value) } template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_atomic_max(const A1<T>& buf, int i, int j, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, T& adj_value, const T& adj_ret) { if (adj_buf.data) adj_atomic_minmax(&index(buf, i, j), &index(adj_buf, i, j), value, adj_value); else if (buf.grad) adj_atomic_minmax(&index(buf, i, j), &index_grad(buf, i, j), value, adj_value); FP_VERIFY_ADJ_2(value, adj_value) } template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_atomic_max(const A1<T>& buf, int i, int j, int k, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, T& adj_value, const T& adj_ret) { if (adj_buf.data) adj_atomic_minmax(&index(buf, i, j, k), &index(adj_buf, i, 
j, k), value, adj_value); else if (buf.grad) adj_atomic_minmax(&index(buf, i, j, k), &index_grad(buf, i, j, k), value, adj_value); FP_VERIFY_ADJ_3(value, adj_value) } template<template<typename> class A1, template<typename> class A2, typename T> inline CUDA_CALLABLE void adj_atomic_max(const A1<T>& buf, int i, int j, int k, int l, T value, const A2<T>& adj_buf, int& adj_i, int& adj_j, int& adj_k, int& adj_l, T& adj_value, const T& adj_ret) { if (adj_buf.data) adj_atomic_minmax(&index(buf, i, j, k, l), &index(adj_buf, i, j, k, l), value, adj_value); else if (buf.grad) adj_atomic_minmax(&index(buf, i, j, k, l), &index_grad(buf, i, j, k, l), value, adj_value); FP_VERIFY_ADJ_4(value, adj_value) } } // namespace wp #include "fabric.h"
38,118
C
34.525629
208
0.617818
NVIDIA/warp/warp/native/marching.h
#pragma once
14
C
3.999999
12
0.714286
NVIDIA/warp/warp/native/sort.cpp
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

#include "warp.h"
#include "sort.h"
#include "string.h"

#include <cstdint>

void radix_sort_pairs_host(int* keys, int* values, int n)
{
    static int tables[2][1 << 16];
    memset(tables, 0, sizeof(tables));

    int* auxKeys = keys + n;
    int* auxValues = values + n;

    // build histograms
    for (int i=0; i < n; ++i)
    {
        const unsigned short low = keys[i] & 0xffff;
        const unsigned short high = keys[i] >> 16;

        ++tables[0][low];
        ++tables[1][high];
    }

    // convert histograms to offset tables in-place
    int offlow = 0;
    int offhigh = 0;

    for (int i=0; i < 65536; ++i)
    {
        const int newofflow = offlow + tables[0][i];
        const int newoffhigh = offhigh + tables[1][i];

        tables[0][i] = offlow;
        tables[1][i] = offhigh;

        offlow = newofflow;
        offhigh = newoffhigh;
    }

    // pass 1 - sort by low 16 bits
    for (int i=0; i < n; ++i)
    {
        // lookup offset of input
        const int k = keys[i];
        const int v = values[i];
        const int b = k & 0xffff;

        // find offset and increment
        const int offset = tables[0][b]++;

        auxKeys[offset] = k;
        auxValues[offset] = v;
    }

    // pass 2 - sort by high 16 bits
    for (int i=0; i < n; ++i)
    {
        // lookup offset of input
        const int k = auxKeys[i];
        const int v = auxValues[i];

        const int b = k >> 16;

        const int offset = tables[1][b]++;

        keys[offset] = k;
        values[offset] = v;
    }
}

#if !WP_ENABLE_CUDA
void radix_sort_reserve(void* context, int n, void** mem_out, size_t* size_out) {}
void radix_sort_pairs_int_device(uint64_t keys, uint64_t values, int n) {}
#endif // !WP_ENABLE_CUDA

void radix_sort_pairs_int_host(uint64_t keys, uint64_t values, int n)
{
    radix_sort_pairs_host(
        reinterpret_cast<int *>(keys),
        reinterpret_cast<int *>(values),
        n);
}
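// [Illustrative usage sketch, not part of the original source] radix_sort_pairs_host()
// above writes its intermediate pass into keys[n..2n) and values[n..2n), so the caller
// must allocate both buffers with capacity for 2*n elements; after the call the first n
// entries hold the pairs sorted by key (non-negative 32-bit keys, two 16-bit passes).
#if 0
#include <cstdio>
#include <vector>

void radix_sort_pairs_host(int* keys, int* values, int n);   // defined above

int main()
{
    const int n = 4;

    // n real entries followed by n scratch entries used by the second pass
    std::vector<int> keys   = {42, 7, 100000, 3,   0, 0, 0, 0};
    std::vector<int> values = { 0, 1,      2, 3,   0, 0, 0, 0};

    radix_sort_pairs_host(keys.data(), values.data(), n);

    for (int i = 0; i < n; ++i)
        std::printf("%d -> %d\n", keys[i], values[i]);   // 3 -> 3, 7 -> 1, 42 -> 0, 100000 -> 2

    return 0;
}
#endif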
2,179
C++
21.947368
82
0.642955
NVIDIA/warp/warp/native/matnn.h
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once namespace wp { CUDA_CALLABLE inline int dense_index(int stride, int i, int j) { return i*stride + j; } template <bool transpose> CUDA_CALLABLE inline int dense_index(int rows, int cols, int i, int j) { if (transpose) return j*rows + i; else return i*cols + j; } template <bool t1, bool t2, bool add> CUDA_CALLABLE inline void dense_gemm_impl(int m, int n, int p, const float* __restrict__ A, const float* __restrict__ B, float* __restrict__ C) { for (int i=0; i < m; i++) { for (int j=0; j < n; ++j) { float sum = 0.0f; for (int k=0; k < p; ++k) { sum += A[dense_index<t1>(m, p, i, k)]*B[dense_index<t2>(p, n, k, j)]; } if (add) C[i*n + j] += sum; else C[i*n + j] = sum; } } } template <bool add=false> CUDA_CALLABLE inline void dense_gemm(int m, int n, int p, int t1, int t2, const array_t<float>& A, const array_t<float>& B, array_t<float>& C) { if (t1 == 0 && t2 == 0) dense_gemm_impl<false, false, add>(m, n, p, A.data, B.data, C.data); else if (t1 == 1 && t2 == 0) dense_gemm_impl<true, false, add>(m, n, p, A.data, B.data, C.data); else if (t1 == 0 && t2 == 1) dense_gemm_impl<false, true, add>(m, n, p, A.data, B.data, C.data); else if (t1 == 1 && t2 == 1) dense_gemm_impl<true, true, add>(m, n, p, A.data, B.data, C.data); } void CUDA_CALLABLE inline dense_chol(int n, const array_t<float>& A, float regularization, array_t<float>& L) { for (int j=0; j < n; ++j) { float s = A.data[dense_index(n, j, j)] + regularization; for (int k=0; k < j; ++k) { float r = L.data[dense_index(n, j, k)]; s -= r*r; } s = sqrt(s); const float invS = 1.0f/s; L.data[dense_index(n, j, j)] = s; for (int i=j+1; i < n; ++i) { s = A.data[dense_index(n, i, j)]; for (int k=0; k < j; ++k) { s -= L.data[dense_index(n, i, k)]*L.data[dense_index(n, j, k)]; } L.data[dense_index(n, i, j)] = s*invS; } } } // Solves (L*L^T)x = b given the Cholesky factor L CUDA_CALLABLE inline void dense_subs(int n, const array_t<float>& L, const array_t<float>& b, array_t<float>& x) { // forward substitution for (int i=0; i < n; ++i) { float s = b.data[i]; for (int j=0; j < i; ++j) { s -= L.data[dense_index(n, i, j)]*x.data[j]; } x.data[i] = s/L.data[dense_index(n, i, i)]; } // backward substitution for (int i=n-1; i >= 0; --i) { float s = x.data[i]; for (int j=i+1; j < n; ++j) { s -= L.data[dense_index(n, j, i)]*x.data[j]; } x.data[i] = s/L.data[dense_index(n, i, i)]; } } CUDA_CALLABLE inline void dense_solve(int n, const array_t<float>& A, const array_t<float>& L, const array_t<float>& b, array_t<float>& x) { dense_subs(n, L, b, x); } // CUDA_CALLABLE inline void print_matrix(const char* name, int m, int n, const float* data) // { // printf("%s = [", name); // for (int i=0; i < m; ++i) // { // for (int j=0; j < n; ++j) // { // printf("%f ", data[dense_index(n, i, j)]); // } // printf(";\n"); // } // printf("]\n"); // } // adjoint methods CUDA_CALLABLE inline void adj_dense_gemm( int m, int n, int p, int t1, int t2, const array_t<float>& A, const array_t<float>& B, array_t<float>& C, int adj_m, int adj_n, int adj_p, int adj_t1, int adj_t2, array_t<float>& adj_A, array_t<float>& adj_B, const array_t<float>& adj_C) { // 
print_matrix("A", m, p, A); // print_matrix("B", p, n, B); // printf("t1: %d t2: %d\n", t1, t2); if (t1) { dense_gemm<true>(p, m, n, 0, 1, B, adj_C, adj_A); dense_gemm<true>(p, n, m, int(!t1), 0, A, adj_C, adj_B); } else { dense_gemm<true>(m, p, n, 0, int(!t2), adj_C, B, adj_A); dense_gemm<true>(p, n, m, int(!t1), 0, A, adj_C, adj_B); } } CUDA_CALLABLE inline void adj_dense_chol( int n, const array_t<float>& A, float regularization, array_t<float>& L, int adj_n, const array_t<float>& adj_A, float adj_regularization, array_t<float>& adj_L) { // nop, use dense_solve to differentiate through (A^-1)b = x } CUDA_CALLABLE inline void adj_dense_subs( int n, const array_t<float>& L, const array_t<float>& b, array_t<float>& x, int adj_n, const array_t<float>& adj_L, const array_t<float>& adj_b, array_t<float>& adj_x) { // nop, use dense_solve to differentiate through (A^-1)b = x } CUDA_CALLABLE inline void adj_dense_solve(int n, const array_t<float>& A, const array_t<float>& L, const array_t<float>& b, const array_t<float>& x, int adj_n, array_t<float>& adj_A, array_t<float>& adj_L, array_t<float>& adj_b, const array_t<float>& adj_x) { // see https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pwp, section 2.3.1 dense_subs(n, L, adj_x, adj_b); // A* = -adj_b*x^T for (int i=0; i < n; ++i) { for (int j=0; j < n; ++j) { adj_A.data[dense_index(n, i, j)] += -adj_b.data[i]*x.data[j]; } } } template <typename F> CUDA_CALLABLE inline void mlp(const array_t<float>& weights, const array_t<float>& bias, F activation, int index, const array_t<float>& x, array_t<float>& out) { const int m = weights.shape[0]; const int n = weights.shape[1]; const int b = x.shape[1]; for (int i=0; i < m; ++i) { float tmp = bias.data[i]; for(int j=0; j < n; ++j) { tmp += weights.data[i*n + j]*x.data[index + b*j]; } out.data[index + b*i] = activation(tmp); } } template <typename F, typename AdjF> CUDA_CALLABLE inline void adj_mlp(const array_t<float>& weights, const array_t<float>& bias, F activation, int index, const array_t<float>& x, array_t<float>& out, array_t<float>& adj_weights, array_t<float>& adj_bias, AdjF adj_activation, int adj_index, array_t<float>& adj_x, array_t<float>& adj_out) { const int m = weights.shape[0]; const int n = weights.shape[1]; const int b = x.shape[1]; for (int i=0; i < m; ++i) { // recompute forward pass so we don't have to store pre-activation outputs float tmp = bias.data[i]; for(int j=0; j < n; ++j) { tmp += weights.data[i*n + j]*x.data[index + b*j]; } // adjoint w.r.t to activation float adj_f = 0.0f; if (adj_out.data) adj_activation(tmp, adj_f, adj_out.data[index + b*i]); for (int j=0; j < n; ++j) { // adjoint w.r.t M_i if (adj_weights.data) atomic_add(&adj_weights.data[i*n + j], x.data[index + b*j]*adj_f); // todo: reduce these atomic stores using warp/block level reductions // adjoint w.r.t x if (adj_x.data) atomic_add(&adj_x.data[index + b*j], weights.data[i*n + j]*adj_f); } // adjoint w.r.t b if (adj_bias.data) atomic_add(&adj_bias.data[i], adj_f); } } // template <typename F> // CUDA_CALLABLE inline void mlp(const array_t<float>& weights, const array_t<float>& bias, F activation, int m, int n, int b, int index, const array_t<float>& x, array_t<float>& out) // { // x += index*n; // out += index*m; // for (int i=0; i < m; ++i) // { // float tmp = bias[i]; // for(int j=0; j < n; ++j) // { // tmp += weights[i*n + j]*x[j]; // } // out[i] = activation(tmp); // } // } // template <typename F, typename AdjF> // CUDA_CALLABLE inline void adj_mlp(const array_t<float>& weights, const array_t<float>& 
bias, F activation, int m, int n, int b, int index, const array_t<float>& x, const array_t<float>& out, // array_t<float>& adj_weights, array_t<float>& adj_bias, AdjF adj_activation, int adj_m, int adj_n, int adj_b, int adj_index, array_t<float>& adj_x, array_t<float>& adj_out) // { // x += index*n; // out += index*m; // adj_x += index*n; // adj_out += index*m; // for (int i=0; i < m; ++i) // { // // recompute forward pass so we don't have to store pre-activation outputs // float tmp = bias[i]; // for(int j=0; j < n; ++j) // { // tmp += weights[i*n + j]*x[index + b*j]; // } // // adjoint w.r.t to activation // float adj_f = 0.0f; // adj_activation(tmp, adj_f, adj_out[index + b*i]); // for (int j=0; j < n; ++j) // { // // adjoint w.r.t M_i // adj_weights[i*n + j] += x[j]*adj_f; // // adjoint w.r.t x // adj_x[index + b*j] += weights[i*n + j]*adj_f; // } // // adjoint w.r.t b // adj_bias[i] += adj_f; // } // } } // namespace wp
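// [Illustrative sketch, not part of the original header] dense_chol() above factorizes an
// SPD matrix A (plus optional diagonal regularization) as L*L^T, and dense_subs() then
// solves (L*L^T)x = b by forward followed by backward substitution. The standalone snippet
// below mirrors those loops on a plain 2x2 system using flat arrays and the same row-major
// dense_index() convention; it does not use warp's array_t type.
#if 0
#include <cmath>
#include <cstdio>

static int idx(int n, int i, int j) { return i*n + j; }   // row-major, as dense_index()

int main()
{
    const int n = 2;

    // SPD system A*x = b with A = [[4, 2], [2, 3]], b = [8, 7]; exact solution x = [1.25, 1.5]
    float A[] = {4.0f, 2.0f, 2.0f, 3.0f};
    float b[] = {8.0f, 7.0f};
    float L[4] = {0.0f};
    float x[2] = {0.0f};

    // Cholesky factorization A = L*L^T (same loop structure as dense_chol, regularization = 0)
    for (int j = 0; j < n; ++j)
    {
        float s = A[idx(n, j, j)];
        for (int k = 0; k < j; ++k)
            s -= L[idx(n, j, k)] * L[idx(n, j, k)];
        s = std::sqrt(s);
        L[idx(n, j, j)] = s;

        for (int i = j + 1; i < n; ++i)
        {
            float t = A[idx(n, i, j)];
            for (int k = 0; k < j; ++k)
                t -= L[idx(n, i, k)] * L[idx(n, j, k)];
            L[idx(n, i, j)] = t / s;
        }
    }

    // forward substitution: solve L*y = b (y stored in x)
    for (int i = 0; i < n; ++i)
    {
        float s = b[i];
        for (int j = 0; j < i; ++j)
            s -= L[idx(n, i, j)] * x[j];
        x[i] = s / L[idx(n, i, i)];
    }

    // backward substitution: solve L^T*x = y
    for (int i = n - 1; i >= 0; --i)
    {
        float s = x[i];
        for (int j = i + 1; j < n; ++j)
            s -= L[idx(n, j, i)] * x[j];
        x[i] = s / L[idx(n, i, i)];
    }

    std::printf("x = [%f, %f]\n", x[0], x[1]);   // expect approximately [1.25, 1.5]
    return 0;
}
#endif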
9,617
C
27.796407
208
0.517833
NVIDIA/warp/warp/native/mesh.h
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once #include "builtin.h" #include "bvh.h" #include "intersect.h" #include "array.h" #include "solid_angle.h" #define BVH_DEBUG 0 namespace wp { struct Mesh { array_t<vec3> points; array_t<vec3> velocities; array_t<int> indices; vec3* lowers; vec3* uppers; SolidAngleProps* solid_angle_props; int num_points; int num_tris; BVH bvh; void* context; float average_edge_length; inline CUDA_CALLABLE Mesh(int id = 0) { // for backward a = 0 initialization syntax lowers = nullptr; uppers = nullptr; num_points = 0; num_tris = 0; context = nullptr; solid_angle_props = nullptr; average_edge_length = 0.0f; } inline CUDA_CALLABLE Mesh( array_t<vec3> points, array_t<vec3> velocities, array_t<int> indices, int num_points, int num_tris, void* context = nullptr ) : points(points), velocities(velocities), indices(indices), num_points(num_points), num_tris(num_tris), context(context) { lowers = nullptr; uppers = nullptr; solid_angle_props = nullptr; average_edge_length = 0.0f; } }; CUDA_CALLABLE inline Mesh mesh_get(uint64_t id) { return *(Mesh*)(id); } CUDA_CALLABLE inline Mesh& operator += (Mesh& a, const Mesh& b) { // dummy operator needed for adj_select involving meshes return a; } CUDA_CALLABLE inline float distance_to_aabb_sq(const vec3& p, const vec3& lower, const vec3& upper) { vec3 cp = closest_point_to_aabb(p, lower, upper); return length_sq(p-cp); } CUDA_CALLABLE inline float furthest_distance_to_aabb_sq(const vec3& p, const vec3& lower, const vec3& upper) { vec3 c0 = vec3(lower[0], lower[1], lower[2]); vec3 c1 = vec3(lower[0], lower[1], upper[2]); vec3 c2 = vec3(lower[0], upper[1], lower[2]); vec3 c3 = vec3(lower[0], upper[1], upper[2]); vec3 c4 = vec3(upper[0], lower[1], lower[2]); vec3 c5 = vec3(upper[0], lower[1], upper[2]); vec3 c6 = vec3(upper[0], upper[1], lower[2]); vec3 c7 = vec3(upper[0], upper[1], upper[2]); float max_dist_sq = 0.0; float d; d = length_sq(p-c0); if (d > max_dist_sq) max_dist_sq = d; d = length_sq(p-c1); if (d > max_dist_sq) max_dist_sq = d; d = length_sq(p-c2); if (d > max_dist_sq) max_dist_sq = d; d = length_sq(p-c3); if (d > max_dist_sq) max_dist_sq = d; d = length_sq(p-c4); if (d > max_dist_sq) max_dist_sq = d; d = length_sq(p-c5); if (d > max_dist_sq) max_dist_sq = d; d = length_sq(p-c6); if (d > max_dist_sq) max_dist_sq = d; d = length_sq(p-c7); if (d > max_dist_sq) max_dist_sq = d; return max_dist_sq; } CUDA_CALLABLE inline float mesh_query_inside(uint64_t id, const vec3& p); // returns true if there is a point (strictly) < distance max_dist CUDA_CALLABLE inline bool mesh_query_point(uint64_t id, const vec3& point, float max_dist, float& inside, int& face, float& u, float& v) { Mesh mesh = mesh_get(id); int stack[32]; stack[0] = *mesh.bvh.root; int count = 1; float min_dist_sq = max_dist*max_dist; int min_face; float min_v; float min_w; #if BVH_DEBUG int tests = 0; int secondary_culls = 0; std::vector<int> test_history; std::vector<vec3> test_centers; std::vector<vec3> test_extents; #endif while (count) { const int nodeIndex = stack[--count]; BVHPackedNodeHalf lower = mesh.bvh.node_lowers[nodeIndex]; BVHPackedNodeHalf upper = 
mesh.bvh.node_uppers[nodeIndex]; // re-test distance float node_dist_sq = distance_to_aabb_sq(point, vec3(lower.x, lower.y, lower.z), vec3(upper.x, upper.y, upper.z)); if (node_dist_sq > min_dist_sq) { #if BVH_DEBUG secondary_culls++; #endif continue; } const int left_index = lower.i; const int right_index = upper.i; if (lower.b) { // compute closest point on tri int i = mesh.indices[left_index*3+0]; int j = mesh.indices[left_index*3+1]; int k = mesh.indices[left_index*3+2]; vec3 p = mesh.points[i]; vec3 q = mesh.points[j]; vec3 r = mesh.points[k]; vec3 e0 = q-p; vec3 e1 = r-p; vec3 e2 = r-q; vec3 normal = cross(e0, e1); // sliver detection if (length(normal)/(dot(e0,e0) + dot(e1,e1) + dot(e2,e2)) < 1.e-6f) continue; vec2 barycentric = closest_point_to_triangle(p, q, r, point); float u = barycentric[0]; float v = barycentric[1]; float w = 1.f - u - v; vec3 c = u*p + v*q + w*r; float dist_sq = length_sq(c-point); if (dist_sq < min_dist_sq) { min_dist_sq = dist_sq; min_v = v; min_w = w; min_face = left_index; } #if BVH_DEBUG tests++; bounds3 b; b = bounds_union(b, p); b = bounds_union(b, q); b = bounds_union(b, r); if (distance_to_aabb_sq(point, b.lower, b.upper) < max_dist*max_dist) { //if (dist_sq < max_dist*max_dist) test_history.push_back(left_index); test_centers.push_back(b.center()); test_extents.push_back(b.edges()); } #endif } else { BVHPackedNodeHalf left_lower = mesh.bvh.node_lowers[left_index]; BVHPackedNodeHalf left_upper = mesh.bvh.node_uppers[left_index]; BVHPackedNodeHalf right_lower = mesh.bvh.node_lowers[right_index]; BVHPackedNodeHalf right_upper = mesh.bvh.node_uppers[right_index]; float left_dist_sq = distance_to_aabb_sq(point, vec3(left_lower.x, left_lower.y, left_lower.z), vec3(left_upper.x, left_upper.y, left_upper.z)); float right_dist_sq = distance_to_aabb_sq(point, vec3(right_lower.x, right_lower.y, right_lower.z), vec3(right_upper.x, right_upper.y, right_upper.z)); float left_score = left_dist_sq; float right_score = right_dist_sq; if (left_score < right_score) { // put left on top of the stack if (right_dist_sq < min_dist_sq) stack[count++] = right_index; if (left_dist_sq < min_dist_sq) stack[count++] = left_index; } else { // put right on top of the stack if (left_dist_sq < min_dist_sq) stack[count++] = left_index; if (right_dist_sq < min_dist_sq) stack[count++] = right_index; } } } #if BVH_DEBUG printf("%d\n", tests); static int max_tests = 0; static vec3 max_point; static float max_point_dist = 0.0f; static int max_secondary_culls = 0; if (secondary_culls > max_secondary_culls) max_secondary_culls = secondary_culls; if (tests > max_tests) { max_tests = tests; max_point = point; max_point_dist = sqrtf(min_dist_sq); printf("max_tests: %d max_point: %f %f %f max_point_dist: %f max_second_culls: %d\n", max_tests, max_point[0], max_point[1], max_point[2], max_point_dist, max_secondary_culls); FILE* f = fopen("test_history.txt", "w"); for (int i=0; i < test_history.size(); ++i) { fprintf(f, "%d, %f, %f, %f, %f, %f, %f\n", test_history[i], test_centers[i][0], test_centers[i][1], test_centers[i][2], test_extents[i][0], test_extents[i][1], test_extents[i][2]); } fclose(f); } #endif // check if we found a point, and write outputs if (min_dist_sq < max_dist*max_dist) { u = 1.0f - min_v - min_w; v = min_v; face = min_face; // determine inside outside using ray-cast parity check inside = mesh_query_inside(id, point); return true; } else { return false; } } // returns true if there is a point (strictly) < distance max_dist CUDA_CALLABLE inline bool 
mesh_query_point_no_sign(uint64_t id, const vec3& point, float max_dist, int& face, float& u, float& v) { Mesh mesh = mesh_get(id); int stack[32]; stack[0] = *mesh.bvh.root; int count = 1; float min_dist_sq = max_dist*max_dist; int min_face; float min_v; float min_w; #if BVH_DEBUG int tests = 0; int secondary_culls = 0; std::vector<int> test_history; std::vector<vec3> test_centers; std::vector<vec3> test_extents; #endif while (count) { const int nodeIndex = stack[--count]; BVHPackedNodeHalf lower = mesh.bvh.node_lowers[nodeIndex]; BVHPackedNodeHalf upper = mesh.bvh.node_uppers[nodeIndex]; // re-test distance float node_dist_sq = distance_to_aabb_sq(point, vec3(lower.x, lower.y, lower.z), vec3(upper.x, upper.y, upper.z)); if (node_dist_sq > min_dist_sq) { #if BVH_DEBUG secondary_culls++; #endif continue; } const int left_index = lower.i; const int right_index = upper.i; if (lower.b) { // compute closest point on tri int i = mesh.indices[left_index*3+0]; int j = mesh.indices[left_index*3+1]; int k = mesh.indices[left_index*3+2]; vec3 p = mesh.points[i]; vec3 q = mesh.points[j]; vec3 r = mesh.points[k]; vec3 e0 = q-p; vec3 e1 = r-p; vec3 e2 = r-q; vec3 normal = cross(e0, e1); // sliver detection if (length(normal)/(dot(e0,e0) + dot(e1,e1) + dot(e2,e2)) < 1.e-6f) continue; vec2 barycentric = closest_point_to_triangle(p, q, r, point); float u = barycentric[0]; float v = barycentric[1]; float w = 1.f - u - v; vec3 c = u*p + v*q + w*r; float dist_sq = length_sq(c-point); if (dist_sq < min_dist_sq) { min_dist_sq = dist_sq; min_v = v; min_w = w; min_face = left_index; } #if BVH_DEBUG tests++; bounds3 b; b = bounds_union(b, p); b = bounds_union(b, q); b = bounds_union(b, r); if (distance_to_aabb_sq(point, b.lower, b.upper) < max_dist*max_dist) { //if (dist_sq < max_dist*max_dist) test_history.push_back(left_index); test_centers.push_back(b.center()); test_extents.push_back(b.edges()); } #endif } else { BVHPackedNodeHalf left_lower = mesh.bvh.node_lowers[left_index]; BVHPackedNodeHalf left_upper = mesh.bvh.node_uppers[left_index]; BVHPackedNodeHalf right_lower = mesh.bvh.node_lowers[right_index]; BVHPackedNodeHalf right_upper = mesh.bvh.node_uppers[right_index]; float left_dist_sq = distance_to_aabb_sq(point, vec3(left_lower.x, left_lower.y, left_lower.z), vec3(left_upper.x, left_upper.y, left_upper.z)); float right_dist_sq = distance_to_aabb_sq(point, vec3(right_lower.x, right_lower.y, right_lower.z), vec3(right_upper.x, right_upper.y, right_upper.z)); float left_score = left_dist_sq; float right_score = right_dist_sq; if (left_score < right_score) { // put left on top of the stack if (right_dist_sq < min_dist_sq) stack[count++] = right_index; if (left_dist_sq < min_dist_sq) stack[count++] = left_index; } else { // put right on top of the stack if (left_dist_sq < min_dist_sq) stack[count++] = left_index; if (right_dist_sq < min_dist_sq) stack[count++] = right_index; } } } #if BVH_DEBUG printf("%d\n", tests); static int max_tests = 0; static vec3 max_point; static float max_point_dist = 0.0f; static int max_secondary_culls = 0; if (secondary_culls > max_secondary_culls) max_secondary_culls = secondary_culls; if (tests > max_tests) { max_tests = tests; max_point = point; max_point_dist = sqrtf(min_dist_sq); printf("max_tests: %d max_point: %f %f %f max_point_dist: %f max_second_culls: %d\n", max_tests, max_point[0], max_point[1], max_point[2], max_point_dist, max_secondary_culls); FILE* f = fopen("test_history.txt", "w"); for (int i=0; i < test_history.size(); ++i) { fprintf(f, "%d, %f, %f, %f, %f, 
%f, %f\n", test_history[i], test_centers[i][0], test_centers[i][1], test_centers[i][2], test_extents[i][0], test_extents[i][1], test_extents[i][2]); } fclose(f); } #endif // check if we found a point, and write outputs if (min_dist_sq < max_dist*max_dist) { u = 1.0f - min_v - min_w; v = min_v; face = min_face; return true; } else { return false; } } // returns true if there is a point (strictly) > distance min_dist CUDA_CALLABLE inline bool mesh_query_furthest_point_no_sign(uint64_t id, const vec3& point, float min_dist, int& face, float& u, float& v) { Mesh mesh = mesh_get(id); int stack[32]; stack[0] = *mesh.bvh.root; int count = 1; float max_dist_sq = min_dist*min_dist; int min_face; float min_v; float min_w; #if BVH_DEBUG int tests = 0; int secondary_culls = 0; std::vector<int> test_history; std::vector<vec3> test_centers; std::vector<vec3> test_extents; #endif while (count) { const int nodeIndex = stack[--count]; BVHPackedNodeHalf lower = mesh.bvh.node_lowers[nodeIndex]; BVHPackedNodeHalf upper = mesh.bvh.node_uppers[nodeIndex]; // re-test distance float node_dist_sq = furthest_distance_to_aabb_sq(point, vec3(lower.x, lower.y, lower.z), vec3(upper.x, upper.y, upper.z)); // if maximum distance to this node is less than our existing furthest max then skip if (node_dist_sq < max_dist_sq) { #if BVH_DEBUG secondary_culls++; #endif continue; } const int left_index = lower.i; const int right_index = upper.i; if (lower.b) { // compute closest point on tri int i = mesh.indices[left_index*3+0]; int j = mesh.indices[left_index*3+1]; int k = mesh.indices[left_index*3+2]; vec3 p = mesh.points[i]; vec3 q = mesh.points[j]; vec3 r = mesh.points[k]; vec3 e0 = q-p; vec3 e1 = r-p; vec3 e2 = r-q; vec3 normal = cross(e0, e1); // sliver detection if (length(normal)/(dot(e0,e0) + dot(e1,e1) + dot(e2,e2)) < 1.e-6f) continue; vec2 barycentric = furthest_point_to_triangle(p, q, r, point); float u = barycentric[0]; float v = barycentric[1]; float w = 1.f - u - v; vec3 c = u*p + v*q + w*r; float dist_sq = length_sq(c-point); if (dist_sq > max_dist_sq) { max_dist_sq = dist_sq; min_v = v; min_w = w; min_face = left_index; } #if BVH_DEBUG tests++; bounds3 b; b = bounds_union(b, p); b = bounds_union(b, q); b = bounds_union(b, r); if (distance_to_aabb_sq(point, b.lower, b.upper) > max_dist*max_dist) { //if (dist_sq < max_dist*max_dist) test_history.push_back(left_index); test_centers.push_back(b.center()); test_extents.push_back(b.edges()); } #endif } else { BVHPackedNodeHalf left_lower = mesh.bvh.node_lowers[left_index]; BVHPackedNodeHalf left_upper = mesh.bvh.node_uppers[left_index]; BVHPackedNodeHalf right_lower = mesh.bvh.node_lowers[right_index]; BVHPackedNodeHalf right_upper = mesh.bvh.node_uppers[right_index]; float left_dist_sq = furthest_distance_to_aabb_sq(point, vec3(left_lower.x, left_lower.y, left_lower.z), vec3(left_upper.x, left_upper.y, left_upper.z)); float right_dist_sq = furthest_distance_to_aabb_sq(point, vec3(right_lower.x, right_lower.y, right_lower.z), vec3(right_upper.x, right_upper.y, right_upper.z)); float left_score = left_dist_sq; float right_score = right_dist_sq; if (left_score > right_score) { // put left on top of the stack if (right_dist_sq > max_dist_sq) stack[count++] = right_index; if (left_dist_sq > max_dist_sq) stack[count++] = left_index; } else { // put right on top of the stack if (left_dist_sq > max_dist_sq) stack[count++] = left_index; if (right_dist_sq > max_dist_sq) stack[count++] = right_index; } } } #if BVH_DEBUG printf("%d\n", tests); static int max_tests = 0; static 
vec3 max_point; static float max_point_dist = 0.0f; static int max_secondary_culls = 0; if (secondary_culls > max_secondary_culls) max_secondary_culls = secondary_culls; if (tests > max_tests) { max_tests = tests; max_point = point; max_point_dist = sqrtf(max_dist_sq); printf("max_tests: %d max_point: %f %f %f max_point_dist: %f max_second_culls: %d\n", max_tests, max_point[0], max_point[1], max_point[2], max_point_dist, max_secondary_culls); FILE* f = fopen("test_history.txt", "w"); for (int i=0; i < test_history.size(); ++i) { fprintf(f, "%d, %f, %f, %f, %f, %f, %f\n", test_history[i], test_centers[i][0], test_centers[i][1], test_centers[i][2], test_extents[i][0], test_extents[i][1], test_extents[i][2]); } fclose(f); } #endif // check if we found a point, and write outputs if (max_dist_sq > min_dist*min_dist) { u = 1.0f - min_v - min_w; v = min_v; face = min_face; return true; } else { return false; } } // returns true if there is a point (strictly) < distance max_dist CUDA_CALLABLE inline bool mesh_query_point_sign_normal(uint64_t id, const vec3& point, float max_dist, float& inside, int& face, float& u, float& v, const float epsilon = 1e-3f) { Mesh mesh = mesh_get(id); int stack[32]; stack[0] = *mesh.bvh.root; int count = 1; float min_dist = max_dist; int min_face; float min_v; float min_w; vec3 accumulated_angle_weighted_normal; #if BVH_DEBUG int tests = 0; int secondary_culls = 0; std::vector<int> test_history; std::vector<vec3> test_centers; std::vector<vec3> test_extents; #endif float epsilon_min_dist = mesh.average_edge_length * epsilon; float epsilon_min_dist_sq = epsilon_min_dist*epsilon_min_dist; while (count) { const int nodeIndex = stack[--count]; BVHPackedNodeHalf lower = mesh.bvh.node_lowers[nodeIndex]; BVHPackedNodeHalf upper = mesh.bvh.node_uppers[nodeIndex]; // re-test distance float node_dist_sq = distance_to_aabb_sq(point, vec3(lower.x, lower.y, lower.z), vec3(upper.x, upper.y, upper.z)); if (node_dist_sq > (min_dist + epsilon_min_dist)*(min_dist + epsilon_min_dist)) { #if BVH_DEBUG secondary_culls++; #endif continue; } const int left_index = lower.i; const int right_index = upper.i; if (lower.b) { // compute closest point on tri int i = mesh.indices[left_index*3+0]; int j = mesh.indices[left_index*3+1]; int k = mesh.indices[left_index*3+2]; vec3 p = mesh.points[i]; vec3 q = mesh.points[j]; vec3 r = mesh.points[k]; vec3 e0 = q-p; vec3 e1 = r-p; vec3 e2 = r-q; vec3 normal = cross(e0, e1); // sliver detection float e0_norm_sq = dot(e0,e0); float e1_norm_sq = dot(e1,e1); float e2_norm_sq = dot(e2,e2); if (length(normal)/(e0_norm_sq + e1_norm_sq + e2_norm_sq) < 1.e-6f) continue; vec2 barycentric = closest_point_to_triangle(p, q, r, point); float u = barycentric[0]; float v = barycentric[1]; float w = 1.f - u - v; vec3 c = u*p + v*q + w*r; float dist = sqrtf(length_sq(c-point)); if (dist < min_dist + epsilon_min_dist) { float weight = 0.0f; vec3 cp = c-p; vec3 cq = c-q; vec3 cr = c-r; float len_cp_sq = length_sq(cp); float len_cq_sq = length_sq(cq); float len_cr_sq = length_sq(cr); // Check if near vertex if (len_cp_sq < epsilon_min_dist_sq) { // Vertex 0 is the closest feature weight = acosf(dot(normalize(e0), normalize(e1))); } else if (len_cq_sq < epsilon_min_dist_sq) { // Vertex 1 is the closest feature weight = acosf(dot(normalize(e2), normalize(-e0))); } else if (len_cr_sq < epsilon_min_dist_sq) { // Vertex 2 is the closest feature weight = acosf(dot(normalize(-e1), normalize(-e2))); } else { float e0cp = dot(e0, cp); float e2cq = dot(e2, cq); float e1cp = dot(e1, 
cp); if ((len_cp_sq*e0_norm_sq-e0cp*e0cp < epsilon_min_dist_sq*e0_norm_sq) || (len_cq_sq*e2_norm_sq-e2cq*e2cq < epsilon_min_dist_sq*e2_norm_sq) || (len_cp_sq*e1_norm_sq-e1cp*e1cp < epsilon_min_dist_sq*e1_norm_sq)) { // One of the edge weight = 3.14159265359f; // PI } else { weight = 2.0f*3.14159265359f; // 2*PI } } if (dist > min_dist - epsilon_min_dist) { // Treat as equal accumulated_angle_weighted_normal += weight*normalize(normal); if (dist < min_dist) { min_dist = dist; min_v = v; min_w = w; min_face = left_index; } } else { // Less min_dist = dist; min_v = v; min_w = w; min_face = left_index; accumulated_angle_weighted_normal = weight*normalize(normal); } } #if BVH_DEBUG tests++; bounds3 b; b = bounds_union(b, p); b = bounds_union(b, q); b = bounds_union(b, r); if (distance_to_aabb_sq(point, b.lower, b.upper) < (max_dist+epsilon_min_dist)*(max_dist+epsilon_min_dist)) { //if (dist_sq < max_dist*max_dist) test_history.push_back(left_index); test_centers.push_back(b.center()); test_extents.push_back(b.edges()); } #endif } else { BVHPackedNodeHalf left_lower = mesh.bvh.node_lowers[left_index]; BVHPackedNodeHalf left_upper = mesh.bvh.node_uppers[left_index]; BVHPackedNodeHalf right_lower = mesh.bvh.node_lowers[right_index]; BVHPackedNodeHalf right_upper = mesh.bvh.node_uppers[right_index]; float left_dist_sq = distance_to_aabb_sq(point, vec3(left_lower.x, left_lower.y, left_lower.z), vec3(left_upper.x, left_upper.y, left_upper.z)); float right_dist_sq = distance_to_aabb_sq(point, vec3(right_lower.x, right_lower.y, right_lower.z), vec3(right_upper.x, right_upper.y, right_upper.z)); float left_score = left_dist_sq; float right_score = right_dist_sq; if (left_score < right_score) { // put left on top of the stack if (right_dist_sq < (min_dist + epsilon_min_dist) * (min_dist + epsilon_min_dist)) stack[count++] = right_index; if (left_dist_sq < (min_dist + epsilon_min_dist) * (min_dist + epsilon_min_dist)) stack[count++] = left_index; } else { // put right on top of the stack if (left_dist_sq < (min_dist + epsilon_min_dist) * (min_dist + epsilon_min_dist)) stack[count++] = left_index; if (right_dist_sq < (min_dist + epsilon_min_dist) * (min_dist + epsilon_min_dist)) stack[count++] = right_index; } } } #if BVH_DEBUG printf("%d\n", tests); static int max_tests = 0; static vec3 max_point; static float max_point_dist = 0.0f; static int max_secondary_culls = 0; if (secondary_culls > max_secondary_culls) max_secondary_culls = secondary_culls; if (tests > max_tests) { max_tests = tests; max_point = point; max_point_dist = min_dist; printf("max_tests: %d max_point: %f %f %f max_point_dist: %f max_second_culls: %d\n", max_tests, max_point[0], max_point[1], max_point[2], max_point_dist, max_secondary_culls); FILE* f = fopen("test_history.txt", "w"); for (int i=0; i < test_history.size(); ++i) { fprintf(f, "%d, %f, %f, %f, %f, %f, %f\n", test_history[i], test_centers[i][0], test_centers[i][1], test_centers[i][2], test_extents[i][0], test_extents[i][1], test_extents[i][2]); } fclose(f); } #endif // check if we found a point, and write outputs if (min_dist < max_dist) { u = 1.0f - min_v - min_w; v = min_v; face = min_face; // determine inside outside using ray-cast parity check //inside = mesh_query_inside(id, point); int i = mesh.indices[min_face*3+0]; int j = mesh.indices[min_face*3+1]; int k = mesh.indices[min_face*3+2]; vec3 p = mesh.points[i]; vec3 q = mesh.points[j]; vec3 r = mesh.points[k]; vec3 closest_point = p*u+q*v+r*min_w; if (dot(accumulated_angle_weighted_normal, point-closest_point) > 0.0) { 
inside = 1.0f; } else { inside = -1.0f; } return true; } else { return false; } } CUDA_CALLABLE inline float solid_angle_iterative(uint64_t id, const vec3& p, const float accuracy_sq) { Mesh mesh = mesh_get(id); int stack[32]; int at_child[32]; // 0 for left, 1 for right, 2 for done float angle[32]; stack[0] = *mesh.bvh.root; at_child[0] = 0; int count = 1; angle[0] = 0.0f; while (count) { const int nodeIndex = stack[count - 1]; BVHPackedNodeHalf lower = mesh.bvh.node_lowers[nodeIndex]; BVHPackedNodeHalf upper = mesh.bvh.node_uppers[nodeIndex]; const int left_index = lower.i; const int right_index = upper.i; if (lower.b) { // compute closest point on tri const int leaf_index = left_index; angle[count - 1] = robust_solid_angle(mesh.points[mesh.indices[leaf_index*3+0]], mesh.points[mesh.indices[leaf_index*3+1]], mesh.points[mesh.indices[leaf_index*3+2]], p); //printf("Leaf %d, got %f\n", leaf_index, my_data[count - 1]); count--; } else { // See if I have to descend if (at_child[count - 1] == 0) { // First visit bool des = evaluate_node_solid_angle(p, &mesh.solid_angle_props[nodeIndex], angle[count - 1], accuracy_sq); //printf("Non-Leaf %d, got %f\n", nodeIndex, angle[count - 1]); if (des) { // Go left stack[count] = left_index; at_child[count - 1] = 1; angle[count] = 0.0f; at_child[count] = 0; count++; } else { // Does not descend done count--; } } else if (at_child[count - 1] == 1) { // Add data to parent angle[count - 1] += angle[count]; // Go right stack[count] = right_index; at_child[count - 1] = 2; angle[count] = 0.0f; at_child[count] = 0; count++; } else { // Descend both sides already angle[count - 1] += angle[count]; count--; } } } return angle[0]; } CUDA_CALLABLE inline float mesh_query_winding_number(uint64_t id, const vec3& p, const float accuracy) { float angle = solid_angle_iterative(id, p, accuracy*accuracy); return angle * 0.07957747154; // divided by 4 PI } // returns true if there is a point (strictly) < distance max_dist CUDA_CALLABLE inline bool mesh_query_point_sign_winding_number(uint64_t id, const vec3& point, float max_dist, float& inside, int& face, float& u, float& v, const float accuracy, const float winding_number_threshold) { Mesh mesh = mesh_get(id); int stack[32]; stack[0] = *mesh.bvh.root; int count = 1; float min_dist_sq = max_dist*max_dist; int min_face; float min_v; float min_w; #if BVH_DEBUG int tests = 0; int secondary_culls = 0; std::vector<int> test_history; std::vector<vec3> test_centers; std::vector<vec3> test_extents; #endif while (count) { const int nodeIndex = stack[--count]; BVHPackedNodeHalf lower = mesh.bvh.node_lowers[nodeIndex]; BVHPackedNodeHalf upper = mesh.bvh.node_uppers[nodeIndex]; // re-test distance float node_dist_sq = distance_to_aabb_sq(point, vec3(lower.x, lower.y, lower.z), vec3(upper.x, upper.y, upper.z)); if (node_dist_sq > min_dist_sq) { #if BVH_DEBUG secondary_culls++; #endif continue; } const int left_index = lower.i; const int right_index = upper.i; if (lower.b) { // compute closest point on tri int i = mesh.indices[left_index*3+0]; int j = mesh.indices[left_index*3+1]; int k = mesh.indices[left_index*3+2]; vec3 p = mesh.points[i]; vec3 q = mesh.points[j]; vec3 r = mesh.points[k]; vec3 e0 = q-p; vec3 e1 = r-p; vec3 e2 = r-q; vec3 normal = cross(e0, e1); // sliver detection if (length(normal)/(dot(e0,e0) + dot(e1,e1) + dot(e2,e2)) < 1.e-6f) continue; vec2 barycentric = closest_point_to_triangle(p, q, r, point); float u = barycentric[0]; float v = barycentric[1]; float w = 1.f - u - v; vec3 c = u*p + v*q + w*r; float dist_sq = 
length_sq(c-point); if (dist_sq < min_dist_sq) { min_dist_sq = dist_sq; min_v = v; min_w = w; min_face = left_index; } #if BVH_DEBUG tests++; bounds3 b; b = bounds_union(b, p); b = bounds_union(b, q); b = bounds_union(b, r); if (distance_to_aabb_sq(point, b.lower, b.upper) < max_dist*max_dist) { //if (dist_sq < max_dist*max_dist) test_history.push_back(left_index); test_centers.push_back(b.center()); test_extents.push_back(b.edges()); } #endif } else { BVHPackedNodeHalf left_lower = mesh.bvh.node_lowers[left_index]; BVHPackedNodeHalf left_upper = mesh.bvh.node_uppers[left_index]; BVHPackedNodeHalf right_lower = mesh.bvh.node_lowers[right_index]; BVHPackedNodeHalf right_upper = mesh.bvh.node_uppers[right_index]; float left_dist_sq = distance_to_aabb_sq(point, vec3(left_lower.x, left_lower.y, left_lower.z), vec3(left_upper.x, left_upper.y, left_upper.z)); float right_dist_sq = distance_to_aabb_sq(point, vec3(right_lower.x, right_lower.y, right_lower.z), vec3(right_upper.x, right_upper.y, right_upper.z)); float left_score = left_dist_sq; float right_score = right_dist_sq; if (left_score < right_score) { // put left on top of the stack if (right_dist_sq < min_dist_sq) stack[count++] = right_index; if (left_dist_sq < min_dist_sq) stack[count++] = left_index; } else { // put right on top of the stack if (left_dist_sq < min_dist_sq) stack[count++] = left_index; if (right_dist_sq < min_dist_sq) stack[count++] = right_index; } } } #if BVH_DEBUG printf("%d\n", tests); static int max_tests = 0; static vec3 max_point; static float max_point_dist = 0.0f; static int max_secondary_culls = 0; if (secondary_culls > max_secondary_culls) max_secondary_culls = secondary_culls; if (tests > max_tests) { max_tests = tests; max_point = point; max_point_dist = sqrtf(min_dist_sq); printf("max_tests: %d max_point: %f %f %f max_point_dist: %f max_second_culls: %d\n", max_tests, max_point[0], max_point[1], max_point[2], max_point_dist, max_secondary_culls); FILE* f = fopen("test_history.txt", "w"); for (int i=0; i < test_history.size(); ++i) { fprintf(f, "%d, %f, %f, %f, %f, %f, %f\n", test_history[i], test_centers[i][0], test_centers[i][1], test_centers[i][2], test_extents[i][0], test_extents[i][1], test_extents[i][2]); } fclose(f); } #endif // check if we found a point, and write outputs if (min_dist_sq < max_dist*max_dist) { u = 1.0f - min_v - min_w; v = min_v; face = min_face; // determine inside outside using ray-cast parity check if (!mesh.solid_angle_props) { inside = mesh_query_inside(id, point); } else { float winding_number = mesh_query_winding_number(id, point, accuracy); inside = (winding_number > winding_number_threshold) ? 
-1.0f:1.0f; } return true; } else { return false; } } CUDA_CALLABLE inline void adj_mesh_query_point_no_sign(uint64_t id, const vec3& point, float max_dist, const int& face, const float& u, const float& v, uint64_t adj_id, vec3& adj_point, float& adj_max_dist, int& adj_face, float& adj_u, float& adj_v, bool& adj_ret) { Mesh mesh = mesh_get(id); // face is determined by BVH in forward pass int i = mesh.indices[face*3+0]; int j = mesh.indices[face*3+1]; int k = mesh.indices[face*3+2]; vec3 p = mesh.points[i]; vec3 q = mesh.points[j]; vec3 r = mesh.points[k]; vec3 adj_p, adj_q, adj_r; vec2 adj_uv(adj_u, adj_v); adj_closest_point_to_triangle(p, q, r, point, adj_p, adj_q, adj_r, adj_point, adj_uv); } CUDA_CALLABLE inline void adj_mesh_query_furthest_point_no_sign(uint64_t id, const vec3& point, float min_dist, const int& face, const float& u, const float& v, uint64_t adj_id, vec3& adj_point, float& adj_min_dist, int& adj_face, float& adj_u, float& adj_v, bool& adj_ret) { Mesh mesh = mesh_get(id); // face is determined by BVH in forward pass int i = mesh.indices[face*3+0]; int j = mesh.indices[face*3+1]; int k = mesh.indices[face*3+2]; vec3 p = mesh.points[i]; vec3 q = mesh.points[j]; vec3 r = mesh.points[k]; vec3 adj_p, adj_q, adj_r; vec2 adj_uv(adj_u, adj_v); adj_closest_point_to_triangle(p, q, r, point, adj_p, adj_q, adj_r, adj_point, adj_uv); // Todo for Miles :> } CUDA_CALLABLE inline void adj_mesh_query_point(uint64_t id, const vec3& point, float max_dist, const float& inside, const int& face, const float& u, const float& v, uint64_t adj_id, vec3& adj_point, float& adj_max_dist, float& adj_inside, int& adj_face, float& adj_u, float& adj_v, bool& adj_ret) { adj_mesh_query_point_no_sign(id, point, max_dist, face, u, v, adj_id, adj_point, adj_max_dist, adj_face, adj_u, adj_v, adj_ret); } CUDA_CALLABLE inline void adj_mesh_query_point_sign_normal(uint64_t id, const vec3& point, float max_dist, const float& inside, const int& face, const float& u, const float& v, const float epsilon, uint64_t adj_id, vec3& adj_point, float& adj_max_dist, float& adj_inside, int& adj_face, float& adj_u, float& adj_v, float& adj_epsilon, bool& adj_ret) { adj_mesh_query_point_no_sign(id, point, max_dist, face, u, v, adj_id, adj_point, adj_max_dist, adj_face, adj_u, adj_v, adj_ret); } CUDA_CALLABLE inline void adj_mesh_query_point_sign_winding_number(uint64_t id, const vec3& point, float max_dist, const float& inside, const int& face, const float& u, const float& v, const float accuracy, const float winding_number_threshold, uint64_t adj_id, vec3& adj_point, float& adj_max_dist, float& adj_inside, int& adj_face, float& adj_u, float& adj_v, float& adj_accuracy, float& adj_winding_number_threshold, bool& adj_ret) { adj_mesh_query_point_no_sign(id, point, max_dist, face, u, v, adj_id, adj_point, adj_max_dist, adj_face, adj_u, adj_v, adj_ret); } // Stores the result of querying the closest point on a mesh. struct mesh_query_point_t { CUDA_CALLABLE mesh_query_point_t() : result(false), sign(0.0f), face(0), u(0.0f), v(0.0f) {} // Required for adjoint computations. 
CUDA_CALLABLE inline mesh_query_point_t& operator+=(const mesh_query_point_t& other) { result += other.result; sign += other.sign; face += other.face; u += other.u; v += other.v; return *this; } bool result; float sign; int face; float u; float v; }; CUDA_CALLABLE inline mesh_query_point_t mesh_query_point(uint64_t id, const vec3& point, float max_dist) { mesh_query_point_t query; query.result = mesh_query_point(id, point, max_dist, query.sign, query.face, query.u, query.v); return query; } CUDA_CALLABLE inline mesh_query_point_t mesh_query_point_no_sign(uint64_t id, const vec3& point, float max_dist) { mesh_query_point_t query; query.sign = 0.0; query.result = mesh_query_point_no_sign(id, point, max_dist, query.face, query.u, query.v); return query; } CUDA_CALLABLE inline mesh_query_point_t mesh_query_furthest_point_no_sign(uint64_t id, const vec3& point, float min_dist) { mesh_query_point_t query; query.sign = 0.0; query.result = mesh_query_furthest_point_no_sign(id, point, min_dist, query.face, query.u, query.v); return query; } CUDA_CALLABLE inline mesh_query_point_t mesh_query_point_sign_normal(uint64_t id, const vec3& point, float max_dist, const float epsilon = 1e-3f) { mesh_query_point_t query; query.result = mesh_query_point_sign_normal(id, point, max_dist, query.sign, query.face, query.u, query.v, epsilon); return query; } CUDA_CALLABLE inline mesh_query_point_t mesh_query_point_sign_winding_number(uint64_t id, const vec3& point, float max_dist, float accuracy, float winding_number_threshold) { mesh_query_point_t query; query.result = mesh_query_point_sign_winding_number(id, point, max_dist, query.sign, query.face, query.u, query.v, accuracy, winding_number_threshold); return query; } CUDA_CALLABLE inline void adj_mesh_query_point(uint64_t id, const vec3& point, float max_dist, const mesh_query_point_t& ret, uint64_t adj_id, vec3& adj_point, float& adj_max_dist, mesh_query_point_t& adj_ret) { adj_mesh_query_point(id, point, max_dist, ret.sign, ret.face, ret.u, ret.v, adj_id, adj_point, adj_max_dist, adj_ret.sign, adj_ret.face, adj_ret.u, adj_ret.v, adj_ret.result); } CUDA_CALLABLE inline void adj_mesh_query_point_no_sign(uint64_t id, const vec3& point, float max_dist, const mesh_query_point_t& ret, uint64_t adj_id, vec3& adj_point, float& adj_max_dist, mesh_query_point_t& adj_ret) { adj_mesh_query_point_no_sign(id, point, max_dist, ret.face, ret.u, ret.v, adj_id, adj_point, adj_max_dist, adj_ret.face, adj_ret.u, adj_ret.v, adj_ret.result); } CUDA_CALLABLE inline void adj_mesh_query_furthest_point_no_sign(uint64_t id, const vec3& point, float min_dist, const mesh_query_point_t& ret, uint64_t adj_id, vec3& adj_point, float& adj_min_dist, mesh_query_point_t& adj_ret) { adj_mesh_query_furthest_point_no_sign(id, point, min_dist, ret.face, ret.u, ret.v, adj_id, adj_point, adj_min_dist, adj_ret.face, adj_ret.u, adj_ret.v, adj_ret.result); } CUDA_CALLABLE inline void adj_mesh_query_point_sign_normal(uint64_t id, const vec3& point, float max_dist, float epsilon, const mesh_query_point_t& ret, uint64_t adj_id, vec3& adj_point, float& adj_max_dist, float& adj_epsilon, mesh_query_point_t& adj_ret) { adj_mesh_query_point_sign_normal(id, point, max_dist, ret.sign, ret.face, ret.u, ret.v, epsilon, adj_id, adj_point, adj_max_dist, adj_ret.sign, adj_ret.face, adj_ret.u, adj_ret.v, epsilon, adj_ret.result); } CUDA_CALLABLE inline void adj_mesh_query_point_sign_winding_number(uint64_t id, const vec3& point, float max_dist, float accuracy, float winding_number_threshold, const mesh_query_point_t& 
ret, uint64_t adj_id, vec3& adj_point, float& adj_max_dist, float& adj_accuracy, float& adj_winding_number_threshold, mesh_query_point_t& adj_ret) { adj_mesh_query_point_sign_winding_number(id, point, max_dist, ret.sign, ret.face, ret.u, ret.v, accuracy, winding_number_threshold, adj_id, adj_point, adj_max_dist, adj_ret.sign, adj_ret.face, adj_ret.u, adj_ret.v, adj_accuracy, adj_winding_number_threshold, adj_ret.result); } CUDA_CALLABLE inline bool mesh_query_ray(uint64_t id, const vec3& start, const vec3& dir, float max_t, float& t, float& u, float& v, float& sign, vec3& normal, int& face) { Mesh mesh = mesh_get(id); int stack[32]; stack[0] = *mesh.bvh.root; int count = 1; vec3 rcp_dir = vec3(1.0f/dir[0], 1.0f/dir[1], 1.0f/dir[2]); float min_t = max_t; int min_face; float min_u; float min_v; float min_sign = 1.0f; vec3 min_normal; while (count) { const int nodeIndex = stack[--count]; BVHPackedNodeHalf lower = mesh.bvh.node_lowers[nodeIndex]; BVHPackedNodeHalf upper = mesh.bvh.node_uppers[nodeIndex]; // todo: switch to robust ray-aabb, or expand bounds in build stage float eps = 1.e-3f; float t = 0.0f; bool hit = intersect_ray_aabb(start, rcp_dir, vec3(lower.x-eps, lower.y-eps, lower.z-eps), vec3(upper.x+eps, upper.y+eps, upper.z+eps), t); if (hit && t < min_t) { const int left_index = lower.i; const int right_index = upper.i; if (lower.b) { // compute closest point on tri int i = mesh.indices[left_index*3+0]; int j = mesh.indices[left_index*3+1]; int k = mesh.indices[left_index*3+2]; vec3 p = mesh.points[i]; vec3 q = mesh.points[j]; vec3 r = mesh.points[k]; float t, u, v, sign; vec3 n; if (intersect_ray_tri_woop(start, dir, p, q, r, t, u, v, sign, &n)) { if (t < min_t && t >= 0.0f) { min_t = t; min_face = left_index; min_u = u; min_v = v; min_sign = sign; min_normal = n; } } } else { stack[count++] = left_index; stack[count++] = right_index; } } } if (min_t < max_t) { // write outputs u = min_u; v = min_v; sign = min_sign; t = min_t; normal = normalize(min_normal); face = min_face; return true; } else { return false; } } CUDA_CALLABLE inline void adj_mesh_query_ray( uint64_t id, const vec3& start, const vec3& dir, float max_t, float t, float u, float v, float sign, const vec3& n, int face, uint64_t adj_id, vec3& adj_start, vec3& adj_dir, float& adj_max_t, float& adj_t, float& adj_u, float& adj_v, float& adj_sign, vec3& adj_n, int& adj_face, bool& adj_ret) { Mesh mesh = mesh_get(id); // face is determined by BVH in forward pass int i = mesh.indices[face*3+0]; int j = mesh.indices[face*3+1]; int k = mesh.indices[face*3+2]; vec3 a = mesh.points[i]; vec3 b = mesh.points[j]; vec3 c = mesh.points[k]; vec3 adj_a, adj_b, adj_c; adj_intersect_ray_tri_woop(start, dir, a, b, c, t, u, v, sign, n, adj_start, adj_dir, adj_a, adj_b, adj_c, adj_t, adj_u, adj_v, adj_sign, adj_n, adj_ret); } // Stores the result of querying the closest point on a mesh. struct mesh_query_ray_t { CUDA_CALLABLE mesh_query_ray_t() : result(false), sign(0.0f), face(0), t(0.0f), u(0.0f), v(0.0f), normal() { } // Required for adjoint computations. 
CUDA_CALLABLE inline mesh_query_ray_t& operator+=(const mesh_query_ray_t& other) { result += other.result; sign += other.sign; face += other.face; t += other.t; u += other.u; v += other.v; normal += other.normal; return *this; } bool result; float sign; int face; float t; float u; float v; vec3 normal; }; CUDA_CALLABLE inline mesh_query_ray_t mesh_query_ray(uint64_t id, const vec3& start, const vec3& dir, float max_t) { mesh_query_ray_t query; query.result = mesh_query_ray(id, start, dir, max_t, query.t, query.u, query.v, query.sign, query.normal, query.face); return query; } CUDA_CALLABLE inline void adj_mesh_query_ray( uint64_t id, const vec3& start, const vec3& dir, float max_t, const mesh_query_ray_t& ret, uint64_t adj_id, vec3& adj_start, vec3& adj_dir, float& adj_max_t, mesh_query_ray_t& adj_ret ) { adj_mesh_query_ray( id, start, dir, max_t, ret.t, ret.u, ret.v, ret.sign, ret.normal, ret.face, adj_id, adj_start, adj_dir, adj_max_t, adj_ret.t, adj_ret.u, adj_ret.v, adj_ret.sign, adj_ret.normal, adj_ret.face, adj_ret.result ); } // determine if a point is inside (ret < 0 ) or outside the mesh (ret > 0) CUDA_CALLABLE inline float mesh_query_inside(uint64_t id, const vec3& p) { float t, u, v, sign; vec3 n; int face; int vote = 0; for(int i = 0; i <3; ++i) { if (mesh_query_ray(id, p, vec3(float(i==0), float(i==1), float(i==2)), FLT_MAX, t, u, v, sign, n, face) && sign < 0) { vote++; } } if (vote == 3) return -1.0f; else return 1.0f; } // stores state required to traverse the BVH nodes that // overlap with a query AABB. struct mesh_query_aabb_t { CUDA_CALLABLE mesh_query_aabb_t() : mesh(), stack(), count(0), input_lower(), input_upper(), face(0) {} // Required for adjoint computations. CUDA_CALLABLE inline mesh_query_aabb_t& operator+=(const mesh_query_aabb_t& other) { return *this; } // Mesh Id Mesh mesh; // BVH traversal stack: int stack[32]; int count; // inputs wp::vec3 input_lower; wp::vec3 input_upper; // Face int face; }; CUDA_CALLABLE inline mesh_query_aabb_t mesh_query_aabb( uint64_t id, const vec3& lower, const vec3& upper) { // This routine traverses the BVH tree until it finds // the first triangle with an overlapping bvh. // initialize empty mesh_query_aabb_t query; query.face = -1; Mesh mesh = mesh_get(id); query.mesh = mesh; query.stack[0] = *mesh.bvh.root; query.count = 1; query.input_lower = lower; query.input_upper = upper; wp::bounds3 input_bounds(query.input_lower, query.input_upper); // Navigate through the bvh, find the first overlapping leaf node. while (query.count) { const int nodeIndex = query.stack[--query.count]; BVHPackedNodeHalf node_lower = mesh.bvh.node_lowers[nodeIndex]; BVHPackedNodeHalf node_upper = mesh.bvh.node_uppers[nodeIndex]; wp::vec3 lower_pos(node_lower.x, node_lower.y, node_lower.z); wp::vec3 upper_pos(node_upper.x, node_upper.y, node_upper.z); wp::bounds3 current_bounds(lower_pos, upper_pos); if (!input_bounds.overlaps(current_bounds)) { // Skip this box, it doesn't overlap with our target box. continue; } const int left_index = node_lower.i; const int right_index = node_upper.i; // Make bounds from this AABB if (node_lower.b) { // found very first triangle index. 
// Back up one level and return query.stack[query.count++] = nodeIndex; return query; } else { query.stack[query.count++] = left_index; query.stack[query.count++] = right_index; } } return query; } //Stub CUDA_CALLABLE inline void adj_mesh_query_aabb(uint64_t id, const vec3& lower, const vec3& upper, uint64_t, vec3&, vec3&, mesh_query_aabb_t&) { } CUDA_CALLABLE inline bool mesh_query_aabb_next(mesh_query_aabb_t& query, int& index) { Mesh mesh = query.mesh; wp::bounds3 input_bounds(query.input_lower, query.input_upper); // Navigate through the bvh, find the first overlapping leaf node. while (query.count) { const int nodeIndex = query.stack[--query.count]; BVHPackedNodeHalf node_lower = mesh.bvh.node_lowers[nodeIndex]; BVHPackedNodeHalf node_upper = mesh.bvh.node_uppers[nodeIndex]; wp::vec3 lower_pos(node_lower.x, node_lower.y, node_lower.z); wp::vec3 upper_pos(node_upper.x, node_upper.y, node_upper.z); wp::bounds3 current_bounds(lower_pos, upper_pos); if (!input_bounds.overlaps(current_bounds)) { // Skip this box, it doesn't overlap with our target box. continue; } const int left_index = node_lower.i; const int right_index = node_upper.i; // Make bounds from this AABB if (node_lower.b) { // found very first triangle index query.face = left_index; index = left_index; return true; } else { query.stack[query.count++] = left_index; query.stack[query.count++] = right_index; } } return false; } CUDA_CALLABLE inline int iter_next(mesh_query_aabb_t& query) { return query.face; } CUDA_CALLABLE inline bool iter_cmp(mesh_query_aabb_t& query) { bool finished = mesh_query_aabb_next(query, query.face); return finished; } CUDA_CALLABLE inline mesh_query_aabb_t iter_reverse(const mesh_query_aabb_t& query) { // can't reverse BVH queries, users should not rely on neighbor ordering return query; } // stub CUDA_CALLABLE inline void adj_mesh_query_aabb_next(mesh_query_aabb_t& query, int& index, mesh_query_aabb_t&, int&, bool&) { } CUDA_CALLABLE inline vec3 mesh_eval_position(uint64_t id, int tri, float u, float v) { Mesh mesh = mesh_get(id); if (!mesh.points) return vec3(); assert(tri < mesh.num_tris); int i = mesh.indices[tri*3+0]; int j = mesh.indices[tri*3+1]; int k = mesh.indices[tri*3+2]; vec3 p = mesh.points[i]; vec3 q = mesh.points[j]; vec3 r = mesh.points[k]; return p*u + q*v + r*(1.0f-u-v); } CUDA_CALLABLE inline vec3 mesh_eval_velocity(uint64_t id, int tri, float u, float v) { Mesh mesh = mesh_get(id); if (!mesh.velocities) return vec3(); assert(tri < mesh.num_tris); int i = mesh.indices[tri*3+0]; int j = mesh.indices[tri*3+1]; int k = mesh.indices[tri*3+2]; vec3 vp = mesh.velocities[i]; vec3 vq = mesh.velocities[j]; vec3 vr = mesh.velocities[k]; return vp*u + vq*v + vr*(1.0f-u-v); } CUDA_CALLABLE inline void adj_mesh_eval_position(uint64_t id, int tri, float u, float v, uint64_t& adj_id, int& adj_tri, float& adj_u, float& adj_v, const vec3& adj_ret) { Mesh mesh = mesh_get(id); if (!mesh.points) return; assert(tri < mesh.num_tris); int i = mesh.indices[tri*3+0]; int j = mesh.indices[tri*3+1]; int k = mesh.indices[tri*3+2]; vec3 p = mesh.points[i]; vec3 q = mesh.points[j]; vec3 r = mesh.points[k]; adj_u += (p[0] - r[0]) * adj_ret[0] + (p[1] - r[1]) * adj_ret[1] + (p[2] - r[2]) * adj_ret[2]; adj_v += (q[0] - r[0]) * adj_ret[0] + (q[1] - r[1]) * adj_ret[1] + (q[2] - r[2]) * adj_ret[2]; } CUDA_CALLABLE inline void adj_mesh_eval_velocity(uint64_t id, int tri, float u, float v, uint64_t& adj_id, int& adj_tri, float& adj_u, float& adj_v, const vec3& adj_ret) { Mesh mesh = mesh_get(id); if (!mesh.velocities) 
return; assert(tri < mesh.num_tris); int i = mesh.indices[tri*3+0]; int j = mesh.indices[tri*3+1]; int k = mesh.indices[tri*3+2]; vec3 vp = mesh.velocities[i]; vec3 vq = mesh.velocities[j]; vec3 vr = mesh.velocities[k]; adj_u += (vp[0] - vr[0]) * adj_ret[0] + (vp[1] - vr[1]) * adj_ret[1] + (vp[2] - vr[2]) * adj_ret[2]; adj_v += (vq[0] - vr[0]) * adj_ret[0] + (vq[1] - vr[1]) * adj_ret[1] + (vq[2] - vr[2]) * adj_ret[2]; } CUDA_CALLABLE inline vec3 mesh_eval_face_normal(uint64_t id, int tri) { Mesh mesh = mesh_get(id); if (!mesh.points) return vec3(); assert(tri < mesh.num_tris); int i = mesh.indices[tri*3+0]; int j = mesh.indices[tri*3+1]; int k = mesh.indices[tri*3+2]; vec3 p = mesh.points[i]; vec3 q = mesh.points[j]; vec3 r = mesh.points[k]; return normalize(cross(q - p, r - p)); } CUDA_CALLABLE inline void adj_mesh_eval_face_normal(uint64_t id, int tri, uint64_t& adj_id, int& adj_tri, const vec3& adj_ret) { // no-op } CUDA_CALLABLE inline vec3 mesh_get_point(uint64_t id, int index) { Mesh mesh = mesh_get(id); if (!mesh.points) return vec3(); #if FP_CHECK if (index >= mesh.num_tris * 3) { printf("mesh_get_point (%llu, %d) out of bounds at %s:%d\n", id, index, __FILE__, __LINE__); assert(0); } #endif int i = mesh.indices[index]; return mesh.points[i]; } CUDA_CALLABLE inline void adj_mesh_get_point(uint64_t id, int index, uint64_t& adj_id, int& adj_index, const vec3& adj_ret) { // no-op } CUDA_CALLABLE inline vec3 mesh_get_velocity(uint64_t id, int index) { Mesh mesh = mesh_get(id); if (!mesh.velocities) return vec3(); #if FP_CHECK if (index >= mesh.num_tris * 3) { printf("mesh_get_velocity (%llu, %d) out of bounds at %s:%d\n", id, index, __FILE__, __LINE__); assert(0); } #endif int i = mesh.indices[index]; return mesh.velocities[i]; } CUDA_CALLABLE inline void adj_mesh_get_velocity(uint64_t id, int index, uint64_t& adj_id, int& adj_index, const vec3& adj_ret) { // no-op } CUDA_CALLABLE inline int mesh_get_index(uint64_t id, int face_vertex_index) { Mesh mesh = mesh_get(id); if (!mesh.indices) return -1; assert(face_vertex_index < mesh.num_tris * 3); return mesh.indices[face_vertex_index]; } CUDA_CALLABLE inline void adj_mesh_get_index(uint64_t id, int index, uint64_t& adj_id, int& adj_index, const vec3& adj_ret) { // no-op } CUDA_CALLABLE bool mesh_get_descriptor(uint64_t id, Mesh& mesh); CUDA_CALLABLE void mesh_add_descriptor(uint64_t id, const Mesh& mesh); CUDA_CALLABLE void mesh_rem_descriptor(uint64_t id); } // namespace wp
59,575
C
30.555085
244
0.536836
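The closest-point queries in mesh.h above return a mesh_query_point_t whose face index and barycentric (u, v) coordinates can be fed straight back into mesh_eval_position to recover the closest point on the surface. A minimal sketch of that round trip, assuming a valid mesh id built elsewhere and Warp's native headers in scope; the helper name project_onto_mesh is illustrative only, not part of the Warp sources:

// Illustrative sketch: closest-point query followed by position reconstruction.
// Assumes mesh_id refers to a valid wp::Mesh created elsewhere (not shown).
CUDA_CALLABLE inline wp::vec3 project_onto_mesh(uint64_t mesh_id, const wp::vec3& p, float max_dist)
{
    wp::mesh_query_point_t q = wp::mesh_query_point_no_sign(mesh_id, p, max_dist);
    if (!q.result)
        return p;  // no triangle found within max_dist; leave the point unchanged

    // q.face and the barycentric coordinates (q.u, q.v) identify the closest point
    return wp::mesh_eval_position(mesh_id, q.face, q.u, q.v);
}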
NVIDIA/warp/warp/native/intersect_tri.h
/* Triangle/triangle intersection test routine, * by Tomas Moller, 1997. * See article "A Fast Triangle-Triangle Intersection Test", * Journal of Graphics Tools, 2(2), 1997 * * Updated June 1999: removed the divisions -- a little faster now! * Updated October 1999: added {} to CROSS and SUB macros * * int NoDivTriTriIsect(float V0[3],float V1[3],float V2[3], * float U0[3],float U1[3],float U2[3]) * * parameters: vertices of triangle 1: V0,V1,V2 * vertices of triangle 2: U0,U1,U2 * result : returns 1 if the triangles intersect, otherwise 0 * */ /* Copyright 2020 Tomas Akenine-Möller Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /* if USE_EPSILON_TEST is true then we do a check: if |dv|<EPSILON then dv=0.0; else no check is done (which is less robust) */ #define USE_EPSILON_TEST TRUE #define EPSILON 0.000001 /* some macros */ #define CROSS(dest,v1,v2){ \ dest[0]=v1[1]*v2[2]-v1[2]*v2[1]; \ dest[1]=v1[2]*v2[0]-v1[0]*v2[2]; \ dest[2]=v1[0]*v2[1]-v1[1]*v2[0];} #define DOT(v1,v2) (v1[0]*v2[0]+v1[1]*v2[1]+v1[2]*v2[2]) #define SUB(dest,v1,v2){ \ dest[0]=v1[0]-v2[0]; \ dest[1]=v1[1]-v2[1]; \ dest[2]=v1[2]-v2[2];} /* sort so that a<=b */ #define SORT(a,b) \ if(a>b) \ { \ float c; \ c=a; \ a=b; \ b=c; \ } /* this edge to edge test is based on Franlin Antonio's gem: "Faster Line Segment Intersection", in Graphics Gems III, pp. 199-202 */ #define EDGE_EDGE_TEST(V0,U0,U1) \ Bx=U0[i0]-U1[i0]; \ By=U0[i1]-U1[i1]; \ Cx=V0[i0]-U0[i0]; \ Cy=V0[i1]-U0[i1]; \ f=Ay*Bx-Ax*By; \ d=By*Cx-Bx*Cy; \ if((f>0 && d>=0 && d<=f) || (f<0 && d<=0 && d>=f)) \ { \ e=Ax*Cy-Ay*Cx; \ if(f>0) \ { \ if(e>=0 && e<=f) return 1; \ } \ else \ { \ if(e<=0 && e>=f) return 1; \ } \ } #define EDGE_AGAINST_TRI_EDGES(V0,V1,U0,U1,U2) \ { \ float Ax,Ay,Bx,By,Cx,Cy,e,d,f; \ Ax=V1[i0]-V0[i0]; \ Ay=V1[i1]-V0[i1]; \ /* test edge U0,U1 against V0,V1 */ \ EDGE_EDGE_TEST(V0,U0,U1); \ /* test edge U1,U2 against V0,V1 */ \ EDGE_EDGE_TEST(V0,U1,U2); \ /* test edge U2,U1 against V0,V1 */ \ EDGE_EDGE_TEST(V0,U2,U0); \ } #define POINT_IN_TRI(V0,U0,U1,U2) \ { \ float a,b,c,d0,d1,d2; \ /* is T1 completely inside T2? 
*/ \ /* check if V0 is inside tri(U0,U1,U2) */ \ a=U1[i1]-U0[i1]; \ b=-(U1[i0]-U0[i0]); \ c=-a*U0[i0]-b*U0[i1]; \ d0=a*V0[i0]+b*V0[i1]+c; \ \ a=U2[i1]-U1[i1]; \ b=-(U2[i0]-U1[i0]); \ c=-a*U1[i0]-b*U1[i1]; \ d1=a*V0[i0]+b*V0[i1]+c; \ \ a=U0[i1]-U2[i1]; \ b=-(U0[i0]-U2[i0]); \ c=-a*U2[i0]-b*U2[i1]; \ d2=a*V0[i0]+b*V0[i1]+c; \ if(d0*d1>0.0) \ { \ if(d0*d2>0.0) return 1; \ } \ } CUDA_CALLABLE inline int coplanar_tri_tri(float N[3],float V0[3],float V1[3],float V2[3], float U0[3],float U1[3],float U2[3]) { float A[3]; short i0,i1; /* first project onto an axis-aligned plane, that maximizes the area */ /* of the triangles, compute indices: i0,i1. */ A[0]=fabsf(N[0]); A[1]=fabsf(N[1]); A[2]=fabsf(N[2]); if(A[0]>A[1]) { if(A[0]>A[2]) { i0=1; /* A[0] is greatest */ i1=2; } else { i0=0; /* A[2] is greatest */ i1=1; } } else /* A[0]<=A[1] */ { if(A[2]>A[1]) { i0=0; /* A[2] is greatest */ i1=1; } else { i0=0; /* A[1] is greatest */ i1=2; } } /* test all edges of triangle 1 against the edges of triangle 2 */ EDGE_AGAINST_TRI_EDGES(V0,V1,U0,U1,U2); EDGE_AGAINST_TRI_EDGES(V1,V2,U0,U1,U2); EDGE_AGAINST_TRI_EDGES(V2,V0,U0,U1,U2); /* finally, test if tri1 is totally contained in tri2 or vice versa */ POINT_IN_TRI(V0,U0,U1,U2); POINT_IN_TRI(U0,V0,V1,V2); return 0; } #define NEWCOMPUTE_INTERVALS(VV0,VV1,VV2,D0,D1,D2,D0D1,D0D2,A,B,C,X0,X1) \ { \ if(D0D1>0.0f) \ { \ /* here we know that D0D2<=0.0 */ \ /* that is D0, D1 are on the same side, D2 on the other or on the plane */ \ A=VV2; B=(VV0-VV2)*D2; C=(VV1-VV2)*D2; X0=D2-D0; X1=D2-D1; \ } \ else if(D0D2>0.0f)\ { \ /* here we know that d0d1<=0.0 */ \ A=VV1; B=(VV0-VV1)*D1; C=(VV2-VV1)*D1; X0=D1-D0; X1=D1-D2; \ } \ else if(D1*D2>0.0f || D0!=0.0f) \ { \ /* here we know that d0d1<=0.0 or that D0!=0.0 */ \ A=VV0; B=(VV1-VV0)*D0; C=(VV2-VV0)*D0; X0=D0-D1; X1=D0-D2; \ } \ else if(D1!=0.0f) \ { \ A=VV1; B=(VV0-VV1)*D1; C=(VV2-VV1)*D1; X0=D1-D0; X1=D1-D2; \ } \ else if(D2!=0.0f) \ { \ A=VV2; B=(VV0-VV2)*D2; C=(VV1-VV2)*D2; X0=D2-D0; X1=D2-D1; \ } \ else \ { \ /* triangles are coplanar */ \ return coplanar_tri_tri(N1,V0,V1,V2,U0,U1,U2); \ } \ } CUDA_CALLABLE inline int NoDivTriTriIsect(float V0[3],float V1[3],float V2[3], float U0[3],float U1[3],float U2[3]) { float E1[3],E2[3]; float N1[3],N2[3],d1,d2; float du0,du1,du2,dv0,dv1,dv2; float D[3]; float isect1[2], isect2[2]; float du0du1,du0du2,dv0dv1,dv0dv2; short index; float vp0,vp1,vp2; float up0,up1,up2; float bb,cc,max; /* compute plane equation of triangle(V0,V1,V2) */ SUB(E1,V1,V0); SUB(E2,V2,V0); CROSS(N1,E1,E2); d1=-DOT(N1,V0); /* plane equation 1: N1.X+d1=0 */ /* put U0,U1,U2 into plane equation 1 to compute signed distances to the plane*/ du0=DOT(N1,U0)+d1; du1=DOT(N1,U1)+d1; du2=DOT(N1,U2)+d1; /* coplanarity robustness check */ #if USE_EPSILON_TEST==TRUE if(fabsf(du0)<EPSILON) du0=0.0; if(fabsf(du1)<EPSILON) du1=0.0; if(fabsf(du2)<EPSILON) du2=0.0; #endif du0du1=du0*du1; du0du2=du0*du2; if(du0du1>0.0f && du0du2>0.0f) /* same sign on all of them + not equal 0 ? */ return 0; /* no intersection occurs */ /* compute plane of triangle (U0,U1,U2) */ SUB(E1,U1,U0); SUB(E2,U2,U0); CROSS(N2,E1,E2); d2=-DOT(N2,U0); /* plane equation 2: N2.X+d2=0 */ /* put V0,V1,V2 into plane equation 2 */ dv0=DOT(N2,V0)+d2; dv1=DOT(N2,V1)+d2; dv2=DOT(N2,V2)+d2; #if USE_EPSILON_TEST==TRUE if(fabsf(dv0)<EPSILON) dv0=0.0; if(fabsf(dv1)<EPSILON) dv1=0.0; if(fabsf(dv2)<EPSILON) dv2=0.0; #endif dv0dv1=dv0*dv1; dv0dv2=dv0*dv2; if(dv0dv1>0.0f && dv0dv2>0.0f) /* same sign on all of them + not equal 0 ? 
*/ return 0; /* no intersection occurs */ /* compute direction of intersection line */ CROSS(D,N1,N2); /* compute and index to the largest component of D */ max=fabsf(D[0]); index=0; bb=fabsf(D[1]); cc=fabsf(D[2]); if(bb>max) max=bb,index=1; if(cc>max) max=cc,index=2; /* this is the simplified projection onto L*/ vp0=V0[index]; vp1=V1[index]; vp2=V2[index]; up0=U0[index]; up1=U1[index]; up2=U2[index]; /* compute interval for triangle 1 */ float a,b,c,x0,x1; NEWCOMPUTE_INTERVALS(vp0,vp1,vp2,dv0,dv1,dv2,dv0dv1,dv0dv2,a,b,c,x0,x1); /* compute interval for triangle 2 */ float d,e,f,y0,y1; NEWCOMPUTE_INTERVALS(up0,up1,up2,du0,du1,du2,du0du1,du0du2,d,e,f,y0,y1); float xx,yy,xxyy,tmp; xx=x0*x1; yy=y0*y1; xxyy=xx*yy; tmp=a*xxyy; isect1[0]=tmp+b*x1*yy; isect1[1]=tmp+c*x0*yy; tmp=d*xxyy; isect2[0]=tmp+e*xx*y1; isect2[1]=tmp+f*xx*y0; SORT(isect1[0],isect1[1]); SORT(isect2[0],isect2[1]); if(isect1[1]<isect2[0] || isect2[1]<isect1[0]) return 0; return 1; }
10,195
C
30.566563
117
0.480039
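NoDivTriTriIsect above takes the two triangles as raw float[3] vertex arrays and returns 1 on intersection, 0 otherwise. A small self-contained sketch (not part of the original file) that builds one triangle in the z = 0 plane and a second triangle with an edge piercing its interior, so the call is expected to report an intersection:

// Illustrative usage sketch; assumes the routine above is in scope.
CUDA_CALLABLE inline int tri_tri_overlap_demo()
{
    // triangle A lies in the z = 0 plane
    float V0[3] = {0.0f, 0.0f, 0.0f};
    float V1[3] = {1.0f, 0.0f, 0.0f};
    float V2[3] = {0.0f, 1.0f, 0.0f};

    // triangle B has an edge crossing z = 0 at (0.2, 0.3, 0), which is inside A
    float U0[3] = {0.2f, 0.3f, -1.0f};
    float U1[3] = {0.2f, 0.3f,  1.0f};
    float U2[3] = {1.0f, 1.0f,  0.5f};

    // returns 1 when the triangles intersect, 0 otherwise
    return NoDivTriTriIsect(V0, V1, V2, U0, U1, U2);
}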
NVIDIA/warp/warp/native/volume_builder.h
#pragma once

#include <nanovdb/NanoVDB.h>

template<typename BuildT>
struct BuildGridParams
{
    double voxel_size = 1.0;
    BuildT background_value{0};
    nanovdb::Vec3d translation{0.0, 0.0, 0.0};
    char name[256] = "";
};

template<>
struct BuildGridParams<nanovdb::ValueIndex>
{
    double voxel_size = 1.0;
    nanovdb::ValueIndex background_value;
    nanovdb::Vec3d translation{0.0, 0.0, 0.0};
    char name[256] = "";
};

template<>
struct BuildGridParams<nanovdb::ValueOnIndex>
{
    double voxel_size = 1.0;
    nanovdb::Vec3d translation{0.0, 0.0, 0.0};
    char name[256] = "";
};

template <typename BuildT>
void build_grid_from_points(nanovdb::Grid<nanovdb::NanoTree<BuildT>> *&out_grid, size_t &out_grid_size,
                            const void *points, size_t num_points, bool points_in_world_space,
                            const BuildGridParams<BuildT> &params);
973
C
26.828571
80
0.593011
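volume_builder.h only declares build_grid_from_points; its definition (and whichever BuildT instantiations exist) lives elsewhere in the native sources. A hedged sketch of how the parameter struct might be filled and the builder invoked for a float-valued grid; the point buffer, its length, and the availability of a float instantiation are assumptions, not guaranteed by this header:

// Hypothetical call site; 'points' and 'num_points' are placeholders for a
// position buffer supplied by the caller.
#include <cstdio>

void build_example(const void* points, size_t num_points)
{
    BuildGridParams<float> params;
    params.voxel_size = 0.1;
    params.background_value = 0.0f;
    params.translation = nanovdb::Vec3d(0.0, 0.0, 0.0);
    std::snprintf(params.name, sizeof(params.name), "points_grid");

    nanovdb::Grid<nanovdb::NanoTree<float>>* grid = nullptr;
    size_t grid_size = 0;

    // interpret positions in world space; the flag name suggests that passing
    // false would treat them as grid/index-space coordinates instead
    build_grid_from_points<float>(grid, grid_size, points, num_points,
                                  /*points_in_world_space=*/true, params);
}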
NVIDIA/warp/warp/native/cuda_crt.h
/** Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once // Attributes otherwise defined by CUDA's crt/host_defines.h #define __constant__ __attribute__((constant)) #define __device__ __attribute__((device)) #define __global__ __attribute__((global)) #define __host__ __attribute__((host)) #define __shared__ __attribute__((shared)) #define __forceinline__ __attribute__((always_inline)) using size_t = unsigned long long; // stdio.h extern "C" __device__ int printf(const char* format, ... ); // assert.h extern __host__ __device__ void __assertfail(const char * __assertion, const char *__file, unsigned int __line, const char *__function, size_t charsize); #if defined(NDEBUG) #define assert(e) (static_cast<void>(0)) #else /* !NDEBUG */ #define __ASSERT_STR_HELPER(x) #x #define assert(e) ((e) ? static_cast<void>(0)\ : __assertfail(__ASSERT_STR_HELPER(e), __FILE__,\ __LINE__, __PRETTY_FUNCTION__,\ sizeof(char))) #endif #define __device_forceinline__ static __device__ inline __forceinline__ // Implementations of CUDA builtin variables struct __threadIdx_t { __declspec(property(get = get_x)) unsigned int x; __declspec(property(get = get_y)) unsigned int y; __declspec(property(get = get_z)) unsigned int z; __device_forceinline__ unsigned int get_x() { return __nvvm_read_ptx_sreg_tid_x(); } __device_forceinline__ unsigned int get_y() { return __nvvm_read_ptx_sreg_tid_y(); } __device_forceinline__ unsigned int get_z() { return __nvvm_read_ptx_sreg_tid_z(); } }; struct __blockIdx_t { __declspec(property(get = get_x)) unsigned int x; __declspec(property(get = get_y)) unsigned int y; __declspec(property(get = get_z)) unsigned int z; __device_forceinline__ unsigned int get_x() { return __nvvm_read_ptx_sreg_ctaid_x(); } __device_forceinline__ unsigned int get_y() { return __nvvm_read_ptx_sreg_ctaid_y(); } __device_forceinline__ unsigned int get_z() { return __nvvm_read_ptx_sreg_ctaid_z(); } }; struct __blockDim_t { __declspec(property(get = get_x)) unsigned int x; __declspec(property(get = get_y)) unsigned int y; __declspec(property(get = get_z)) unsigned int z; __device_forceinline__ unsigned int get_x() { return __nvvm_read_ptx_sreg_ntid_x(); } __device_forceinline__ unsigned int get_y() { return __nvvm_read_ptx_sreg_ntid_y(); } __device_forceinline__ unsigned int get_z() { return __nvvm_read_ptx_sreg_ntid_z(); } }; struct __gridDim_t { __declspec(property(get = get_x)) unsigned int x; __declspec(property(get = get_y)) unsigned int y; __declspec(property(get = get_z)) unsigned int z; __device_forceinline__ unsigned int get_x() { return __nvvm_read_ptx_sreg_nctaid_x(); } __device_forceinline__ unsigned int get_y() { return __nvvm_read_ptx_sreg_nctaid_y(); } __device_forceinline__ unsigned int get_z() { return __nvvm_read_ptx_sreg_nctaid_z(); } }; extern const __device__ __threadIdx_t threadIdx; extern const __device__ __blockIdx_t blockIdx; extern const __device__ __blockDim_t blockDim; extern const __device__ __gridDim_t gridDim; // Forward declarations of libdevice functions extern "C" { __device__ int __nv_abs(int a); __device__ double __nv_acos(double a); __device__ float __nv_acosf(float a); __device__ double 
__nv_acosh(double a); __device__ float __nv_acoshf(float a); __device__ double __nv_asin(double a); __device__ float __nv_asinf(float a); __device__ double __nv_asinh(double a); __device__ float __nv_asinhf(float a); __device__ double __nv_atan2(double a, double b); __device__ float __nv_atan2f(float a, float b); __device__ double __nv_atan(double a); __device__ float __nv_atanf(float a); __device__ double __nv_atanh(double a); __device__ float __nv_atanhf(float a); __device__ int __nv_brev(int a); __device__ long long __nv_brevll(long long a); __device__ int __nv_byte_perm(int a, int b, int c); __device__ double __nv_cbrt(double a); __device__ float __nv_cbrtf(float a); __device__ double __nv_ceil(double a); __device__ float __nv_ceilf(float a); __device__ int __nv_clz(int a); __device__ int __nv_clzll(long long a); __device__ double __nv_copysign(double a, double b); __device__ float __nv_copysignf(float a, float b); __device__ double __nv_cos(double a); __device__ float __nv_cosf(float a); __device__ double __nv_cosh(double a); __device__ float __nv_coshf(float a); __device__ double __nv_cospi(double a); __device__ float __nv_cospif(float a); __device__ double __nv_cyl_bessel_i0(double a); __device__ float __nv_cyl_bessel_i0f(float a); __device__ double __nv_cyl_bessel_i1(double a); __device__ float __nv_cyl_bessel_i1f(float a); __device__ double __nv_dadd_rd(double a, double b); __device__ double __nv_dadd_rn(double a, double b); __device__ double __nv_dadd_ru(double a, double b); __device__ double __nv_dadd_rz(double a, double b); __device__ double __nv_ddiv_rd(double a, double b); __device__ double __nv_ddiv_rn(double a, double b); __device__ double __nv_ddiv_ru(double a, double b); __device__ double __nv_ddiv_rz(double a, double b); __device__ double __nv_dmul_rd(double a, double b); __device__ double __nv_dmul_rn(double a, double b); __device__ double __nv_dmul_ru(double a, double b); __device__ double __nv_dmul_rz(double a, double b); __device__ float __nv_double2float_rd(double a); __device__ float __nv_double2float_rn(double a); __device__ float __nv_double2float_ru(double a); __device__ float __nv_double2float_rz(double a); __device__ int __nv_double2hiint(double a); __device__ int __nv_double2int_rd(double a); __device__ int __nv_double2int_rn(double a); __device__ int __nv_double2int_ru(double a); __device__ int __nv_double2int_rz(double a); __device__ long long __nv_double2ll_rd(double a); __device__ long long __nv_double2ll_rn(double a); __device__ long long __nv_double2ll_ru(double a); __device__ long long __nv_double2ll_rz(double a); __device__ int __nv_double2loint(double a); __device__ unsigned int __nv_double2uint_rd(double a); __device__ unsigned int __nv_double2uint_rn(double a); __device__ unsigned int __nv_double2uint_ru(double a); __device__ unsigned int __nv_double2uint_rz(double a); __device__ unsigned long long __nv_double2ull_rd(double a); __device__ unsigned long long __nv_double2ull_rn(double a); __device__ unsigned long long __nv_double2ull_ru(double a); __device__ unsigned long long __nv_double2ull_rz(double a); __device__ unsigned long long __nv_double_as_longlong(double a); __device__ double __nv_drcp_rd(double a); __device__ double __nv_drcp_rn(double a); __device__ double __nv_drcp_ru(double a); __device__ double __nv_drcp_rz(double a); __device__ double __nv_dsqrt_rd(double a); __device__ double __nv_dsqrt_rn(double a); __device__ double __nv_dsqrt_ru(double a); __device__ double __nv_dsqrt_rz(double a); __device__ double __nv_dsub_rd(double a, double 
b); __device__ double __nv_dsub_rn(double a, double b); __device__ double __nv_dsub_ru(double a, double b); __device__ double __nv_dsub_rz(double a, double b); __device__ double __nv_erfc(double a); __device__ float __nv_erfcf(float a); __device__ double __nv_erfcinv(double a); __device__ float __nv_erfcinvf(float a); __device__ double __nv_erfcx(double a); __device__ float __nv_erfcxf(float a); __device__ double __nv_erf(double a); __device__ float __nv_erff(float a); __device__ double __nv_erfinv(double a); __device__ float __nv_erfinvf(float a); __device__ double __nv_exp10(double a); __device__ float __nv_exp10f(float a); __device__ double __nv_exp2(double a); __device__ float __nv_exp2f(float a); __device__ double __nv_exp(double a); __device__ float __nv_expf(float a); __device__ double __nv_expm1(double a); __device__ float __nv_expm1f(float a); __device__ double __nv_fabs(double a); __device__ float __nv_fabsf(float a); __device__ float __nv_fadd_rd(float a, float b); __device__ float __nv_fadd_rn(float a, float b); __device__ float __nv_fadd_ru(float a, float b); __device__ float __nv_fadd_rz(float a, float b); __device__ float __nv_fast_cosf(float a); __device__ float __nv_fast_exp10f(float a); __device__ float __nv_fast_expf(float a); __device__ float __nv_fast_fdividef(float a, float b); __device__ float __nv_fast_log10f(float a); __device__ float __nv_fast_log2f(float a); __device__ float __nv_fast_logf(float a); __device__ float __nv_fast_powf(float a, float b); __device__ void __nv_fast_sincosf(float a, float *s, float *c); __device__ float __nv_fast_sinf(float a); __device__ float __nv_fast_tanf(float a); __device__ double __nv_fdim(double a, double b); __device__ float __nv_fdimf(float a, float b); __device__ float __nv_fdiv_rd(float a, float b); __device__ float __nv_fdiv_rn(float a, float b); __device__ float __nv_fdiv_ru(float a, float b); __device__ float __nv_fdiv_rz(float a, float b); __device__ int __nv_ffs(int a); __device__ int __nv_ffsll(long long a); __device__ int __nv_finitef(float a); __device__ unsigned short __nv_float2half_rn(float a); __device__ int __nv_float2int_rd(float a); __device__ int __nv_float2int_rn(float a); __device__ int __nv_float2int_ru(float a); __device__ int __nv_float2int_rz(float a); __device__ long long __nv_float2ll_rd(float a); __device__ long long __nv_float2ll_rn(float a); __device__ long long __nv_float2ll_ru(float a); __device__ long long __nv_float2ll_rz(float a); __device__ unsigned int __nv_float2uint_rd(float a); __device__ unsigned int __nv_float2uint_rn(float a); __device__ unsigned int __nv_float2uint_ru(float a); __device__ unsigned int __nv_float2uint_rz(float a); __device__ unsigned long long __nv_float2ull_rd(float a); __device__ unsigned long long __nv_float2ull_rn(float a); __device__ unsigned long long __nv_float2ull_ru(float a); __device__ unsigned long long __nv_float2ull_rz(float a); __device__ int __nv_float_as_int(float a); __device__ unsigned int __nv_float_as_uint(float a); __device__ double __nv_floor(double a); __device__ float __nv_floorf(float a); __device__ double __nv_fma(double a, double b, double c); __device__ float __nv_fmaf(float a, float b, float c); __device__ float __nv_fmaf_ieee_rd(float a, float b, float c); __device__ float __nv_fmaf_ieee_rn(float a, float b, float c); __device__ float __nv_fmaf_ieee_ru(float a, float b, float c); __device__ float __nv_fmaf_ieee_rz(float a, float b, float c); __device__ float __nv_fmaf_rd(float a, float b, float c); __device__ float __nv_fmaf_rn(float a, 
float b, float c); __device__ float __nv_fmaf_ru(float a, float b, float c); __device__ float __nv_fmaf_rz(float a, float b, float c); __device__ double __nv_fma_rd(double a, double b, double c); __device__ double __nv_fma_rn(double a, double b, double c); __device__ double __nv_fma_ru(double a, double b, double c); __device__ double __nv_fma_rz(double a, double b, double c); __device__ double __nv_fmax(double a, double b); __device__ float __nv_fmaxf(float a, float b); __device__ double __nv_fmin(double a, double b); __device__ float __nv_fminf(float a, float b); __device__ double __nv_fmod(double a, double b); __device__ float __nv_fmodf(float a, float b); __device__ float __nv_fmul_rd(float a, float b); __device__ float __nv_fmul_rn(float a, float b); __device__ float __nv_fmul_ru(float a, float b); __device__ float __nv_fmul_rz(float a, float b); __device__ float __nv_frcp_rd(float a); __device__ float __nv_frcp_rn(float a); __device__ float __nv_frcp_ru(float a); __device__ float __nv_frcp_rz(float a); __device__ double __nv_frexp(double a, int *b); __device__ float __nv_frexpf(float a, int *b); __device__ float __nv_frsqrt_rn(float a); __device__ float __nv_fsqrt_rd(float a); __device__ float __nv_fsqrt_rn(float a); __device__ float __nv_fsqrt_ru(float a); __device__ float __nv_fsqrt_rz(float a); __device__ float __nv_fsub_rd(float a, float b); __device__ float __nv_fsub_rn(float a, float b); __device__ float __nv_fsub_ru(float a, float b); __device__ float __nv_fsub_rz(float a, float b); __device__ int __nv_hadd(int a, int b); __device__ float __nv_half2float(unsigned short h); __device__ double __nv_hiloint2double(int a, int b); __device__ double __nv_hypot(double a, double b); __device__ float __nv_hypotf(float a, float b); __device__ int __nv_ilogb(double a); __device__ int __nv_ilogbf(float a); __device__ double __nv_int2double_rn(int a); __device__ float __nv_int2float_rd(int a); __device__ float __nv_int2float_rn(int a); __device__ float __nv_int2float_ru(int a); __device__ float __nv_int2float_rz(int a); __device__ float __nv_int_as_float(int a); __device__ int __nv_isfinited(double a); __device__ int __nv_isinfd(double a); __device__ int __nv_isinff(float a); __device__ int __nv_isnand(double a); __device__ int __nv_isnanf(float a); __device__ double __nv_j0(double a); __device__ float __nv_j0f(float a); __device__ double __nv_j1(double a); __device__ float __nv_j1f(float a); __device__ float __nv_jnf(int a, float b); __device__ double __nv_jn(int a, double b); __device__ double __nv_ldexp(double a, int b); __device__ float __nv_ldexpf(float a, int b); __device__ double __nv_lgamma(double a); __device__ float __nv_lgammaf(float a); __device__ double __nv_ll2double_rd(long long a); __device__ double __nv_ll2double_rn(long long a); __device__ double __nv_ll2double_ru(long long a); __device__ double __nv_ll2double_rz(long long a); __device__ float __nv_ll2float_rd(long long a); __device__ float __nv_ll2float_rn(long long a); __device__ float __nv_ll2float_ru(long long a); __device__ float __nv_ll2float_rz(long long a); __device__ long long __nv_llabs(long long a); __device__ long long __nv_llmax(long long a, long long b); __device__ long long __nv_llmin(long long a, long long b); __device__ long long __nv_llrint(double a); __device__ long long __nv_llrintf(float a); __device__ long long __nv_llround(double a); __device__ long long __nv_llroundf(float a); __device__ double __nv_log10(double a); __device__ float __nv_log10f(float a); __device__ double __nv_log1p(double a); 
__device__ float __nv_log1pf(float a); __device__ double __nv_log2(double a); __device__ float __nv_log2f(float a); __device__ double __nv_logb(double a); __device__ float __nv_logbf(float a); __device__ double __nv_log(double a); __device__ float __nv_logf(float a); __device__ double __nv_longlong_as_double(long long a); __device__ int __nv_max(int a, int b); __device__ int __nv_min(int a, int b); __device__ double __nv_modf(double a, double *b); __device__ float __nv_modff(float a, float *b); __device__ int __nv_mul24(int a, int b); __device__ long long __nv_mul64hi(long long a, long long b); __device__ int __nv_mulhi(int a, int b); __device__ double __nv_nan(const signed char *a); __device__ float __nv_nanf(const signed char *a); __device__ double __nv_nearbyint(double a); __device__ float __nv_nearbyintf(float a); __device__ double __nv_nextafter(double a, double b); __device__ float __nv_nextafterf(float a, float b); __device__ double __nv_norm3d(double a, double b, double c); __device__ float __nv_norm3df(float a, float b, float c); __device__ double __nv_norm4d(double a, double b, double c, double d); __device__ float __nv_norm4df(float a, float b, float c, float d); __device__ double __nv_normcdf(double a); __device__ float __nv_normcdff(float a); __device__ double __nv_normcdfinv(double a); __device__ float __nv_normcdfinvf(float a); __device__ float __nv_normf(int a, const float *b); __device__ double __nv_norm(int a, const double *b); __device__ int __nv_popc(int a); __device__ int __nv_popcll(long long a); __device__ double __nv_pow(double a, double b); __device__ float __nv_powf(float a, float b); __device__ double __nv_powi(double a, int b); __device__ float __nv_powif(float a, int b); __device__ double __nv_rcbrt(double a); __device__ float __nv_rcbrtf(float a); __device__ double __nv_rcp64h(double a); __device__ double __nv_remainder(double a, double b); __device__ float __nv_remainderf(float a, float b); __device__ double __nv_remquo(double a, double b, int *c); __device__ float __nv_remquof(float a, float b, int *c); __device__ int __nv_rhadd(int a, int b); __device__ double __nv_rhypot(double a, double b); __device__ float __nv_rhypotf(float a, float b); __device__ double __nv_rint(double a); __device__ float __nv_rintf(float a); __device__ double __nv_rnorm3d(double a, double b, double c); __device__ float __nv_rnorm3df(float a, float b, float c); __device__ double __nv_rnorm4d(double a, double b, double c, double d); __device__ float __nv_rnorm4df(float a, float b, float c, float d); __device__ float __nv_rnormf(int a, const float *b); __device__ double __nv_rnorm(int a, const double *b); __device__ double __nv_round(double a); __device__ float __nv_roundf(float a); __device__ double __nv_rsqrt(double a); __device__ float __nv_rsqrtf(float a); __device__ int __nv_sad(int a, int b, int c); __device__ float __nv_saturatef(float a); __device__ double __nv_scalbn(double a, int b); __device__ float __nv_scalbnf(float a, int b); __device__ int __nv_signbitd(double a); __device__ int __nv_signbitf(float a); __device__ void __nv_sincos(double a, double *b, double *c); __device__ void __nv_sincosf(float a, float *b, float *c); __device__ void __nv_sincospi(double a, double *b, double *c); __device__ void __nv_sincospif(float a, float *b, float *c); __device__ double __nv_sin(double a); __device__ float __nv_sinf(float a); __device__ double __nv_sinh(double a); __device__ float __nv_sinhf(float a); __device__ double __nv_sinpi(double a); __device__ float __nv_sinpif(float a); 
__device__ double __nv_sqrt(double a); __device__ float __nv_sqrtf(float a); __device__ double __nv_tan(double a); __device__ float __nv_tanf(float a); __device__ double __nv_tanh(double a); __device__ float __nv_tanhf(float a); __device__ double __nv_tgamma(double a); __device__ float __nv_tgammaf(float a); __device__ double __nv_trunc(double a); __device__ float __nv_truncf(float a); __device__ int __nv_uhadd(unsigned int a, unsigned int b); __device__ double __nv_uint2double_rn(unsigned int i); __device__ float __nv_uint2float_rd(unsigned int a); __device__ float __nv_uint2float_rn(unsigned int a); __device__ float __nv_uint2float_ru(unsigned int a); __device__ float __nv_uint2float_rz(unsigned int a); __device__ float __nv_uint_as_float(unsigned int a); __device__ double __nv_ull2double_rd(unsigned long long a); __device__ double __nv_ull2double_rn(unsigned long long a); __device__ double __nv_ull2double_ru(unsigned long long a); __device__ double __nv_ull2double_rz(unsigned long long a); __device__ float __nv_ull2float_rd(unsigned long long a); __device__ float __nv_ull2float_rn(unsigned long long a); __device__ float __nv_ull2float_ru(unsigned long long a); __device__ float __nv_ull2float_rz(unsigned long long a); __device__ unsigned long long __nv_ullmax(unsigned long long a, unsigned long long b); __device__ unsigned long long __nv_ullmin(unsigned long long a, unsigned long long b); __device__ unsigned int __nv_umax(unsigned int a, unsigned int b); __device__ unsigned int __nv_umin(unsigned int a, unsigned int b); __device__ unsigned int __nv_umul24(unsigned int a, unsigned int b); __device__ unsigned long long __nv_umul64hi(unsigned long long a, unsigned long long b); __device__ unsigned int __nv_umulhi(unsigned int a, unsigned int b); __device__ unsigned int __nv_urhadd(unsigned int a, unsigned int b); __device__ unsigned int __nv_usad(unsigned int a, unsigned int b, unsigned int c); __device__ double __nv_y0(double a); __device__ float __nv_y0f(float a); __device__ double __nv_y1(double a); __device__ float __nv_y1f(float a); __device__ float __nv_ynf(int a, float b); __device__ double __nv_yn(int a, double b); } // extern "C" // Implementation of CUDA intrinsics __device_forceinline__ int __all(int a) { return __nvvm_vote_all(a); } __device_forceinline__ int __any(int a) { return __nvvm_vote_any(a); } __device_forceinline__ unsigned int __ballot(int a) { return __nvvm_vote_ballot(a); } __device_forceinline__ unsigned int __brev(unsigned int a) { return __nv_brev(a); } __device_forceinline__ unsigned long long __brevll(unsigned long long a) { return __nv_brevll(a); } __device_forceinline__ void __brkpt() { __asm__ __volatile__("brkpt;"); } __device_forceinline__ void __brkpt(int a) { __brkpt(); } __device_forceinline__ unsigned int __byte_perm(unsigned int a, unsigned int b, unsigned int c) { return __nv_byte_perm(a, b, c); } __device_forceinline__ int __clz(int a) { return __nv_clz(a); } __device_forceinline__ int __clzll(long long a) { return __nv_clzll(a); } __device_forceinline__ float __cosf(float a) { return __nv_fast_cosf(a); } __device_forceinline__ double __dAtomicAdd(double *p, double v) { return __nvvm_atom_add_gen_d(p, v); } __device_forceinline__ double __dAtomicAdd_block(double *p, double v) { return __nvvm_atom_cta_add_gen_d(p, v); } __device_forceinline__ double __dAtomicAdd_system(double *p, double v) { return __nvvm_atom_sys_add_gen_d(p, v); } __device_forceinline__ double __dadd_rd(double a, double b) { return __nv_dadd_rd(a, b); } __device_forceinline__ 
double __dadd_rn(double a, double b) { return __nv_dadd_rn(a, b); } __device_forceinline__ double __dadd_ru(double a, double b) { return __nv_dadd_ru(a, b); } __device_forceinline__ double __dadd_rz(double a, double b) { return __nv_dadd_rz(a, b); } __device_forceinline__ double __ddiv_rd(double a, double b) { return __nv_ddiv_rd(a, b); } __device_forceinline__ double __ddiv_rn(double a, double b) { return __nv_ddiv_rn(a, b); } __device_forceinline__ double __ddiv_ru(double a, double b) { return __nv_ddiv_ru(a, b); } __device_forceinline__ double __ddiv_rz(double a, double b) { return __nv_ddiv_rz(a, b); } __device_forceinline__ double __dmul_rd(double a, double b) { return __nv_dmul_rd(a, b); } __device_forceinline__ double __dmul_rn(double a, double b) { return __nv_dmul_rn(a, b); } __device_forceinline__ double __dmul_ru(double a, double b) { return __nv_dmul_ru(a, b); } __device_forceinline__ double __dmul_rz(double a, double b) { return __nv_dmul_rz(a, b); } __device_forceinline__ float __double2float_rd(double a) { return __nv_double2float_rd(a); } __device_forceinline__ float __double2float_rn(double a) { return __nv_double2float_rn(a); } __device_forceinline__ float __double2float_ru(double a) { return __nv_double2float_ru(a); } __device_forceinline__ float __double2float_rz(double a) { return __nv_double2float_rz(a); } __device_forceinline__ int __double2hiint(double a) { return __nv_double2hiint(a); } __device_forceinline__ int __double2int_rd(double a) { return __nv_double2int_rd(a); } __device_forceinline__ int __double2int_rn(double a) { return __nv_double2int_rn(a); } __device_forceinline__ int __double2int_ru(double a) { return __nv_double2int_ru(a); } __device_forceinline__ int __double2int_rz(double a) { return __nv_double2int_rz(a); } __device_forceinline__ long long __double2ll_rd(double a) { return __nv_double2ll_rd(a); } __device_forceinline__ long long __double2ll_rn(double a) { return __nv_double2ll_rn(a); } __device_forceinline__ long long __double2ll_ru(double a) { return __nv_double2ll_ru(a); } __device_forceinline__ long long __double2ll_rz(double a) { return __nv_double2ll_rz(a); } __device_forceinline__ int __double2loint(double a) { return __nv_double2loint(a); } __device_forceinline__ unsigned int __double2uint_rd(double a) { return __nv_double2uint_rd(a); } __device_forceinline__ unsigned int __double2uint_rn(double a) { return __nv_double2uint_rn(a); } __device_forceinline__ unsigned int __double2uint_ru(double a) { return __nv_double2uint_ru(a); } __device_forceinline__ unsigned int __double2uint_rz(double a) { return __nv_double2uint_rz(a); } __device_forceinline__ unsigned long long __double2ull_rd(double a) { return __nv_double2ull_rd(a); } __device_forceinline__ unsigned long long __double2ull_rn(double a) { return __nv_double2ull_rn(a); } __device_forceinline__ unsigned long long __double2ull_ru(double a) { return __nv_double2ull_ru(a); } __device_forceinline__ unsigned long long __double2ull_rz(double a) { return __nv_double2ull_rz(a); } __device_forceinline__ long long __double_as_longlong(double a) { return __nv_double_as_longlong(a); } __device_forceinline__ double __drcp_rd(double a) { return __nv_drcp_rd(a); } __device_forceinline__ double __drcp_rn(double a) { return __nv_drcp_rn(a); } __device_forceinline__ double __drcp_ru(double a) { return __nv_drcp_ru(a); } __device_forceinline__ double __drcp_rz(double a) { return __nv_drcp_rz(a); } __device_forceinline__ double __dsqrt_rd(double a) { return __nv_dsqrt_rd(a); } __device_forceinline__ 
double __dsqrt_rn(double a) { return __nv_dsqrt_rn(a); } __device_forceinline__ double __dsqrt_ru(double a) { return __nv_dsqrt_ru(a); } __device_forceinline__ double __dsqrt_rz(double a) { return __nv_dsqrt_rz(a); } __device_forceinline__ double __dsub_rd(double a, double b) { return __nv_dsub_rd(a, b); } __device_forceinline__ double __dsub_rn(double a, double b) { return __nv_dsub_rn(a, b); } __device_forceinline__ double __dsub_ru(double a, double b) { return __nv_dsub_ru(a, b); } __device_forceinline__ double __dsub_rz(double a, double b) { return __nv_dsub_rz(a, b); } __device_forceinline__ float __exp10f(float a) { return __nv_fast_exp10f(a); } __device_forceinline__ float __expf(float a) { return __nv_fast_expf(a); } __device_forceinline__ float __fAtomicAdd(float *p, float v) { return __nvvm_atom_add_gen_f(p, v); } __device_forceinline__ float __fAtomicAdd_block(float *p, float v) { return __nvvm_atom_cta_add_gen_f(p, v); } __device_forceinline__ float __fAtomicAdd_system(float *p, float v) { return __nvvm_atom_sys_add_gen_f(p, v); } __device_forceinline__ float __fAtomicExch(float *p, float v) { return __nv_int_as_float(__nvvm_atom_xchg_gen_i((int *)p, __nv_float_as_int(v))); } __device_forceinline__ float __fAtomicExch_block(float *p, float v) { return __nv_int_as_float(__nvvm_atom_cta_xchg_gen_i((int *)p, __nv_float_as_int(v))); } __device_forceinline__ float __fAtomicExch_system(float *p, float v) { return __nv_int_as_float(__nvvm_atom_sys_xchg_gen_i((int *)p, __nv_float_as_int(v))); } __device_forceinline__ float __fadd_rd(float a, float b) { return __nv_fadd_rd(a, b); } __device_forceinline__ float __fadd_rn(float a, float b) { return __nv_fadd_rn(a, b); } __device_forceinline__ float __fadd_ru(float a, float b) { return __nv_fadd_ru(a, b); } __device_forceinline__ float __fadd_rz(float a, float b) { return __nv_fadd_rz(a, b); } __device_forceinline__ float __fdiv_rd(float a, float b) { return __nv_fdiv_rd(a, b); } __device_forceinline__ float __fdiv_rn(float a, float b) { return __nv_fdiv_rn(a, b); } __device_forceinline__ float __fdiv_ru(float a, float b) { return __nv_fdiv_ru(a, b); } __device_forceinline__ float __fdiv_rz(float a, float b) { return __nv_fdiv_rz(a, b); } __device_forceinline__ float __fdividef(float a, float b) { return __nv_fast_fdividef(a, b); } __device_forceinline__ int __ffs(int a) { return __nv_ffs(a); } __device_forceinline__ int __ffsll(long long a) { return __nv_ffsll(a); } __device_forceinline__ int __finite(double a) { return __nv_isfinited(a); } __device_forceinline__ int __finitef(float a) { return __nv_finitef(a); } __device_forceinline__ int __float2int_rd(float a) { return __nv_float2int_rd(a); } __device_forceinline__ int __float2int_rn(float a) { return __nv_float2int_rn(a); } __device_forceinline__ int __float2int_ru(float a) { return __nv_float2int_ru(a); } __device_forceinline__ int __float2int_rz(float a) { return __nv_float2int_rz(a); } __device_forceinline__ long long __float2ll_rd(float a) { return __nv_float2ll_rd(a); } __device_forceinline__ long long __float2ll_rn(float a) { return __nv_float2ll_rn(a); } __device_forceinline__ long long __float2ll_ru(float a) { return __nv_float2ll_ru(a); } __device_forceinline__ long long __float2ll_rz(float a) { return __nv_float2ll_rz(a); } __device_forceinline__ unsigned int __float2uint_rd(float a) { return __nv_float2uint_rd(a); } __device_forceinline__ unsigned int __float2uint_rn(float a) { return __nv_float2uint_rn(a); } __device_forceinline__ unsigned int __float2uint_ru(float a) { 
return __nv_float2uint_ru(a); } __device_forceinline__ unsigned int __float2uint_rz(float a) { return __nv_float2uint_rz(a); } __device_forceinline__ unsigned long long __float2ull_rd(float a) { return __nv_float2ull_rd(a); } __device_forceinline__ unsigned long long __float2ull_rn(float a) { return __nv_float2ull_rn(a); } __device_forceinline__ unsigned long long __float2ull_ru(float a) { return __nv_float2ull_ru(a); } __device_forceinline__ unsigned long long __float2ull_rz(float a) { return __nv_float2ull_rz(a); } __device_forceinline__ int __float_as_int(float a) { return __nv_float_as_int(a); } __device_forceinline__ unsigned int __float_as_uint(float a) { return __nv_float_as_uint(a); } __device_forceinline__ double __fma_rd(double a, double b, double c) { return __nv_fma_rd(a, b, c); } __device_forceinline__ double __fma_rn(double a, double b, double c) { return __nv_fma_rn(a, b, c); } __device_forceinline__ double __fma_ru(double a, double b, double c) { return __nv_fma_ru(a, b, c); } __device_forceinline__ double __fma_rz(double a, double b, double c) { return __nv_fma_rz(a, b, c); } __device_forceinline__ float __fmaf_ieee_rd(float a, float b, float c) { return __nv_fmaf_ieee_rd(a, b, c); } __device_forceinline__ float __fmaf_ieee_rn(float a, float b, float c) { return __nv_fmaf_ieee_rn(a, b, c); } __device_forceinline__ float __fmaf_ieee_ru(float a, float b, float c) { return __nv_fmaf_ieee_ru(a, b, c); } __device_forceinline__ float __fmaf_ieee_rz(float a, float b, float c) { return __nv_fmaf_ieee_rz(a, b, c); } __device_forceinline__ float __fmaf_rd(float a, float b, float c) { return __nv_fmaf_rd(a, b, c); } __device_forceinline__ float __fmaf_rn(float a, float b, float c) { return __nv_fmaf_rn(a, b, c); } __device_forceinline__ float __fmaf_ru(float a, float b, float c) { return __nv_fmaf_ru(a, b, c); } __device_forceinline__ float __fmaf_rz(float a, float b, float c) { return __nv_fmaf_rz(a, b, c); } __device_forceinline__ float __fmul_rd(float a, float b) { return __nv_fmul_rd(a, b); } __device_forceinline__ float __fmul_rn(float a, float b) { return __nv_fmul_rn(a, b); } __device_forceinline__ float __fmul_ru(float a, float b) { return __nv_fmul_ru(a, b); } __device_forceinline__ float __fmul_rz(float a, float b) { return __nv_fmul_rz(a, b); } __device_forceinline__ float __frcp_rd(float a) { return __nv_frcp_rd(a); } __device_forceinline__ float __frcp_rn(float a) { return __nv_frcp_rn(a); } __device_forceinline__ float __frcp_ru(float a) { return __nv_frcp_ru(a); } __device_forceinline__ float __frcp_rz(float a) { return __nv_frcp_rz(a); } __device_forceinline__ float __frsqrt_rn(float a) { return __nv_frsqrt_rn(a); } __device_forceinline__ float __fsqrt_rd(float a) { return __nv_fsqrt_rd(a); } __device_forceinline__ float __fsqrt_rn(float a) { return __nv_fsqrt_rn(a); } __device_forceinline__ float __fsqrt_ru(float a) { return __nv_fsqrt_ru(a); } __device_forceinline__ float __fsqrt_rz(float a) { return __nv_fsqrt_rz(a); } __device_forceinline__ float __fsub_rd(float a, float b) { return __nv_fsub_rd(a, b); } __device_forceinline__ float __fsub_rn(float a, float b) { return __nv_fsub_rn(a, b); } __device_forceinline__ float __fsub_ru(float a, float b) { return __nv_fsub_ru(a, b); } __device_forceinline__ float __fsub_rz(float a, float b) { return __nv_fsub_rz(a, b); } __device_forceinline__ int __hadd(int a, int b) { return __nv_hadd(a, b); } __device_forceinline__ double __hiloint2double(int a, int b) { return __nv_hiloint2double(a, b); } __device_forceinline__ int 
__iAtomicAdd(int *p, int v) { return __nvvm_atom_add_gen_i(p, v); } __device_forceinline__ int __iAtomicAdd_block(int *p, int v) { return __nvvm_atom_cta_add_gen_i(p, v); } __device_forceinline__ int __iAtomicAdd_system(int *p, int v) { return __nvvm_atom_sys_add_gen_i(p, v); } __device_forceinline__ int __iAtomicAnd(int *p, int v) { return __nvvm_atom_and_gen_i(p, v); } __device_forceinline__ int __iAtomicAnd_block(int *p, int v) { return __nvvm_atom_cta_and_gen_i(p, v); } __device_forceinline__ int __iAtomicAnd_system(int *p, int v) { return __nvvm_atom_sys_and_gen_i(p, v); } __device_forceinline__ int __iAtomicCAS(int *p, int cmp, int v) { return __nvvm_atom_cas_gen_i(p, cmp, v); } __device_forceinline__ int __iAtomicCAS_block(int *p, int cmp, int v) { return __nvvm_atom_cta_cas_gen_i(p, cmp, v); } __device_forceinline__ int __iAtomicCAS_system(int *p, int cmp, int v) { return __nvvm_atom_sys_cas_gen_i(p, cmp, v); } __device_forceinline__ int __iAtomicExch(int *p, int v) { return __nvvm_atom_xchg_gen_i(p, v); } __device_forceinline__ int __iAtomicExch_block(int *p, int v) { return __nvvm_atom_cta_xchg_gen_i(p, v); } __device_forceinline__ int __iAtomicExch_system(int *p, int v) { return __nvvm_atom_sys_xchg_gen_i(p, v); } __device_forceinline__ int __iAtomicMax(int *p, int v) { return __nvvm_atom_max_gen_i(p, v); } __device_forceinline__ int __iAtomicMax_block(int *p, int v) { return __nvvm_atom_cta_max_gen_i(p, v); } __device_forceinline__ int __iAtomicMax_system(int *p, int v) { return __nvvm_atom_sys_max_gen_i(p, v); } __device_forceinline__ int __iAtomicMin(int *p, int v) { return __nvvm_atom_min_gen_i(p, v); } __device_forceinline__ int __iAtomicMin_block(int *p, int v) { return __nvvm_atom_cta_min_gen_i(p, v); } __device_forceinline__ int __iAtomicMin_system(int *p, int v) { return __nvvm_atom_sys_min_gen_i(p, v); } __device_forceinline__ int __iAtomicOr(int *p, int v) { return __nvvm_atom_or_gen_i(p, v); } __device_forceinline__ int __iAtomicOr_block(int *p, int v) { return __nvvm_atom_cta_or_gen_i(p, v); } __device_forceinline__ int __iAtomicOr_system(int *p, int v) { return __nvvm_atom_sys_or_gen_i(p, v); } __device_forceinline__ int __iAtomicXor(int *p, int v) { return __nvvm_atom_xor_gen_i(p, v); } __device_forceinline__ int __iAtomicXor_block(int *p, int v) { return __nvvm_atom_cta_xor_gen_i(p, v); } __device_forceinline__ int __iAtomicXor_system(int *p, int v) { return __nvvm_atom_sys_xor_gen_i(p, v); } __device_forceinline__ long long __illAtomicMax(long long *p, long long v) { return __nvvm_atom_max_gen_ll(p, v); } __device_forceinline__ long long __illAtomicMax_block(long long *p, long long v) { return __nvvm_atom_cta_max_gen_ll(p, v); } __device_forceinline__ long long __illAtomicMax_system(long long *p, long long v) { return __nvvm_atom_sys_max_gen_ll(p, v); } __device_forceinline__ long long __illAtomicMin(long long *p, long long v) { return __nvvm_atom_min_gen_ll(p, v); } __device_forceinline__ long long __illAtomicMin_block(long long *p, long long v) { return __nvvm_atom_cta_min_gen_ll(p, v); } __device_forceinline__ long long __illAtomicMin_system(long long *p, long long v) { return __nvvm_atom_sys_min_gen_ll(p, v); } __device_forceinline__ double __int2double_rn(int a) { return __nv_int2double_rn(a); } __device_forceinline__ float __int2float_rd(int a) { return __nv_int2float_rd(a); } __device_forceinline__ float __int2float_rn(int a) { return __nv_int2float_rn(a); } __device_forceinline__ float __int2float_ru(int a) { return __nv_int2float_ru(a); } 
__device_forceinline__ float __int2float_rz(int a) { return __nv_int2float_rz(a); } __device_forceinline__ float __int_as_float(int a) { return __nv_int_as_float(a); } __device_forceinline__ int __isfinited(double a) { return __nv_isfinited(a); } __device_forceinline__ int __isinf(double a) { return __nv_isinfd(a); } __device_forceinline__ int __isinff(float a) { return __nv_isinff(a); } __device_forceinline__ int __isnan(double a) { return __nv_isnand(a); } __device_forceinline__ int __isnanf(float a) { return __nv_isnanf(a); } __device_forceinline__ double __ll2double_rd(long long a) { return __nv_ll2double_rd(a); } __device_forceinline__ double __ll2double_rn(long long a) { return __nv_ll2double_rn(a); } __device_forceinline__ double __ll2double_ru(long long a) { return __nv_ll2double_ru(a); } __device_forceinline__ double __ll2double_rz(long long a) { return __nv_ll2double_rz(a); } __device_forceinline__ float __ll2float_rd(long long a) { return __nv_ll2float_rd(a); } __device_forceinline__ float __ll2float_rn(long long a) { return __nv_ll2float_rn(a); } __device_forceinline__ float __ll2float_ru(long long a) { return __nv_ll2float_ru(a); } __device_forceinline__ float __ll2float_rz(long long a) { return __nv_ll2float_rz(a); } __device_forceinline__ long long __llAtomicAnd(long long *p, long long v) { return __nvvm_atom_and_gen_ll(p, v); } __device_forceinline__ long long __llAtomicAnd_block(long long *p, long long v) { return __nvvm_atom_cta_and_gen_ll(p, v); } __device_forceinline__ long long __llAtomicAnd_system(long long *p, long long v) { return __nvvm_atom_sys_and_gen_ll(p, v); } __device_forceinline__ long long __llAtomicOr(long long *p, long long v) { return __nvvm_atom_or_gen_ll(p, v); } __device_forceinline__ long long __llAtomicOr_block(long long *p, long long v) { return __nvvm_atom_cta_or_gen_ll(p, v); } __device_forceinline__ long long __llAtomicOr_system(long long *p, long long v) { return __nvvm_atom_sys_or_gen_ll(p, v); } __device_forceinline__ long long __llAtomicXor(long long *p, long long v) { return __nvvm_atom_xor_gen_ll(p, v); } __device_forceinline__ long long __llAtomicXor_block(long long *p, long long v) { return __nvvm_atom_cta_xor_gen_ll(p, v); } __device_forceinline__ long long __llAtomicXor_system(long long *p, long long v) { return __nvvm_atom_sys_xor_gen_ll(p, v); } __device_forceinline__ float __log10f(float a) { return __nv_fast_log10f(a); } __device_forceinline__ float __log2f(float a) { return __nv_fast_log2f(a); } __device_forceinline__ float __logf(float a) { return __nv_fast_logf(a); } __device_forceinline__ double __longlong_as_double(long long a) { return __nv_longlong_as_double(a); } __device_forceinline__ int __mul24(int a, int b) { return __nv_mul24(a, b); } __device_forceinline__ long long __mul64hi(long long a, long long b) { return __nv_mul64hi(a, b); } __device_forceinline__ int __mulhi(int a, int b) { return __nv_mulhi(a, b); } __device_forceinline__ unsigned int __pm0(void) { return __nvvm_read_ptx_sreg_pm0(); } __device_forceinline__ unsigned int __pm1(void) { return __nvvm_read_ptx_sreg_pm1(); } __device_forceinline__ unsigned int __pm2(void) { return __nvvm_read_ptx_sreg_pm2(); } __device_forceinline__ unsigned int __pm3(void) { return __nvvm_read_ptx_sreg_pm3(); } __device_forceinline__ int __popc(int a) { return __nv_popc(a); } __device_forceinline__ int __popcll(long long a) { return __nv_popcll(a); } __device_forceinline__ float __powf(float a, float b) { return __nv_fast_powf(a, b); } #define __prof_trigger(__counter) __asm__ 
__volatile__("pmevent \t%0;" ::"i"(__counter)) __device_forceinline__ int __rhadd(int a, int b) { return __nv_rhadd(a, b); } __device_forceinline__ unsigned int __sad(int a, int b, unsigned int c) { return __nv_sad(a, b, c); } __device_forceinline__ float __saturatef(float a) { return __nv_saturatef(a); } __device_forceinline__ int __signbitd(double a) { return __nv_signbitd(a); } __device_forceinline__ int __signbitf(float a) { return __nv_signbitf(a); } __device_forceinline__ void __sincosf(float a, float *s, float *c) { return __nv_fast_sincosf(a, s, c); } __device_forceinline__ float __sinf(float a) { return __nv_fast_sinf(a); } __device_forceinline__ int __syncthreads_and(int a) { return __nvvm_bar0_and(a); } __device_forceinline__ int __syncthreads_count(int a) { return __nvvm_bar0_popc(a); } __device_forceinline__ int __syncthreads_or(int a) { return __nvvm_bar0_or(a); } __device_forceinline__ float __tanf(float a) { return __nv_fast_tanf(a); } __device_forceinline__ void __threadfence(void) { __nvvm_membar_gl(); } __device_forceinline__ void __threadfence_block(void) { __nvvm_membar_cta(); }; __device_forceinline__ void __threadfence_system(void) { __nvvm_membar_sys(); }; __device_forceinline__ void __trap(void) { __asm__ __volatile__("trap;"); } __device_forceinline__ unsigned int __uAtomicAdd(unsigned int *p, unsigned int v) { return __nvvm_atom_add_gen_i((int *)p, v); } __device_forceinline__ unsigned int __uAtomicAdd_block(unsigned int *p, unsigned int v) { return __nvvm_atom_cta_add_gen_i((int *)p, v); } __device_forceinline__ unsigned int __uAtomicAdd_system(unsigned int *p, unsigned int v) { return __nvvm_atom_sys_add_gen_i((int *)p, v); } __device_forceinline__ unsigned int __uAtomicAnd(unsigned int *p, unsigned int v) { return __nvvm_atom_and_gen_i((int *)p, v); } __device_forceinline__ unsigned int __uAtomicAnd_block(unsigned int *p, unsigned int v) { return __nvvm_atom_cta_and_gen_i((int *)p, v); } __device_forceinline__ unsigned int __uAtomicAnd_system(unsigned int *p, unsigned int v) { return __nvvm_atom_sys_and_gen_i((int *)p, v); } __device_forceinline__ unsigned int __uAtomicCAS(unsigned int *p, unsigned int cmp, unsigned int v) { return __nvvm_atom_cas_gen_i((int *)p, cmp, v); } __device_forceinline__ unsigned int __uAtomicCAS_block(unsigned int *p, unsigned int cmp, unsigned int v) { return __nvvm_atom_cta_cas_gen_i((int *)p, cmp, v); } __device_forceinline__ unsigned int __uAtomicCAS_system(unsigned int *p, unsigned int cmp, unsigned int v) { return __nvvm_atom_sys_cas_gen_i((int *)p, cmp, v); } __device_forceinline__ unsigned int __uAtomicDec(unsigned int *p, unsigned int v) { return __nvvm_atom_dec_gen_ui(p, v); } __device_forceinline__ unsigned int __uAtomicDec_block(unsigned int *p, unsigned int v) { return __nvvm_atom_cta_dec_gen_ui(p, v); } __device_forceinline__ unsigned int __uAtomicDec_system(unsigned int *p, unsigned int v) { return __nvvm_atom_sys_dec_gen_ui(p, v); } __device_forceinline__ unsigned int __uAtomicExch(unsigned int *p, unsigned int v) { return __nvvm_atom_xchg_gen_i((int *)p, v); } __device_forceinline__ unsigned int __uAtomicExch_block(unsigned int *p, unsigned int v) { return __nvvm_atom_cta_xchg_gen_i((int *)p, v); } __device_forceinline__ unsigned int __uAtomicExch_system(unsigned int *p, unsigned int v) { return __nvvm_atom_sys_xchg_gen_i((int *)p, v); } __device_forceinline__ unsigned int __uAtomicInc(unsigned int *p, unsigned int v) { return __nvvm_atom_inc_gen_ui(p, v); } __device_forceinline__ unsigned int 
__uAtomicInc_block(unsigned int *p, unsigned int v) { return __nvvm_atom_cta_inc_gen_ui(p, v); } __device_forceinline__ unsigned int __uAtomicInc_system(unsigned int *p, unsigned int v) { return __nvvm_atom_sys_inc_gen_ui(p, v); } __device_forceinline__ unsigned int __uAtomicMax(unsigned int *p, unsigned int v) { return __nvvm_atom_max_gen_ui(p, v); } __device_forceinline__ unsigned int __uAtomicMax_block(unsigned int *p, unsigned int v) { return __nvvm_atom_cta_max_gen_ui(p, v); } __device_forceinline__ unsigned int __uAtomicMax_system(unsigned int *p, unsigned int v) { return __nvvm_atom_sys_max_gen_ui(p, v); } __device_forceinline__ unsigned int __uAtomicMin(unsigned int *p, unsigned int v) { return __nvvm_atom_min_gen_ui(p, v); } __device_forceinline__ unsigned int __uAtomicMin_block(unsigned int *p, unsigned int v) { return __nvvm_atom_cta_min_gen_ui(p, v); } __device_forceinline__ unsigned int __uAtomicMin_system(unsigned int *p, unsigned int v) { return __nvvm_atom_sys_min_gen_ui(p, v); } __device_forceinline__ unsigned int __uAtomicOr(unsigned int *p, unsigned int v) { return __nvvm_atom_or_gen_i((int *)p, v); } __device_forceinline__ unsigned int __uAtomicOr_block(unsigned int *p, unsigned int v) { return __nvvm_atom_cta_or_gen_i((int *)p, v); } __device_forceinline__ unsigned int __uAtomicOr_system(unsigned int *p, unsigned int v) { return __nvvm_atom_sys_or_gen_i((int *)p, v); } __device_forceinline__ unsigned int __uAtomicXor(unsigned int *p, unsigned int v) { return __nvvm_atom_xor_gen_i((int *)p, v); } __device_forceinline__ unsigned int __uAtomicXor_block(unsigned int *p, unsigned int v) { return __nvvm_atom_cta_xor_gen_i((int *)p, v); } __device_forceinline__ unsigned int __uAtomicXor_system(unsigned int *p, unsigned int v) { return __nvvm_atom_sys_xor_gen_i((int *)p, v); } __device_forceinline__ unsigned int __uhadd(unsigned int a, unsigned int b) { return __nv_uhadd(a, b); } __device_forceinline__ double __uint2double_rn(unsigned int a) { return __nv_uint2double_rn(a); } __device_forceinline__ float __uint2float_rd(unsigned int a) { return __nv_uint2float_rd(a); } __device_forceinline__ float __uint2float_rn(unsigned int a) { return __nv_uint2float_rn(a); } __device_forceinline__ float __uint2float_ru(unsigned int a) { return __nv_uint2float_ru(a); } __device_forceinline__ float __uint2float_rz(unsigned int a) { return __nv_uint2float_rz(a); } __device_forceinline__ float __uint_as_float(unsigned int a) { return __nv_uint_as_float(a); } __device_forceinline__ double __ull2double_rd(unsigned long long a) { return __nv_ull2double_rd(a); } __device_forceinline__ double __ull2double_rn(unsigned long long a) { return __nv_ull2double_rn(a); } __device_forceinline__ double __ull2double_ru(unsigned long long a) { return __nv_ull2double_ru(a); } __device_forceinline__ double __ull2double_rz(unsigned long long a) { return __nv_ull2double_rz(a); } __device_forceinline__ float __ull2float_rd(unsigned long long a) { return __nv_ull2float_rd(a); } __device_forceinline__ float __ull2float_rn(unsigned long long a) { return __nv_ull2float_rn(a); } __device_forceinline__ float __ull2float_ru(unsigned long long a) { return __nv_ull2float_ru(a); } __device_forceinline__ float __ull2float_rz(unsigned long long a) { return __nv_ull2float_rz(a); } __device_forceinline__ unsigned long long __ullAtomicAdd(unsigned long long *p, unsigned long long v) { return __nvvm_atom_add_gen_ll((long long *)p, v); } __device_forceinline__ unsigned long long __ullAtomicAdd_block(unsigned long long *p, unsigned 
long long v) { return __nvvm_atom_cta_add_gen_ll((long long *)p, v); } __device_forceinline__ unsigned long long __ullAtomicAdd_system(unsigned long long *p, unsigned long long v) { return __nvvm_atom_sys_add_gen_ll((long long *)p, v); } __device_forceinline__ unsigned long long __ullAtomicAnd(unsigned long long *p, unsigned long long v) { return __nvvm_atom_and_gen_ll((long long *)p, v); } __device_forceinline__ unsigned long long __ullAtomicAnd_block(unsigned long long *p, unsigned long long v) { return __nvvm_atom_cta_and_gen_ll((long long *)p, v); } __device_forceinline__ unsigned long long __ullAtomicAnd_system(unsigned long long *p, unsigned long long v) { return __nvvm_atom_sys_and_gen_ll((long long *)p, v); } __device_forceinline__ unsigned long long __ullAtomicCAS(unsigned long long *p, unsigned long long cmp, unsigned long long v) { return __nvvm_atom_cas_gen_ll((long long *)p, cmp, v); } __device_forceinline__ unsigned long long __ullAtomicCAS_block(unsigned long long *p, unsigned long long cmp, unsigned long long v) { return __nvvm_atom_cta_cas_gen_ll((long long *)p, cmp, v); } __device_forceinline__ unsigned long long __ullAtomicCAS_system(unsigned long long *p, unsigned long long cmp, unsigned long long v) { return __nvvm_atom_sys_cas_gen_ll((long long *)p, cmp, v); } __device_forceinline__ unsigned long long __ullAtomicExch(unsigned long long *p, unsigned long long v) { return __nvvm_atom_xchg_gen_ll((long long *)p, v); } __device_forceinline__ unsigned long long __ullAtomicExch_block(unsigned long long *p, unsigned long long v) { return __nvvm_atom_cta_xchg_gen_ll((long long *)p, v); } __device_forceinline__ unsigned long long __ullAtomicExch_system(unsigned long long *p, unsigned long long v) { return __nvvm_atom_sys_xchg_gen_ll((long long *)p, v); } __device_forceinline__ unsigned long long __ullAtomicMax(unsigned long long *p, unsigned long long v) { return __nvvm_atom_max_gen_ull(p, v); } __device_forceinline__ unsigned long long __ullAtomicMax_block(unsigned long long *p, unsigned long long v) { return __nvvm_atom_cta_max_gen_ull(p, v); } __device_forceinline__ unsigned long long __ullAtomicMax_system(unsigned long long *p, unsigned long long v) { return __nvvm_atom_sys_max_gen_ull(p, v); } __device_forceinline__ unsigned long long __ullAtomicMin(unsigned long long *p, unsigned long long v) { return __nvvm_atom_min_gen_ull(p, v); } __device_forceinline__ unsigned long long __ullAtomicMin_block(unsigned long long *p, unsigned long long v) { return __nvvm_atom_cta_min_gen_ull(p, v); } __device_forceinline__ unsigned long long __ullAtomicMin_system(unsigned long long *p, unsigned long long v) { return __nvvm_atom_sys_min_gen_ull(p, v); } __device_forceinline__ unsigned long long __ullAtomicOr(unsigned long long *p, unsigned long long v) { return __nvvm_atom_or_gen_ll((long long *)p, v); } __device_forceinline__ unsigned long long __ullAtomicOr_block(unsigned long long *p, unsigned long long v) { return __nvvm_atom_cta_or_gen_ll((long long *)p, v); } __device_forceinline__ unsigned long long __ullAtomicOr_system(unsigned long long *p, unsigned long long v) { return __nvvm_atom_sys_or_gen_ll((long long *)p, v); } __device_forceinline__ unsigned long long __ullAtomicXor(unsigned long long *p, unsigned long long v) { return __nvvm_atom_xor_gen_ll((long long *)p, v); } __device_forceinline__ unsigned long long __ullAtomicXor_block(unsigned long long *p, unsigned long long v) { return __nvvm_atom_cta_xor_gen_ll((long long *)p, v); } __device_forceinline__ unsigned long long 
__ullAtomicXor_system(unsigned long long *p, unsigned long long v) { return __nvvm_atom_sys_xor_gen_ll((long long *)p, v); } __device_forceinline__ unsigned int __umul24(unsigned int a, unsigned int b) { return __nv_umul24(a, b); } __device_forceinline__ unsigned long long __umul64hi(unsigned long long a, unsigned long long b) { return __nv_umul64hi(a, b); } __device_forceinline__ unsigned int __umulhi(unsigned int a, unsigned int b) { return __nv_umulhi(a, b); } __device_forceinline__ unsigned int __urhadd(unsigned int a, unsigned int b) { return __nv_urhadd(a, b); } __device_forceinline__ unsigned int __usad(unsigned int a, unsigned int b, unsigned int c) { return __nv_usad(a, b, c); } __device_forceinline__ void *memcpy(void *a, const void *b, size_t c) { return __builtin_memcpy(a, b, c); } __device_forceinline__ void *memset(void *a, int b, size_t c) { return __builtin_memset(a, b, c); } #if defined(__FAST_MATH__) #define __FAST_OR_SLOW(fast, slow) fast #else #define __FAST_OR_SLOW(fast, slow) slow #endif __device_forceinline__ int abs(int a) { return __nv_abs(a); } __device_forceinline__ double fabs(double a) { return __nv_fabs(a); } __device_forceinline__ double acos(double a) { return __nv_acos(a); } __device_forceinline__ float acosf(float a) { return __nv_acosf(a); } __device_forceinline__ double acosh(double a) { return __nv_acosh(a); } __device_forceinline__ float acoshf(float a) { return __nv_acoshf(a); } __device_forceinline__ double asin(double a) { return __nv_asin(a); } __device_forceinline__ float asinf(float a) { return __nv_asinf(a); } __device_forceinline__ double asinh(double a) { return __nv_asinh(a); } __device_forceinline__ float asinhf(float a) { return __nv_asinhf(a); } __device_forceinline__ double atan(double a) { return __nv_atan(a); } __device_forceinline__ double atan2(double a, double b) { return __nv_atan2(a, b); } __device_forceinline__ float atan2f(float a, float b) { return __nv_atan2f(a, b); } __device_forceinline__ float atanf(float a) { return __nv_atanf(a); } __device_forceinline__ double atanh(double a) { return __nv_atanh(a); } __device_forceinline__ float atanhf(float a) { return __nv_atanhf(a); } __device_forceinline__ double cbrt(double a) { return __nv_cbrt(a); } __device_forceinline__ float cbrtf(float a) { return __nv_cbrtf(a); } __device_forceinline__ double ceil(double a) { return __nv_ceil(a); } __device_forceinline__ float ceilf(float a) { return __nv_ceilf(a); } __device_forceinline__ double copysign(double a, double b) { return __nv_copysign(a, b); } __device_forceinline__ float copysignf(float a, float b) { return __nv_copysignf(a, b); } __device_forceinline__ double cos(double a) { return __nv_cos(a); } __device_forceinline__ float cosf(float a) { return __FAST_OR_SLOW(__nv_fast_cosf, __nv_cosf)(a); } __device_forceinline__ double cosh(double a) { return __nv_cosh(a); } __device_forceinline__ float coshf(float a) { return __nv_coshf(a); } __device_forceinline__ double cospi(double a) { return __nv_cospi(a); } __device_forceinline__ float cospif(float a) { return __nv_cospif(a); } __device_forceinline__ double cyl_bessel_i0(double a) { return __nv_cyl_bessel_i0(a); } __device_forceinline__ float cyl_bessel_i0f(float a) { return __nv_cyl_bessel_i0f(a); } __device_forceinline__ double cyl_bessel_i1(double a) { return __nv_cyl_bessel_i1(a); } __device_forceinline__ float cyl_bessel_i1f(float a) { return __nv_cyl_bessel_i1f(a); } __device_forceinline__ double erf(double a) { return __nv_erf(a); } __device_forceinline__ double erfc(double 
a) { return __nv_erfc(a); } __device_forceinline__ float erfcf(float a) { return __nv_erfcf(a); } __device_forceinline__ double erfcinv(double a) { return __nv_erfcinv(a); } __device_forceinline__ float erfcinvf(float a) { return __nv_erfcinvf(a); } __device_forceinline__ double erfcx(double a) { return __nv_erfcx(a); } __device_forceinline__ float erfcxf(float a) { return __nv_erfcxf(a); } __device_forceinline__ float erff(float a) { return __nv_erff(a); } __device_forceinline__ double erfinv(double a) { return __nv_erfinv(a); } __device_forceinline__ float erfinvf(float a) { return __nv_erfinvf(a); } __device_forceinline__ double exp(double a) { return __nv_exp(a); } __device_forceinline__ double exp10(double a) { return __nv_exp10(a); } __device_forceinline__ float exp10f(float a) { return __nv_exp10f(a); } __device_forceinline__ double exp2(double a) { return __nv_exp2(a); } __device_forceinline__ float exp2f(float a) { return __nv_exp2f(a); } __device_forceinline__ float expf(float a) { return __nv_expf(a); } __device_forceinline__ double expm1(double a) { return __nv_expm1(a); } __device_forceinline__ float expm1f(float a) { return __nv_expm1f(a); } __device_forceinline__ float fabsf(float a) { return __nv_fabsf(a); } __device_forceinline__ double fdim(double a, double b) { return __nv_fdim(a, b); } __device_forceinline__ float fdimf(float a, float b) { return __nv_fdimf(a, b); } __device_forceinline__ double fdivide(double a, double b) { return a / b; } __device_forceinline__ float fdividef(float a, float b) { return __FAST_OR_SLOW(__nv_fast_fdividef(a, b), a / b); } __device_forceinline__ double floor(double f) { return __nv_floor(f); } __device_forceinline__ float floorf(float f) { return __nv_floorf(f); } __device_forceinline__ double fma(double a, double b, double c) { return __nv_fma(a, b, c); } __device_forceinline__ float fmaf(float a, float b, float c) { return __nv_fmaf(a, b, c); } __device_forceinline__ double fmax(double a, double b) { return __nv_fmax(a, b); } __device_forceinline__ float fmaxf(float a, float b) { return __nv_fmaxf(a, b); } __device_forceinline__ double fmin(double a, double b) { return __nv_fmin(a, b); } __device_forceinline__ float fminf(float a, float b) { return __nv_fminf(a, b); } __device_forceinline__ double fmod(double a, double b) { return __nv_fmod(a, b); } __device_forceinline__ float fmodf(float a, float b) { return __nv_fmodf(a, b); } __device_forceinline__ double frexp(double a, int *b) { return __nv_frexp(a, b); } __device_forceinline__ float frexpf(float a, int *b) { return __nv_frexpf(a, b); } __device_forceinline__ double hypot(double a, double b) { return __nv_hypot(a, b); } __device_forceinline__ float hypotf(float a, float b) { return __nv_hypotf(a, b); } __device_forceinline__ int ilogb(double a) { return __nv_ilogb(a); } __device_forceinline__ int ilogbf(float a) { return __nv_ilogbf(a); } __device_forceinline__ double j0(double a) { return __nv_j0(a); } __device_forceinline__ float j0f(float a) { return __nv_j0f(a); } __device_forceinline__ double j1(double a) { return __nv_j1(a); } __device_forceinline__ float j1f(float a) { return __nv_j1f(a); } __device_forceinline__ double jn(int n, double a) { return __nv_jn(n, a); } __device_forceinline__ float jnf(int n, float a) { return __nv_jnf(n, a); } #if defined(__LP64__) __device_forceinline__ long labs(long a) { return __nv_llabs(a); }; #else __device_forceinline__ long labs(long a) { return __nv_abs(a); }; #endif __device_forceinline__ double ldexp(double a, int b) { return 
__nv_ldexp(a, b); } __device_forceinline__ float ldexpf(float a, int b) { return __nv_ldexpf(a, b); } __device_forceinline__ double lgamma(double a) { return __nv_lgamma(a); } __device_forceinline__ float lgammaf(float a) { return __nv_lgammaf(a); } __device_forceinline__ long long llabs(long long a) { return __nv_llabs(a); } __device_forceinline__ long long llmax(long long a, long long b) { return __nv_llmax(a, b); } __device_forceinline__ long long llmin(long long a, long long b) { return __nv_llmin(a, b); } __device_forceinline__ long long llrint(double a) { return __nv_llrint(a); } __device_forceinline__ long long llrintf(float a) { return __nv_llrintf(a); } __device_forceinline__ long long llround(double a) { return __nv_llround(a); } __device_forceinline__ long long llroundf(float a) { return __nv_llroundf(a); } __device_forceinline__ double round(double a) { return __nv_round(a); } __device_forceinline__ float roundf(float a) { return __nv_roundf(a); } __device_forceinline__ double log(double a) { return __nv_log(a); } __device_forceinline__ double log10(double a) { return __nv_log10(a); } __device_forceinline__ float log10f(float a) { return __nv_log10f(a); } __device_forceinline__ double log1p(double a) { return __nv_log1p(a); } __device_forceinline__ float log1pf(float a) { return __nv_log1pf(a); } __device_forceinline__ double log2(double a) { return __nv_log2(a); } __device_forceinline__ float log2f(float a) { return __FAST_OR_SLOW(__nv_fast_log2f, __nv_log2f)(a); } __device_forceinline__ double logb(double a) { return __nv_logb(a); } __device_forceinline__ float logbf(float a) { return __nv_logbf(a); } __device_forceinline__ float logf(float a) { return __FAST_OR_SLOW(__nv_fast_logf, __nv_logf)(a); } #if defined(__LP64__) __device_forceinline__ long lrint(double a) { return llrint(a); } __device_forceinline__ long lrintf(float a) { return __float2ll_rn(a); } __device_forceinline__ long lround(double a) { return llround(a); } __device_forceinline__ long lroundf(float a) { return llroundf(a); } #else __device_forceinline__ long lrint(double a) { return (long)rint(a); } __device_forceinline__ long lrintf(float a) { return __float2int_rn(a); } __device_forceinline__ long lround(double a) { return round(a); } __device_forceinline__ long lroundf(float a) { return roundf(a); } #endif __device_forceinline__ int max(int a, int b) { return __nv_max(a, b); } __device_forceinline__ int min(int a, int b) { return __nv_min(a, b); } __device_forceinline__ double modf(double a, double *b) { return __nv_modf(a, b); } __device_forceinline__ float modff(float a, float *b) { return __nv_modff(a, b); } __device_forceinline__ double nearbyint(double a) { return __builtin_nearbyint(a); } __device_forceinline__ float nearbyintf(float a) { return __builtin_nearbyintf(a); } __device_forceinline__ double nextafter(double a, double b) { return __nv_nextafter(a, b); } __device_forceinline__ float nextafterf(float a, float b) { return __nv_nextafterf(a, b); } __device_forceinline__ double norm(int dim, const double *t) { return __nv_norm(dim, t); } __device_forceinline__ double norm3d(double a, double b, double c) { return __nv_norm3d(a, b, c); } __device_forceinline__ float norm3df(float a, float b, float c) { return __nv_norm3df(a, b, c); } __device_forceinline__ double norm4d(double a, double b, double c, double d) { return __nv_norm4d(a, b, c, d); } __device_forceinline__ float norm4df(float a, float b, float c, float d) { return __nv_norm4df(a, b, c, d); } __device_forceinline__ double normcdf(double 
a) { return __nv_normcdf(a); } __device_forceinline__ float normcdff(float a) { return __nv_normcdff(a); } __device_forceinline__ double normcdfinv(double a) { return __nv_normcdfinv(a); } __device_forceinline__ float normcdfinvf(float a) { return __nv_normcdfinvf(a); } __device_forceinline__ float normf(int dim, const float *t) { return __nv_normf(dim, t); } __device_forceinline__ double pow(double a, double b) { return __nv_pow(a, b); } __device_forceinline__ float powf(float a, float b) { return __nv_powf(a, b); } __device_forceinline__ double powi(double a, int b) { return __nv_powi(a, b); } __device_forceinline__ float powif(float a, int b) { return __nv_powif(a, b); } __device_forceinline__ double rcbrt(double a) { return __nv_rcbrt(a); } __device_forceinline__ float rcbrtf(float a) { return __nv_rcbrtf(a); } __device_forceinline__ double remainder(double a, double b) { return __nv_remainder(a, b); } __device_forceinline__ float remainderf(float a, float b) { return __nv_remainderf(a, b); } __device_forceinline__ double remquo(double a, double b, int *c) { return __nv_remquo(a, b, c); } __device_forceinline__ float remquof(float a, float b, int *c) { return __nv_remquof(a, b, c); } __device_forceinline__ double rhypot(double a, double b) { return __nv_rhypot(a, b); } __device_forceinline__ float rhypotf(float a, float b) { return __nv_rhypotf(a, b); } __device_forceinline__ double rint(double a) { return __builtin_rint(a); } __device_forceinline__ float rintf(float a) { return __builtin_rintf(a); } __device_forceinline__ double rnorm(int a, const double *b) { return __nv_rnorm(a, b); } __device_forceinline__ double rnorm3d(double a, double b, double c) { return __nv_rnorm3d(a, b, c); } __device_forceinline__ float rnorm3df(float a, float b, float c) { return __nv_rnorm3df(a, b, c); } __device_forceinline__ double rnorm4d(double a, double b, double c, double d) { return __nv_rnorm4d(a, b, c, d); } __device_forceinline__ float rnorm4df(float a, float b, float c, float d) { return __nv_rnorm4df(a, b, c, d); } __device_forceinline__ float rnormf(int dim, const float *t) { return __nv_rnormf(dim, t); } __device_forceinline__ double rsqrt(double a) { return __nv_rsqrt(a); } __device_forceinline__ float rsqrtf(float a) { return __nv_rsqrtf(a); } __device_forceinline__ double scalbn(double a, int b) { return __nv_scalbn(a, b); } __device_forceinline__ float scalbnf(float a, int b) { return __nv_scalbnf(a, b); } __device_forceinline__ double scalbln(double a, long b) { if (b > INT_MAX) { return a > 0 ? INFINITY : -HUGE_VAL; } if (b < INT_MIN) { return a > 0 ? 0.0 : -0.0; } return scalbn(a, (int)b); } __device_forceinline__ float scalblnf(float a, long b) { if (b > INT_MAX) { return a > 0 ? HUGE_VALF : -HUGE_VALF; } if (b < INT_MIN) { return a > 0 ? 
0.f : -0.f; } return scalbnf(a, (int)b); } __device_forceinline__ double sin(double a) { return __nv_sin(a); } __device_forceinline__ void sincos(double a, double *s, double *c) { return __nv_sincos(a, s, c); } __device_forceinline__ void sincosf(float a, float *s, float *c) { return __FAST_OR_SLOW(__nv_fast_sincosf, __nv_sincosf)(a, s, c); } __device_forceinline__ void sincospi(double a, double *s, double *c) { return __nv_sincospi(a, s, c); } __device_forceinline__ void sincospif(float a, float *s, float *c) { return __nv_sincospif(a, s, c); } __device_forceinline__ float sinf(float a) { return __FAST_OR_SLOW(__nv_fast_sinf, __nv_sinf)(a); } __device_forceinline__ double sinh(double a) { return __nv_sinh(a); } __device_forceinline__ float sinhf(float a) { return __nv_sinhf(a); } __device_forceinline__ double sinpi(double a) { return __nv_sinpi(a); } __device_forceinline__ float sinpif(float a) { return __nv_sinpif(a); } __device_forceinline__ double sqrt(double a) { return __nv_sqrt(a); } __device_forceinline__ float sqrtf(float a) { return __nv_sqrtf(a); } __device_forceinline__ double tan(double a) { return __nv_tan(a); } __device_forceinline__ float tanf(float a) { return __nv_tanf(a); } __device_forceinline__ double tanh(double a) { return __nv_tanh(a); } __device_forceinline__ float tanhf(float a) { return __nv_tanhf(a); } __device_forceinline__ double tgamma(double a) { return __nv_tgamma(a); } __device_forceinline__ float tgammaf(float a) { return __nv_tgammaf(a); } __device_forceinline__ double trunc(double a) { return __nv_trunc(a); } __device_forceinline__ float truncf(float a) { return __nv_truncf(a); } __device_forceinline__ unsigned long long ullmax(unsigned long long a, unsigned long long b) { return __nv_ullmax(a, b); } __device_forceinline__ unsigned long long ullmin(unsigned long long a, unsigned long long b) { return __nv_ullmin(a, b); } __device_forceinline__ unsigned int umax(unsigned int a, unsigned int b) { return __nv_umax(a, b); } __device_forceinline__ unsigned int umin(unsigned int a, unsigned int b) { return __nv_umin(a, b); } __device_forceinline__ double y0(double a) { return __nv_y0(a); } __device_forceinline__ float y0f(float a) { return __nv_y0f(a); } __device_forceinline__ double y1(double a) { return __nv_y1(a); } __device_forceinline__ float y1f(float a) { return __nv_y1f(a); } __device_forceinline__ double yn(int a, double b) { return __nv_yn(a, b); } __device_forceinline__ float ynf(int a, float b) { return __nv_ynf(a, b); } #undef __FAST_OR_SLOW // Implementation of a subset of <cuda_fp16.h> functionality struct __half { unsigned short u; }; __device_forceinline__ short __half_as_short(const __half h) { short i; asm("{ mov.b16 %0, %1;}\n" : "=h"(i) : "h"(h)); return i; } __device_forceinline__ __half __short_as_half(const short i) { __half h; asm("{ mov.b16 %0, %1;}\n" : "=h"(h) : "h"(i)); return h; } __device_forceinline__ __half __hadd(const __half a, const __half b) { __half x; asm("{ add.f16 %0, %1, %2;}\n" : "=h"(x) : "h"(a), "h"(b)); return x; } // Implementation of a subset of <cuda_runtime.h> functionality __device_forceinline__ bool isfinite(double x) { return __nv_isfinited(x); } __device_forceinline__ bool isfinite(float x) { return __nv_finitef(x); } __device_forceinline__ unsigned short atomicCAS(unsigned short *address, unsigned short compare, unsigned short val) { unsigned short r; asm volatile ("{atom.global.cas.b16 %0,[%1],%2,%3; }\n" : "=h"(r) : "l"(address), "h"(compare), "h"(val) : "memory"); return r; } __device_forceinline__ int 
atomicCAS(int *address, int compare, int val) { return __iAtomicCAS(address, compare, val); } __device_forceinline__ __half atomicAdd(__half *const address, const __half val) { unsigned short* address_as_us = (unsigned short*)address; unsigned short old = *address_as_us; unsigned short assumed; do { assumed = old; old = atomicCAS(address_as_us, assumed, __half_as_short(__hadd(val, __short_as_half(assumed)))); } while (assumed != old); return __short_as_half(old); } __device_forceinline__ double atomicAdd(double *const address, const double val) { return __dAtomicAdd(address, val); } __device_forceinline__ float atomicAdd(float *const address, const float val) { return __fAtomicAdd(address, val); } __device_forceinline__ int atomicAdd(int *const address, const int val) { return __iAtomicAdd(address, val); } __device_forceinline__ unsigned int atomicAdd(unsigned int *const address, const unsigned int val) { return __uAtomicAdd(address, val); } __device_forceinline__ unsigned long long atomicAdd(unsigned long long *const address, const unsigned long long val) { return __ullAtomicAdd(address, val); } __device_forceinline__ int atomicMin(int *const address, const int val) { return __iAtomicMin(address, val); } __device_forceinline__ int atomicMax(int *const address, const int val) { return __iAtomicMax(address, val); }
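// Usage sketch (hypothetical example, not part of the original header): the __half
// atomicAdd above emulates a 16-bit atomic add with an atomicCAS retry loop, so
// callers can accumulate half-precision values like any other overload.
__device_forceinline__ void example_accumulate_half(__half *sum, const __half *vals, int n)
{
    for (int i = 0; i < n; ++i)
        atomicAdd(sum, vals[i]); // each call retries the 16-bit CAS until its update wins
}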
68,657
C
64.388571
195
0.666312
NVIDIA/warp/warp/native/volume.cpp
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #include "cuda_util.h" #include "volume_builder.h" #include "volume_impl.h" #include "warp.h" #include <map> using namespace wp; namespace { struct VolumeDesc { // NanoVDB buffer either in device or host memory void *buffer; uint64_t size_in_bytes; bool owner; // whether the buffer should be deallocated when the volume is destroyed pnanovdb_grid_t grid_data; pnanovdb_tree_t tree_data; // Host-accessible version of the blind metadata (copy if GPU, alias if CPU) pnanovdb_gridblindmetadata_t *blind_metadata; // CUDA context for this volume (NULL if CPU) void *context; pnanovdb_buf_t as_pnano() const { return pnanovdb_make_buf(static_cast<uint32_t *>(buffer), size_in_bytes); } }; // Host-side volume descriptors. Maps each CPU/GPU volume buffer address (id) to a CPU desc std::map<uint64_t, VolumeDesc> g_volume_descriptors; bool volume_get_descriptor(uint64_t id, const VolumeDesc *&volumeDesc) { if (id == 0) return false; const auto &iter = g_volume_descriptors.find(id); if (iter == g_volume_descriptors.end()) return false; else volumeDesc = &iter->second; return true; } bool volume_exists(const void *id) { const VolumeDesc *volume; return volume_get_descriptor((uint64_t)id, volume); } void volume_add_descriptor(uint64_t id, VolumeDesc &&volumeDesc) { g_volume_descriptors[id] = std::move(volumeDesc); } void volume_rem_descriptor(uint64_t id) { g_volume_descriptors.erase(id); } } // anonymous namespace // NB: buf must be a host pointer uint64_t volume_create_host(void *buf, uint64_t size, bool copy, bool owner) { if (size > 0 && size < sizeof(pnanovdb_grid_t) + sizeof(pnanovdb_tree_t)) return 0; // This cannot be a valid NanoVDB grid with data if (!copy && volume_exists(buf)) { // descriptor already created for this volume return 0; } VolumeDesc volume; volume.context = NULL; memcpy_h2h(&volume.grid_data, buf, sizeof(pnanovdb_grid_t)); memcpy_h2h(&volume.tree_data, (pnanovdb_grid_t *)buf + 1, sizeof(pnanovdb_tree_t)); if (volume.grid_data.magic != PNANOVDB_MAGIC_NUMBER && volume.grid_data.magic != PNANOVDB_MAGIC_GRID) return 0; if (size == 0) { size = volume.grid_data.grid_size; } // Copy or alias buffer volume.size_in_bytes = size; if (copy) { volume.buffer = alloc_host(size); memcpy_h2h(volume.buffer, buf, size); volume.owner = true; } else { volume.buffer = buf; volume.owner = owner; } // Alias blind metadata volume.blind_metadata = reinterpret_cast<pnanovdb_gridblindmetadata_t *>(static_cast<uint8_t *>(volume.buffer) + volume.grid_data.blind_metadata_offset); uint64_t id = (uint64_t)volume.buffer; volume_add_descriptor(id, std::move(volume)); return id; } // NB: buf must be a pointer on the same device uint64_t volume_create_device(void *context, void *buf, uint64_t size, bool copy, bool owner) { if (size > 0 && size < sizeof(pnanovdb_grid_t) + sizeof(pnanovdb_tree_t)) return 0; // This cannot be a valid NanoVDB grid with data if (!copy && volume_exists(buf)) { // descriptor already created for this volume return 0; } ContextGuard guard(context); VolumeDesc volume; volume.context = context ? 
context : cuda_context_get_current(); memcpy_d2h(WP_CURRENT_CONTEXT, &volume.grid_data, buf, sizeof(pnanovdb_grid_t)); memcpy_d2h(WP_CURRENT_CONTEXT, &volume.tree_data, (pnanovdb_grid_t *)buf + 1, sizeof(pnanovdb_tree_t)); // no sync needed since the above copies are to pageable memory if (volume.grid_data.magic != PNANOVDB_MAGIC_NUMBER && volume.grid_data.magic != PNANOVDB_MAGIC_GRID) return 0; if (size == 0) { size = volume.grid_data.grid_size; } // Copy or alias data buffer volume.size_in_bytes = size; if (copy) { volume.buffer = alloc_device(WP_CURRENT_CONTEXT, size); memcpy_d2d(WP_CURRENT_CONTEXT, volume.buffer, buf, size); volume.owner = true; } else { volume.buffer = buf; volume.owner = owner; } // Make blind metadata accessible on host const uint64_t blindmetadata_size = volume.grid_data.blind_metadata_count * sizeof(pnanovdb_gridblindmetadata_t); volume.blind_metadata = static_cast<pnanovdb_gridblindmetadata_t *>(alloc_pinned(blindmetadata_size)); memcpy_d2h(WP_CURRENT_CONTEXT, volume.blind_metadata, static_cast<uint8_t *>(volume.buffer) + volume.grid_data.blind_metadata_offset, blindmetadata_size); uint64_t id = (uint64_t)volume.buffer; volume_add_descriptor(id, std::move(volume)); return id; } void volume_get_buffer_info(uint64_t id, void **buf, uint64_t *size) { *buf = 0; *size = 0; const VolumeDesc *volume; if (volume_get_descriptor(id, volume)) { *buf = volume->buffer; *size = volume->size_in_bytes; } } void volume_get_voxel_size(uint64_t id, float *dx, float *dy, float *dz) { *dx = *dy = *dz = 0.0f; const VolumeDesc *volume; if (volume_get_descriptor(id, volume)) { *dx = (float)volume->grid_data.voxel_size[0]; *dy = (float)volume->grid_data.voxel_size[1]; *dz = (float)volume->grid_data.voxel_size[2]; } } void volume_get_tile_and_voxel_count(uint64_t id, uint32_t &tile_count, uint64_t &voxel_count) { tile_count = 0; voxel_count = 0; const VolumeDesc *volume; if (volume_get_descriptor(id, volume)) { tile_count = volume->tree_data.node_count_leaf; const uint32_t grid_type = volume->grid_data.grid_type; switch (grid_type) { case PNANOVDB_GRID_TYPE_ONINDEX: case PNANOVDB_GRID_TYPE_ONINDEXMASK: // number of indexable voxels is number of active voxels voxel_count = volume->tree_data.voxel_count; break; default: // all leaf voxels are indexable voxel_count = uint64_t(tile_count) * PNANOVDB_LEAF_TABLE_COUNT; } } } const char *volume_get_grid_info(uint64_t id, uint64_t *grid_size, uint32_t *grid_index, uint32_t *grid_count, float translation[3], float transform[9], char type_str[16]) { const VolumeDesc *volume; if (volume_get_descriptor(id, volume)) { const pnanovdb_grid_t &grid_data = volume->grid_data; *grid_count = grid_data.grid_count; *grid_index = grid_data.grid_index; *grid_size = grid_data.grid_size; memcpy(translation, grid_data.map.vecf, sizeof(grid_data.map.vecf)); memcpy(transform, grid_data.map.matf, sizeof(grid_data.map.matf)); nanovdb::toStr(type_str, static_cast<nanovdb::GridType>(grid_data.grid_type)); return (const char *)grid_data.grid_name; } *grid_size = 0; *grid_index = 0; *grid_count = 0; type_str[0] = 0; return nullptr; } uint32_t volume_get_blind_data_count(uint64_t id) { const VolumeDesc *volume; if (volume_get_descriptor(id, volume)) { return volume->grid_data.blind_metadata_count; } return 0; } const char *volume_get_blind_data_info(uint64_t id, uint32_t data_index, void **buf, uint64_t *value_count, uint32_t *value_size, char type_str[16]) { const VolumeDesc *volume; if (volume_get_descriptor(id, volume) && data_index < 
volume->grid_data.blind_metadata_count) { const pnanovdb_gridblindmetadata_t &metadata = volume->blind_metadata[data_index]; *value_count = metadata.value_count; *value_size = metadata.value_size; nanovdb::toStr(type_str, static_cast<nanovdb::GridType>(metadata.data_type)); *buf = static_cast<uint8_t *>(volume->buffer) + volume->grid_data.blind_metadata_offset + data_index * sizeof(pnanovdb_gridblindmetadata_t) + metadata.data_offset; return (const char *)metadata.name; } *buf = nullptr; *value_count = 0; *value_size = 0; type_str[0] = 0; return nullptr; } void volume_get_tiles_host(uint64_t id, void *buf) { static constexpr uint32_t MASK = (1u << 3u) - 1u; // mask for bit operations const VolumeDesc *volume; if (volume_get_descriptor(id, volume)) { const uint32_t leaf_count = volume->tree_data.node_count_leaf; pnanovdb_coord_t *leaf_coords = static_cast<pnanovdb_coord_t *>(buf); const uint64_t first_leaf = (uint64_t)volume->buffer + sizeof(pnanovdb_grid_t) + volume->tree_data.node_offset_leaf; const uint32_t leaf_stride = PNANOVDB_GRID_TYPE_GET(volume->grid_data.grid_type, leaf_size); const pnanovdb_buf_t pnano_buf = volume->as_pnano(); for (uint32_t i = 0; i < leaf_count; ++i) { pnanovdb_leaf_handle_t leaf = volume::get_leaf(pnano_buf, i); leaf_coords[i] = volume::leaf_origin(pnano_buf, leaf); } } } void volume_get_voxels_host(uint64_t id, void *buf) { const VolumeDesc *volume; if (volume_get_descriptor(id, volume)) { uint32_t leaf_count; uint64_t voxel_count; volume_get_tile_and_voxel_count(id, leaf_count, voxel_count); pnanovdb_coord_t *voxel_coords = static_cast<pnanovdb_coord_t *>(buf); const pnanovdb_buf_t pnano_buf = volume->as_pnano(); for (uint32_t i = 0; i < leaf_count; ++i) { pnanovdb_leaf_handle_t leaf = volume::get_leaf(pnano_buf, i); pnanovdb_coord_t leaf_coords = volume::leaf_origin(pnano_buf, leaf); for (uint32_t n = 0; n < 512; ++n) { pnanovdb_coord_t loc_ijk = volume::leaf_offset_to_local_coord(n); pnanovdb_coord_t ijk = { loc_ijk.x + leaf_coords.x, loc_ijk.y + leaf_coords.y, loc_ijk.z + leaf_coords.z, }; const uint64_t index = volume::leaf_voxel_index(pnano_buf, i, ijk); if (index < voxel_count) { voxel_coords[index] = ijk; } } } } } void volume_destroy_host(uint64_t id) { const VolumeDesc *volume; if (volume_get_descriptor(id, volume)) { if (volume->owner) { free_host(volume->buffer); } volume_rem_descriptor(id); } } void volume_destroy_device(uint64_t id) { const VolumeDesc *volume; if (volume_get_descriptor(id, volume)) { ContextGuard guard(volume->context); if (volume->owner) { free_device(WP_CURRENT_CONTEXT, volume->buffer); } free_pinned(volume->blind_metadata); volume_rem_descriptor(id); } } #if WP_ENABLE_CUDA uint64_t volume_f_from_tiles_device(void *context, void *points, int num_points, float voxel_size, float bg_value, float tx, float ty, float tz, bool points_in_world_space) { nanovdb::FloatGrid *grid; size_t gridSize; BuildGridParams<float> params; params.voxel_size = voxel_size; params.background_value = bg_value; params.translation = nanovdb::Vec3f{tx, ty, tz}; build_grid_from_points(grid, gridSize, points, num_points, points_in_world_space, params); return volume_create_device(context, grid, gridSize, false, true); } uint64_t volume_v_from_tiles_device(void *context, void *points, int num_points, float voxel_size, float bg_value_x, float bg_value_y, float bg_value_z, float tx, float ty, float tz, bool points_in_world_space) { nanovdb::Vec3fGrid *grid; size_t gridSize; BuildGridParams<nanovdb::Vec3f> params; params.voxel_size = voxel_size; 
params.background_value = nanovdb::Vec3f{bg_value_x, bg_value_y, bg_value_z}; params.translation = nanovdb::Vec3f{tx, ty, tz}; build_grid_from_points(grid, gridSize, points, num_points, points_in_world_space, params); return volume_create_device(context, grid, gridSize, false, true); } uint64_t volume_i_from_tiles_device(void *context, void *points, int num_points, float voxel_size, int bg_value, float tx, float ty, float tz, bool points_in_world_space) { nanovdb::Int32Grid *grid; size_t gridSize; BuildGridParams<int32_t> params; params.voxel_size = voxel_size; params.background_value = (int32_t)(bg_value); params.translation = nanovdb::Vec3f{tx, ty, tz}; build_grid_from_points(grid, gridSize, points, num_points, points_in_world_space, params); return volume_create_device(context, grid, gridSize, false, true); } uint64_t volume_index_from_tiles_device(void *context, void *points, int num_points, float voxel_size, float tx, float ty, float tz, bool points_in_world_space) { nanovdb::IndexGrid *grid; size_t gridSize; BuildGridParams<nanovdb::ValueIndex> params; params.voxel_size = voxel_size; params.translation = nanovdb::Vec3f{tx, ty, tz}; build_grid_from_points(grid, gridSize, points, num_points, points_in_world_space, params); return volume_create_device(context, grid, gridSize, false, true); } uint64_t volume_from_active_voxels_device(void *context, void *points, int num_points, float voxel_size, float tx, float ty, float tz, bool points_in_world_space) { nanovdb::OnIndexGrid *grid; size_t gridSize; BuildGridParams<nanovdb::ValueOnIndex> params; params.voxel_size = voxel_size; params.translation = nanovdb::Vec3f{tx, ty, tz}; build_grid_from_points(grid, gridSize, points, num_points, points_in_world_space, params); return volume_create_device(context, grid, gridSize, false, true); } void launch_get_leaf_coords(void *context, const uint32_t leaf_count, pnanovdb_coord_t *leaf_coords, pnanovdb_buf_t buf); void launch_get_voxel_coords(void *context, const uint32_t leaf_count, const uint32_t voxel_count, pnanovdb_coord_t *voxel_coords, pnanovdb_buf_t buf); void volume_get_tiles_device(uint64_t id, void *buf) { const VolumeDesc *volume; if (volume_get_descriptor(id, volume)) { const uint32_t leaf_count = volume->tree_data.node_count_leaf; pnanovdb_coord_t *leaf_coords = static_cast<pnanovdb_coord_t *>(buf); launch_get_leaf_coords(volume->context, leaf_count, leaf_coords, volume->as_pnano()); } } void volume_get_voxels_device(uint64_t id, void *buf) { const VolumeDesc *volume; if (volume_get_descriptor(id, volume)) { uint32_t leaf_count; uint64_t voxel_count; volume_get_tile_and_voxel_count(id, leaf_count, voxel_count); pnanovdb_coord_t *voxel_coords = static_cast<pnanovdb_coord_t *>(buf); launch_get_voxel_coords(volume->context, leaf_count, voxel_count, voxel_coords, volume->as_pnano()); } } #else // stubs for non-CUDA platforms uint64_t volume_f_from_tiles_device(void *context, void *points, int num_points, float voxel_size, float bg_value, float tx, float ty, float tz, bool points_in_world_space) { return 0; } uint64_t volume_v_from_tiles_device(void *context, void *points, int num_points, float voxel_size, float bg_value_x, float bg_value_y, float bg_value_z, float tx, float ty, float tz, bool points_in_world_space) { return 0; } uint64_t volume_i_from_tiles_device(void *context, void *points, int num_points, float voxel_size, int bg_value, float tx, float ty, float tz, bool points_in_world_space) { return 0; } uint64_t volume_index_from_tiles_device(void *context, void *points, int 
num_points, float voxel_size, float tx, float ty, float tz, bool points_in_world_space) { return 0; } uint64_t volume_from_active_voxels_device(void *context, void *points, int num_points, float voxel_size, float tx, float ty, float tz, bool points_in_world_space) { return 0; } void volume_get_tiles_device(uint64_t id, void *buf) { } void volume_get_voxels_device(uint64_t id, void *buf) { } #endif
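// Usage sketch (hypothetical example, not part of the original file): typical host-side
// lifetime of a volume descriptor that aliases an existing NanoVDB buffer. `nanovdb_buf`
// and `nanovdb_size` are assumed to describe a valid NanoVDB grid owned by the caller.
static void volume_host_usage_example(void *nanovdb_buf, uint64_t nanovdb_size)
{
    // alias the caller's buffer (no copy); returns 0 if it is not a valid NanoVDB grid
    // or a descriptor already exists for this address
    uint64_t id = volume_create_host(nanovdb_buf, nanovdb_size, /*copy*/ false, /*owner*/ false);
    if (id == 0)
        return;

    void *buf = 0;
    uint64_t size = 0;
    volume_get_buffer_info(id, &buf, &size); // returns the aliased pointer and recorded size

    volume_destroy_host(id); // owner == false, so the caller's buffer is left untouched
}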
17,060
C++
31.190566
117
0.624853
NVIDIA/warp/warp/native/hashgrid.h
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once namespace wp { struct HashGrid { float cell_width; float cell_width_inv; int* point_cells{nullptr}; // cell id of a point int* point_ids{nullptr}; // index to original point int* cell_starts{nullptr}; // start index of a range of indices belonging to a cell, dim_x*dim_y*dim_z in length int* cell_ends{nullptr}; // end index of a range of indices belonging to a cell, dim_x*dim_y*dim_z in length int dim_x; int dim_y; int dim_z; int num_points; int max_points; void* context{nullptr}; }; // convert a virtual (world) cell coordinate to a physical one CUDA_CALLABLE inline int hash_grid_index(const HashGrid& grid, int x, int y, int z) { // offset to ensure positive coordinates (means grid dim should be less than 4096^3) const int origin = 1<<20; x += origin; y += origin; z += origin; assert(0 < x); assert(0 < y); assert(0 < z); // clamp in case any particles fall outside the guard region (-10^20 cell index) x = max(0, x); y = max(0, y); z = max(0, z); // compute physical cell (assume pow2 grid dims) // int cx = x & (grid.dim_x-1); // int cy = y & (grid.dim_y-1); // int cz = z & (grid.dim_z-1); // compute physical cell (arbitrary grid dims) int cx = x%grid.dim_x; int cy = y%grid.dim_y; int cz = z%grid.dim_z; return cz*(grid.dim_x*grid.dim_y) + cy*grid.dim_x + cx; } CUDA_CALLABLE inline int hash_grid_index(const HashGrid& grid, const vec3& p) { return hash_grid_index(grid, int(p[0]*grid.cell_width_inv), int(p[1]*grid.cell_width_inv), int(p[2]*grid.cell_width_inv)); } // stores state required to traverse neighboring cells of a point struct hash_grid_query_t { CUDA_CALLABLE hash_grid_query_t() : x_start(0), y_start(0), z_start(0), x_end(0), y_end(0), z_end(0), x(0), y(0), z(0), cell(0), cell_index(0), cell_end(0), current(0), grid() {} // Required for adjoint computations. 
CUDA_CALLABLE inline hash_grid_query_t& operator+=(const hash_grid_query_t& other) { return *this; } int x_start; int y_start; int z_start; int x_end; int y_end; int z_end; int x; int y; int z; int cell; int cell_index; // offset in the current cell (index into cell_indices) int cell_end; // index following the end of this cell int current; // index of the current iterator value HashGrid grid; }; CUDA_CALLABLE inline hash_grid_query_t hash_grid_query(uint64_t id, wp::vec3 pos, float radius) { hash_grid_query_t query; query.grid = *(const HashGrid*)(id); // convert coordinate to grid query.x_start = int((pos[0]-radius)*query.grid.cell_width_inv); query.y_start = int((pos[1]-radius)*query.grid.cell_width_inv); query.z_start = int((pos[2]-radius)*query.grid.cell_width_inv); // do not want to visit any cells more than once, so limit large radius offset to one pass over each dimension query.x_end = min(int((pos[0]+radius)*query.grid.cell_width_inv), query.x_start + query.grid.dim_x-1); query.y_end = min(int((pos[1]+radius)*query.grid.cell_width_inv), query.y_start + query.grid.dim_y-1); query.z_end = min(int((pos[2]+radius)*query.grid.cell_width_inv), query.z_start + query.grid.dim_z-1); query.x = query.x_start; query.y = query.y_start; query.z = query.z_start; const int cell = hash_grid_index(query.grid, query.x, query.y, query.z); query.cell_index = query.grid.cell_starts[cell]; query.cell_end = query.grid.cell_ends[cell]; return query; } CUDA_CALLABLE inline bool hash_grid_query_next(hash_grid_query_t& query, int& index) { const HashGrid& grid = query.grid; if (!grid.point_cells) return false; while (1) { if (query.cell_index < query.cell_end) { // write output index index = grid.point_ids[query.cell_index++]; return true; } else { query.x++; if (query.x > query.x_end) { query.x = query.x_start; query.y++; } if (query.y > query.y_end) { query.y = query.y_start; query.z++; } if (query.z > query.z_end) { // finished lookup grid return false; } // update cell pointers const int cell = hash_grid_index(grid, query.x, query.y, query.z); query.cell_index = grid.cell_starts[cell]; query.cell_end = grid.cell_ends[cell]; } } } CUDA_CALLABLE inline int iter_next(hash_grid_query_t& query) { return query.current; } CUDA_CALLABLE inline bool iter_cmp(hash_grid_query_t& query) { bool finished = hash_grid_query_next(query, query.current); return finished; } CUDA_CALLABLE inline hash_grid_query_t iter_reverse(const hash_grid_query_t& query) { // can't reverse grid queries, users should not rely on neighbor ordering return query; } CUDA_CALLABLE inline int hash_grid_point_id(uint64_t id, int& index) { const HashGrid* grid = (const HashGrid*)(id); if (grid->point_ids == nullptr) return -1; return grid->point_ids[index]; } CUDA_CALLABLE inline void adj_hash_grid_query(uint64_t id, wp::vec3 pos, float radius, uint64_t& adj_id, wp::vec3& adj_pos, float& adj_radius, hash_grid_query_t& adj_res) {} CUDA_CALLABLE inline void adj_hash_grid_query_next(hash_grid_query_t& query, int& index, hash_grid_query_t& adj_query, int& adj_index, bool& adj_res) {} CUDA_CALLABLE inline void adj_hash_grid_point_id(uint64_t id, int& index, uint64_t & adj_id, int& adj_index, int& adj_res) {} } // namespace wp
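// Usage sketch (hypothetical helper, not part of the original header): drives the query
// API above to count candidate neighbors of `pos`. Assumes `id` was returned by
// hash_grid_create_host/device and the grid was built with hash_grid_update_*.
namespace wp
{

CUDA_CALLABLE inline int hash_grid_count_candidates(uint64_t id, vec3 pos, float radius)
{
    hash_grid_query_t query = hash_grid_query(id, pos, radius);

    int index;
    int count = 0;

    while (hash_grid_query_next(query, index))
    {
        // `index` is the original point index; callers normally test the actual distance
        // here, since the grid only returns cell-level candidates near the query sphere
        count++;
    }

    return count;
}

} // namespace wp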
6,496
C
27.495614
173
0.596521
NVIDIA/warp/warp/native/warp.h
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once // defines all crt + builtin types #include "builtin.h" #define WP_CURRENT_STREAM ((void*)0xffffffffffffffff) struct timing_result_t; // this is the core runtime API exposed on the DLL level extern "C" { WP_API int init(); //WP_API void shutdown(); // get error message from C++ WP_API const char* get_error_string(); // allow disabling error output, which is handy during tests that expect failure WP_API void set_error_output_enabled(int enable); WP_API int is_error_output_enabled(); // whether Warp was compiled with CUDA support WP_API int is_cuda_enabled(); // whether Warp was compiled with enhanced CUDA compatibility WP_API int is_cuda_compatibility_enabled(); // whether Warp was compiled with CUTLASS support WP_API int is_cutlass_enabled(); // whether Warp was compiled with debug support WP_API int is_debug_enabled(); WP_API uint16_t float_to_half_bits(float x); WP_API float half_bits_to_float(uint16_t u); WP_API void* alloc_host(size_t s); WP_API void* alloc_pinned(size_t s); WP_API void* alloc_device(void* context, size_t s); // uses cudaMallocAsync() if supported, cudaMalloc() otherwise WP_API void* alloc_device_default(void* context, size_t s); // uses cudaMalloc() WP_API void* alloc_device_async(void* context, size_t s); // uses cudaMallocAsync() WP_API void free_host(void* ptr); WP_API void free_pinned(void* ptr); WP_API void free_device(void* context, void* ptr); // uses cudaFreeAsync() if supported, cudaFree() otherwise WP_API void free_device_default(void* context, void* ptr); // uses cudaFree() WP_API void free_device_async(void* context, void* ptr); // uses cudaFreeAsync() WP_API bool memcpy_h2h(void* dest, void* src, size_t n); WP_API bool memcpy_h2d(void* context, void* dest, void* src, size_t n, void* stream=WP_CURRENT_STREAM); WP_API bool memcpy_d2h(void* context, void* dest, void* src, size_t n, void* stream=WP_CURRENT_STREAM); WP_API bool memcpy_d2d(void* context, void* dest, void* src, size_t n, void* stream=WP_CURRENT_STREAM); WP_API bool memcpy_p2p(void* dst_context, void* dst, void* src_context, void* src, size_t n, void* stream=WP_CURRENT_STREAM); WP_API void memset_host(void* dest, int value, size_t n); WP_API void memset_device(void* context, void* dest, int value, size_t n); // takes srcsize bytes starting at src and repeats them n times at dst (writes srcsize * n bytes in total): WP_API void memtile_host(void* dest, const void* src, size_t srcsize, size_t n); WP_API void memtile_device(void* context, void* dest, const void* src, size_t srcsize, size_t n); WP_API uint64_t bvh_create_host(wp::vec3* lowers, wp::vec3* uppers, int num_items); WP_API void bvh_destroy_host(uint64_t id); WP_API void bvh_refit_host(uint64_t id); WP_API uint64_t bvh_create_device(void* context, wp::vec3* lowers, wp::vec3* uppers, int num_items); WP_API void bvh_destroy_device(uint64_t id); WP_API void bvh_refit_device(uint64_t id); // create a user-accessible copy of the mesh, it is the // users responsibility to keep-alive the points/tris data for the duration of the mesh lifetime WP_API uint64_t mesh_create_host(wp::array_t<wp::vec3> points, 
wp::array_t<wp::vec3> velocities, wp::array_t<int> tris, int num_points, int num_tris, int support_winding_number); WP_API void mesh_destroy_host(uint64_t id); WP_API void mesh_refit_host(uint64_t id); WP_API uint64_t mesh_create_device(void* context, wp::array_t<wp::vec3> points, wp::array_t<wp::vec3> velocities, wp::array_t<int> tris, int num_points, int num_tris, int support_winding_number); WP_API void mesh_destroy_device(uint64_t id); WP_API void mesh_refit_device(uint64_t id); WP_API uint64_t hash_grid_create_host(int dim_x, int dim_y, int dim_z); WP_API void hash_grid_reserve_host(uint64_t id, int num_points); WP_API void hash_grid_destroy_host(uint64_t id); WP_API void hash_grid_update_host(uint64_t id, float cell_width, const wp::array_t<wp::vec3>* points); WP_API uint64_t hash_grid_create_device(void* context, int dim_x, int dim_y, int dim_z); WP_API void hash_grid_reserve_device(uint64_t id, int num_points); WP_API void hash_grid_destroy_device(uint64_t id); WP_API void hash_grid_update_device(uint64_t id, float cell_width, const wp::array_t<wp::vec3>* points); WP_API bool cutlass_gemm(void* context, int compute_capability, int m, int n, int k, const char* datatype, const void* a, const void* b, const void* c, void* d, float alpha, float beta, bool row_major_a, bool row_major_b, bool allow_tf32x3_arith, int batch_count); WP_API uint64_t volume_create_host(void* buf, uint64_t size, bool copy, bool owner); WP_API void volume_get_tiles_host(uint64_t id, void* buf); WP_API void volume_get_voxels_host(uint64_t id, void* buf); WP_API void volume_destroy_host(uint64_t id); WP_API uint64_t volume_create_device(void* context, void* buf, uint64_t size, bool copy, bool owner); WP_API void volume_get_tiles_device(uint64_t id, void* buf); WP_API void volume_get_voxels_device(uint64_t id, void* buf); WP_API void volume_destroy_device(uint64_t id); WP_API uint64_t volume_f_from_tiles_device(void* context, void* points, int num_points, float voxel_size, float bg_value, float tx, float ty, float tz, bool points_in_world_space); WP_API uint64_t volume_v_from_tiles_device(void* context, void* points, int num_points, float voxel_size, float bg_value_x, float bg_value_y, float bg_value_z, float tx, float ty, float tz, bool points_in_world_space); WP_API uint64_t volume_i_from_tiles_device(void* context, void* points, int num_points, float voxel_size, int bg_value, float tx, float ty, float tz, bool points_in_world_space); WP_API uint64_t volume_index_from_tiles_device(void* context, void* points, int num_points, float voxel_size, float tx, float ty, float tz, bool points_in_world_space); WP_API uint64_t volume_from_active_voxels_device(void* context, void* points, int num_points, float voxel_size, float tx, float ty, float tz, bool points_in_world_space); WP_API void volume_get_buffer_info(uint64_t id, void** buf, uint64_t* size); WP_API void volume_get_voxel_size(uint64_t id, float* dx, float* dy, float* dz); WP_API void volume_get_tile_and_voxel_count(uint64_t id, uint32_t& tile_count, uint64_t& voxel_count); WP_API const char* volume_get_grid_info(uint64_t id, uint64_t *grid_size, uint32_t *grid_index, uint32_t *grid_count, float translation[3], float transform[9], char type_str[16]); WP_API uint32_t volume_get_blind_data_count(uint64_t id); WP_API const char* volume_get_blind_data_info(uint64_t id, uint32_t data_index, void** buf, uint64_t* value_count, uint32_t* value_size, char type_str[16]); WP_API uint64_t marching_cubes_create_device(void* context); WP_API void 
marching_cubes_destroy_device(uint64_t id); WP_API int marching_cubes_surface_device(uint64_t id, const float* field, int nx, int ny, int nz, float threshold, wp::vec3* verts, int* triangles, int max_verts, int max_tris, int* out_num_verts, int* out_num_tris); // generic copy supporting non-contiguous arrays WP_API bool array_copy_host(void* dst, void* src, int dst_type, int src_type, int elem_size); WP_API bool array_copy_device(void* context, void* dst, void* src, int dst_type, int src_type, int elem_size); // generic fill for non-contiguous arrays WP_API void array_fill_host(void* arr, int arr_type, const void* value, int value_size); WP_API void array_fill_device(void* context, void* arr, int arr_type, const void* value, int value_size); WP_API void array_inner_float_host(uint64_t a, uint64_t b, uint64_t out, int count, int stride_a, int stride_b, int type_len); WP_API void array_inner_double_host(uint64_t a, uint64_t b, uint64_t out, int count, int stride_a, int stride_b, int type_len); WP_API void array_inner_float_device(uint64_t a, uint64_t b, uint64_t out, int count, int stride_a, int stride_b, int type_len); WP_API void array_inner_double_device(uint64_t a, uint64_t b, uint64_t out, int count, int stride_a, int stride_b, int type_len); WP_API void array_sum_float_device(uint64_t a, uint64_t out, int count, int stride, int type_len); WP_API void array_sum_float_host(uint64_t a, uint64_t out, int count, int stride, int type_len); WP_API void array_sum_double_host(uint64_t a, uint64_t out, int count, int stride, int type_len); WP_API void array_sum_double_device(uint64_t a, uint64_t out, int count, int stride, int type_len); WP_API void array_scan_int_host(uint64_t in, uint64_t out, int len, bool inclusive); WP_API void array_scan_float_host(uint64_t in, uint64_t out, int len, bool inclusive); WP_API void array_scan_int_device(uint64_t in, uint64_t out, int len, bool inclusive); WP_API void array_scan_float_device(uint64_t in, uint64_t out, int len, bool inclusive); WP_API void radix_sort_pairs_int_host(uint64_t keys, uint64_t values, int n); WP_API void radix_sort_pairs_int_device(uint64_t keys, uint64_t values, int n); WP_API void runlength_encode_int_host(uint64_t values, uint64_t run_values, uint64_t run_lengths, uint64_t run_count, int n); WP_API void runlength_encode_int_device(uint64_t values, uint64_t run_values, uint64_t run_lengths, uint64_t run_count, int n); WP_API int bsr_matrix_from_triplets_float_host( int rows_per_block, int cols_per_block, int row_count, int nnz, uint64_t tpl_rows, uint64_t tpl_columns, uint64_t tpl_values, uint64_t bsr_offsets, uint64_t bsr_columns, uint64_t bsr_values); WP_API int bsr_matrix_from_triplets_double_host( int rows_per_block, int cols_per_block, int row_count, int nnz, uint64_t tpl_rows, uint64_t tpl_columns, uint64_t tpl_values, uint64_t bsr_offsets, uint64_t bsr_columns, uint64_t bsr_values); WP_API int bsr_matrix_from_triplets_float_device( int rows_per_block, int cols_per_block, int row_count, int nnz, uint64_t tpl_rows, uint64_t tpl_columns, uint64_t tpl_values, uint64_t bsr_offsets, uint64_t bsr_columns, uint64_t bsr_values); WP_API int bsr_matrix_from_triplets_double_device( int rows_per_block, int cols_per_block, int row_count, int nnz, uint64_t tpl_rows, uint64_t tpl_columns, uint64_t tpl_values, uint64_t bsr_offsets, uint64_t bsr_columns, uint64_t bsr_values); WP_API void bsr_transpose_float_host(int rows_per_block, int cols_per_block, int row_count, int col_count, int nnz, uint64_t bsr_offsets, uint64_t bsr_columns, uint64_t 
bsr_values, uint64_t transposed_bsr_offsets, uint64_t transposed_bsr_columns, uint64_t transposed_bsr_values); WP_API void bsr_transpose_double_host(int rows_per_block, int cols_per_block, int row_count, int col_count, int nnz, uint64_t bsr_offsets, uint64_t bsr_columns, uint64_t bsr_values, uint64_t transposed_bsr_offsets, uint64_t transposed_bsr_columns, uint64_t transposed_bsr_values); WP_API void bsr_transpose_float_device(int rows_per_block, int cols_per_block, int row_count, int col_count, int nnz, uint64_t bsr_offsets, uint64_t bsr_columns, uint64_t bsr_values, uint64_t transposed_bsr_offsets, uint64_t transposed_bsr_columns, uint64_t transposed_bsr_values); WP_API void bsr_transpose_double_device(int rows_per_block, int cols_per_block, int row_count, int col_count, int nnz, uint64_t bsr_offsets, uint64_t bsr_columns, uint64_t bsr_values, uint64_t transposed_bsr_offsets, uint64_t transposed_bsr_columns, uint64_t transposed_bsr_values); WP_API int cuda_driver_version(); // CUDA driver version WP_API int cuda_toolkit_version(); // CUDA Toolkit version used to build Warp WP_API bool cuda_driver_is_initialized(); WP_API int nvrtc_supported_arch_count(); WP_API void nvrtc_supported_archs(int* archs); WP_API int cuda_device_get_count(); WP_API void* cuda_device_get_primary_context(int ordinal); WP_API const char* cuda_device_get_name(int ordinal); WP_API int cuda_device_get_arch(int ordinal); WP_API void cuda_device_get_uuid(int ordinal, char uuid[16]); WP_API int cuda_device_get_pci_domain_id(int ordinal); WP_API int cuda_device_get_pci_bus_id(int ordinal); WP_API int cuda_device_get_pci_device_id(int ordinal); WP_API int cuda_device_is_uva(int ordinal); WP_API int cuda_device_is_mempool_supported(int ordinal); WP_API int cuda_device_set_mempool_release_threshold(int ordinal, uint64_t threshold); WP_API uint64_t cuda_device_get_mempool_release_threshold(int ordinal); WP_API void cuda_device_get_memory_info(int ordinal, size_t* free_mem, size_t* total_mem); WP_API void* cuda_context_get_current(); WP_API void cuda_context_set_current(void* context); WP_API void cuda_context_push_current(void* context); WP_API void cuda_context_pop_current(); WP_API void* cuda_context_create(int device_ordinal); WP_API void cuda_context_destroy(void* context); WP_API int cuda_context_get_device_ordinal(void* context); WP_API int cuda_context_is_primary(void* context); WP_API void* cuda_context_get_stream(void* context); WP_API void cuda_context_set_stream(void* context, void* stream, int sync); // ensures all device side operations have completed in the current context WP_API void cuda_context_synchronize(void* context); // return cudaError_t code WP_API uint64_t cuda_context_check(void* context); // peer access WP_API int cuda_is_peer_access_supported(int target_ordinal, int peer_ordinal); WP_API int cuda_is_peer_access_enabled(void* target_context, void* peer_context); WP_API int cuda_set_peer_access_enabled(void* target_context, void* peer_context, int enable); WP_API int cuda_is_mempool_access_enabled(int target_ordinal, int peer_ordinal); WP_API int cuda_set_mempool_access_enabled(int target_ordinal, int peer_ordinal, int enable); WP_API void* cuda_stream_create(void* context); WP_API void cuda_stream_destroy(void* context, void* stream); WP_API void cuda_stream_register(void* context, void* stream); WP_API void cuda_stream_unregister(void* context, void* stream); WP_API void* cuda_stream_get_current(); WP_API void cuda_stream_synchronize(void* stream); WP_API void cuda_stream_wait_event(void* stream, 
void* event); WP_API void cuda_stream_wait_stream(void* stream, void* other_stream, void* event); WP_API int cuda_stream_is_capturing(void* stream); WP_API void* cuda_event_create(void* context, unsigned flags); WP_API void cuda_event_destroy(void* event); WP_API void cuda_event_record(void* event, void* stream); WP_API void cuda_event_synchronize(void* event); WP_API float cuda_event_elapsed_time(void* start_event, void* end_event); WP_API bool cuda_graph_begin_capture(void* context, void* stream, int external); WP_API bool cuda_graph_end_capture(void* context, void* stream, void** graph_ret); WP_API bool cuda_graph_launch(void* graph, void* stream); WP_API bool cuda_graph_destroy(void* context, void* graph); WP_API size_t cuda_compile_program(const char* cuda_src, int arch, const char* include_dir, bool debug, bool verbose, bool verify_fp, bool fast_math, const char* output_file); WP_API void* cuda_load_module(void* context, const char* ptx); WP_API void cuda_unload_module(void* context, void* module); WP_API void* cuda_get_kernel(void* context, void* module, const char* name); WP_API size_t cuda_launch_kernel(void* context, void* kernel, size_t dim, int max_blocks, void** args, void* stream); WP_API void cuda_set_context_restore_policy(bool always_restore); WP_API int cuda_get_context_restore_policy(); WP_API void cuda_graphics_map(void* context, void* resource); WP_API void cuda_graphics_unmap(void* context, void* resource); WP_API void cuda_graphics_device_ptr_and_size(void* context, void* resource, uint64_t* ptr, size_t* size); WP_API void* cuda_graphics_register_gl_buffer(void* context, uint32_t gl_buffer, unsigned int flags); WP_API void cuda_graphics_unregister_resource(void* context, void* resource); // CUDA timing WP_API void cuda_timing_begin(int flags); WP_API int cuda_timing_get_result_count(); WP_API void cuda_timing_end(timing_result_t* results, int size); } // extern "C"
17,463
C
53.236025
222
0.692206
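The header above is a flat extern "C" surface, so it can be exercised directly from a small host program. A minimal sketch of the host-memory path, assuming the program links against the Warp native library and that init() returns 0 on success; the declarations are copied from the header above, and the buffer size and values are arbitrary:

#include <cstddef>
#include <cstdio>

extern "C"
{
    // declarations copied from the runtime header above
    int init();
    const char* get_error_string();
    void* alloc_host(size_t s);
    void free_host(void* ptr);
    bool memcpy_h2h(void* dest, void* src, size_t n);
}

int main()
{
    if (init() != 0)  // assumption: non-zero return indicates failure
    {
        printf("init failed: %s\n", get_error_string());
        return 1;
    }

    const size_t bytes = 16 * sizeof(float);
    float* src = static_cast<float*>(alloc_host(bytes));
    float* dst = static_cast<float*>(alloc_host(bytes));
    for (int i = 0; i < 16; ++i)
        src[i] = float(i);

    memcpy_h2h(dst, src, bytes);  // host-to-host copy through the runtime API

    printf("dst[15] = %g\n", dst[15]);  // 15

    free_host(src);
    free_host(dst);
    return 0;
}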
NVIDIA/warp/warp/native/error.cpp
/** Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved.
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

namespace wp
{

static char g_error_buffer[4096] = "";
static bool g_error_output_enabled = true;
static FILE* g_error_stream = stderr;

const char* get_error_string()
{
    return g_error_buffer;
}

void set_error_string(const char* fmt, ...)
{
    va_list args;
    va_start(args, fmt);
    vsnprintf(g_error_buffer, sizeof(g_error_buffer), fmt, args);
    if (g_error_output_enabled)
    {
        vfprintf(g_error_stream, fmt, args);
        fputc('\n', g_error_stream);
        fflush(g_error_stream);
    }
    va_end(args);
}

void append_error_string(const char* fmt, ...)
{
    size_t offset = strlen(g_error_buffer);
    if (offset + 2 > sizeof(g_error_buffer))
        return;
    g_error_buffer[offset++] = '\n';
    va_list args;
    va_start(args, fmt);
    vsnprintf(g_error_buffer + offset, sizeof(g_error_buffer) - offset, fmt, args);
    if (g_error_output_enabled)
    {
        vfprintf(g_error_stream, fmt, args);
        fputc('\n', g_error_stream);
        fflush(g_error_stream);
    }
    va_end(args);
}

void set_error_output_enabled(bool enable)
{
    g_error_output_enabled = enable;
}

bool is_error_output_enabled()
{
    return g_error_output_enabled;
}

} // end of namespace wp
1,706
C++
24.477612
83
0.664127
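The buffer/stream pair in error.cpp supports a simple pattern: record the last error, optionally append context, and keep stderr quiet during tests that expect failure. A small sketch, assuming it is compiled in the same project as error.cpp (the declarations below just mirror the definitions in that file; the error text is made up):

#include <cstdio>

namespace wp
{
    // declarations mirroring error.cpp
    const char* get_error_string();
    void set_error_string(const char* fmt, ...);
    void append_error_string(const char* fmt, ...);
    void set_error_output_enabled(bool enable);
}

int main()
{
    wp::set_error_output_enabled(false);  // silence stderr while provoking errors

    wp::set_error_string("cuda_load_module failed (code %d)", 300);
    wp::append_error_string("while loading module '%s'", "kernels.ptx");

    // the buffer now holds both lines, separated by '\n'
    printf("last error:\n%s\n", wp::get_error_string());

    wp::set_error_output_enabled(true);   // restore normal error printing
    return 0;
}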
NVIDIA/warp/warp/native/temp_buffer.h
#pragma once

#include "cuda_util.h"
#include "warp.h"

#include <unordered_map>

template <typename T = char>
struct ScopedTemporary
{
    ScopedTemporary(void *context, size_t size)
        : m_context(context), m_buffer(static_cast<T*>(alloc_device(m_context, size * sizeof(T))))
    {
    }

    ~ScopedTemporary()
    {
        free_device(m_context, m_buffer);
    }

    T *buffer() const
    {
        return m_buffer;
    }

private:
    void *m_context;
    T *m_buffer;
};
488
C
14.774193
98
0.592213
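ScopedTemporary is a small RAII wrapper over the alloc_device()/free_device() pair from the runtime API, so device scratch space is released even on early return. A usage sketch, assuming a valid CUDA context pointer obtained elsewhere (for example via cuda_context_get_current()) and a hypothetical helper name:

#include "temp_buffer.h"

// hypothetical helper that needs n floats of device scratch space
void do_work_with_scratch(void* context, int n)
{
    // allocates n * sizeof(float) bytes on the device; freed when 'tmp' goes out of scope
    ScopedTemporary<float> tmp(context, size_t(n));

    float* scratch = tmp.buffer();
    // ... launch kernels that read/write 'scratch' ...

}   // ~ScopedTemporary() calls free_device(context, scratch) here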
NVIDIA/warp/warp/native/sort.h
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

#pragma once

#include <stddef.h>

void radix_sort_reserve(void* context, int n, void** mem_out=NULL, size_t* size_out=NULL);

void radix_sort_pairs_host(int* keys, int* values, int n);
void radix_sort_pairs_device(void* context, int* keys, int* values, int n);
695
C
45.399997
90
0.765468
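sort.h only declares the host/device entry points for the key-value radix sort, so the sketch below shows the call shape; sizing the arrays at 2*n entries is an assumption (auxiliary ping-pong space in the same buffers) that should be verified against sort.cpp before relying on it:

#include <cstdio>
#include <vector>

void radix_sort_pairs_host(int* keys, int* values, int n);   // from sort.h

int main()
{
    const int n = 5;
    std::vector<int> keys   = {42, 7, 19, 7, 3};
    std::vector<int> values = { 0, 1,  2, 3, 4};   // payload: original indices
    keys.resize(2 * n);     // assumed scratch capacity, see note above
    values.resize(2 * n);

    radix_sort_pairs_host(keys.data(), values.data(), n);

    for (int i = 0; i < n; ++i)
        printf("%d -> %d\n", keys[i], values[i]);   // keys ascending, values reordered alongside
    return 0;
}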
NVIDIA/warp/warp/native/builtin.h
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once // All built-in types and functions. To be compatible with runtime NVRTC compilation // this header must be independently compilable (i.e.: without external SDK headers) // to achieve this we redefine a subset of CRT functions (printf, pow, sin, cos, etc) #include "crt.h" #ifdef _WIN32 #define __restrict__ __restrict #endif #if !defined(__CUDACC__) #define CUDA_CALLABLE #define CUDA_CALLABLE_DEVICE #else #define CUDA_CALLABLE __host__ __device__ #define CUDA_CALLABLE_DEVICE __device__ #endif #ifdef WP_VERIFY_FP #define FP_CHECK 1 #define DO_IF_FPCHECK(X) {X} #define DO_IF_NO_FPCHECK(X) #else #define FP_CHECK 0 #define DO_IF_FPCHECK(X) #define DO_IF_NO_FPCHECK(X) {X} #endif #define RAD_TO_DEG 57.29577951308232087679 #define DEG_TO_RAD 0.01745329251994329577 #if defined(__CUDACC__) && !defined(_MSC_VER) __device__ void __debugbreak() {} #endif namespace wp { // numeric types (used from generated kernels) typedef float float32; typedef double float64; typedef int8_t int8; typedef uint8_t uint8; typedef int16_t int16; typedef uint16_t uint16; typedef int32_t int32; typedef uint32_t uint32; typedef int64_t int64; typedef uint64_t uint64; // matches Python string type for constant strings typedef const char* str; struct half; CUDA_CALLABLE half float_to_half(float x); CUDA_CALLABLE float half_to_float(half x); struct half { CUDA_CALLABLE inline half() : u(0) {} CUDA_CALLABLE inline half(float f) { *this = float_to_half(f); } unsigned short u; CUDA_CALLABLE inline bool operator==(const half& h) const { // Use float32 to get IEEE 754 behavior in case of a NaN return float32(h) == float32(*this); } CUDA_CALLABLE inline bool operator!=(const half& h) const { // Use float32 to get IEEE 754 behavior in case of a NaN return float32(h) != float32(*this); } CUDA_CALLABLE inline bool operator>(const half& h) const { return half_to_float(*this) > half_to_float(h); } CUDA_CALLABLE inline bool operator>=(const half& h) const { return half_to_float(*this) >= half_to_float(h); } CUDA_CALLABLE inline bool operator<(const half& h) const { return half_to_float(*this) < half_to_float(h); } CUDA_CALLABLE inline bool operator<=(const half& h) const { return half_to_float(*this) <= half_to_float(h); } CUDA_CALLABLE inline bool operator!() const { return float32(*this) == 0; } CUDA_CALLABLE inline half operator*=(const half& h) { half prod = half(float32(*this) * float32(h)); this->u = prod.u; return *this; } CUDA_CALLABLE inline half operator/=(const half& h) { half quot = half(float32(*this) / float32(h)); this->u = quot.u; return *this; } CUDA_CALLABLE inline half operator+=(const half& h) { half sum = half(float32(*this) + float32(h)); this->u = sum.u; return *this; } CUDA_CALLABLE inline half operator-=(const half& h) { half diff = half(float32(*this) - float32(h)); this->u = diff.u; return *this; } CUDA_CALLABLE inline operator float32() const { return float32(half_to_float(*this)); } CUDA_CALLABLE inline operator float64() const { return float64(half_to_float(*this)); } CUDA_CALLABLE inline operator int8() const { return int8(half_to_float(*this)); } CUDA_CALLABLE inline operator 
uint8() const { return uint8(half_to_float(*this)); } CUDA_CALLABLE inline operator int16() const { return int16(half_to_float(*this)); } CUDA_CALLABLE inline operator uint16() const { return uint16(half_to_float(*this)); } CUDA_CALLABLE inline operator int32() const { return int32(half_to_float(*this)); } CUDA_CALLABLE inline operator uint32() const { return uint32(half_to_float(*this)); } CUDA_CALLABLE inline operator int64() const { return int64(half_to_float(*this)); } CUDA_CALLABLE inline operator uint64() const { return uint64(half_to_float(*this)); } }; static_assert(sizeof(half) == 2, "Size of half / float16 type must be 2-bytes"); typedef half float16; #if defined(__CUDA_ARCH__) CUDA_CALLABLE inline half float_to_half(float x) { half h; asm("{ cvt.rn.f16.f32 %0, %1;}\n" : "=h"(h.u) : "f"(x)); return h; } CUDA_CALLABLE inline float half_to_float(half x) { float val; asm("{ cvt.f32.f16 %0, %1;}\n" : "=f"(val) : "h"(x.u)); return val; } #elif defined(__clang__) // _Float16 is Clang's native half-precision floating-point type inline half float_to_half(float x) { _Float16 f16 = static_cast<_Float16>(x); return *reinterpret_cast<half*>(&f16); } inline float half_to_float(half h) { _Float16 f16 = *reinterpret_cast<_Float16*>(&h); return static_cast<float>(f16); } #else // Native C++ for Warp builtins outside of kernels extern "C" WP_API uint16_t float_to_half_bits(float x); extern "C" WP_API float half_bits_to_float(uint16_t u); inline half float_to_half(float x) { half h; h.u = float_to_half_bits(x); return h; } inline float half_to_float(half h) { return half_bits_to_float(h.u); } #endif // BAD operator implementations for fp16 arithmetic... // negation: inline CUDA_CALLABLE half operator - (half a) { return float_to_half( -half_to_float(a) ); } inline CUDA_CALLABLE half operator + (half a,half b) { return float_to_half( half_to_float(a) + half_to_float(b) ); } inline CUDA_CALLABLE half operator - (half a,half b) { return float_to_half( half_to_float(a) - half_to_float(b) ); } inline CUDA_CALLABLE half operator * (half a,half b) { return float_to_half( half_to_float(a) * half_to_float(b) ); } inline CUDA_CALLABLE half operator * (half a,double b) { return float_to_half( half_to_float(a) * b ); } inline CUDA_CALLABLE half operator * (double a,half b) { return float_to_half( a * half_to_float(b) ); } inline CUDA_CALLABLE half operator / (half a,half b) { return float_to_half( half_to_float(a) / half_to_float(b) ); } template <typename T> CUDA_CALLABLE float cast_float(T x) { return (float)(x); } template <typename T> CUDA_CALLABLE int cast_int(T x) { return (int)(x); } template <typename T> CUDA_CALLABLE void adj_cast_float(T x, T& adj_x, float adj_ret) { adj_x += T(adj_ret); } template <typename T> CUDA_CALLABLE void adj_cast_int(T x, T& adj_x, int adj_ret) { adj_x += adj_ret; } template <typename T> CUDA_CALLABLE inline void adj_int8(T, T&, int8) {} template <typename T> CUDA_CALLABLE inline void adj_uint8(T, T&, uint8) {} template <typename T> CUDA_CALLABLE inline void adj_int16(T, T&, int16) {} template <typename T> CUDA_CALLABLE inline void adj_uint16(T, T&, uint16) {} template <typename T> CUDA_CALLABLE inline void adj_int32(T, T&, int32) {} template <typename T> CUDA_CALLABLE inline void adj_uint32(T, T&, uint32) {} template <typename T> CUDA_CALLABLE inline void adj_int64(T, T&, int64) {} template <typename T> CUDA_CALLABLE inline void adj_uint64(T, T&, uint64) {} template <typename T> CUDA_CALLABLE inline void adj_float16(T x, T& adj_x, float16 adj_ret) { adj_x += T(adj_ret); } 
template <typename T> CUDA_CALLABLE inline void adj_float32(T x, T& adj_x, float32 adj_ret) { adj_x += T(adj_ret); } template <typename T> CUDA_CALLABLE inline void adj_float64(T x, T& adj_x, float64 adj_ret) { adj_x += T(adj_ret); } #define kEps 0.0f // basic ops for integer types #define DECLARE_INT_OPS(T) \ inline CUDA_CALLABLE T mul(T a, T b) { return a*b; } \ inline CUDA_CALLABLE T div(T a, T b) { return a/b; } \ inline CUDA_CALLABLE T add(T a, T b) { return a+b; } \ inline CUDA_CALLABLE T sub(T a, T b) { return a-b; } \ inline CUDA_CALLABLE T mod(T a, T b) { return a%b; } \ inline CUDA_CALLABLE T min(T a, T b) { return a<b?a:b; } \ inline CUDA_CALLABLE T max(T a, T b) { return a>b?a:b; } \ inline CUDA_CALLABLE T clamp(T x, T a, T b) { return min(max(a, x), b); } \ inline CUDA_CALLABLE T floordiv(T a, T b) { return a/b; } \ inline CUDA_CALLABLE T nonzero(T x) { return x == T(0) ? T(0) : T(1); } \ inline CUDA_CALLABLE T sqrt(T x) { return 0; } \ inline CUDA_CALLABLE T bit_and(T a, T b) { return a&b; } \ inline CUDA_CALLABLE T bit_or(T a, T b) { return a|b; } \ inline CUDA_CALLABLE T bit_xor(T a, T b) { return a^b; } \ inline CUDA_CALLABLE T lshift(T a, T b) { return a<<b; } \ inline CUDA_CALLABLE T rshift(T a, T b) { return a>>b; } \ inline CUDA_CALLABLE T invert(T x) { return ~x; } \ inline CUDA_CALLABLE bool isfinite(T x) { return ::isfinite(double(x)); } \ inline CUDA_CALLABLE bool isnan(T x) { return ::isnan(double(x)); } \ inline CUDA_CALLABLE bool isinf(T x) { return ::isinf(double(x)); } \ inline CUDA_CALLABLE void adj_mul(T a, T b, T& adj_a, T& adj_b, T adj_ret) { } \ inline CUDA_CALLABLE void adj_div(T a, T b, T ret, T& adj_a, T& adj_b, T adj_ret) { } \ inline CUDA_CALLABLE void adj_add(T a, T b, T& adj_a, T& adj_b, T adj_ret) { } \ inline CUDA_CALLABLE void adj_sub(T a, T b, T& adj_a, T& adj_b, T adj_ret) { } \ inline CUDA_CALLABLE void adj_mod(T a, T b, T& adj_a, T& adj_b, T adj_ret) { } \ inline CUDA_CALLABLE void adj_min(T a, T b, T& adj_a, T& adj_b, T adj_ret) { } \ inline CUDA_CALLABLE void adj_max(T a, T b, T& adj_a, T& adj_b, T adj_ret) { } \ inline CUDA_CALLABLE void adj_abs(T x, T adj_x, T& adj_ret) { } \ inline CUDA_CALLABLE void adj_sign(T x, T adj_x, T& adj_ret) { } \ inline CUDA_CALLABLE void adj_clamp(T x, T a, T b, T& adj_x, T& adj_a, T& adj_b, T adj_ret) { } \ inline CUDA_CALLABLE void adj_floordiv(T a, T b, T& adj_a, T& adj_b, T adj_ret) { } \ inline CUDA_CALLABLE void adj_step(T x, T& adj_x, T adj_ret) { } \ inline CUDA_CALLABLE void adj_nonzero(T x, T& adj_x, T adj_ret) { } \ inline CUDA_CALLABLE void adj_sqrt(T x, T adj_x, T& adj_ret) { } \ inline CUDA_CALLABLE void adj_bit_and(T a, T b, T& adj_a, T& adj_b, T adj_ret) { } \ inline CUDA_CALLABLE void adj_bit_or(T a, T b, T& adj_a, T& adj_b, T adj_ret) { } \ inline CUDA_CALLABLE void adj_bit_xor(T a, T b, T& adj_a, T& adj_b, T adj_ret) { } \ inline CUDA_CALLABLE void adj_lshift(T a, T b, T& adj_a, T& adj_b, T adj_ret) { } \ inline CUDA_CALLABLE void adj_rshift(T a, T b, T& adj_a, T& adj_b, T adj_ret) { } \ inline CUDA_CALLABLE void adj_invert(T x, T adj_x, T& adj_ret) { } \ inline CUDA_CALLABLE void adj_isnan(const T&, T&, bool) { } \ inline CUDA_CALLABLE void adj_isinf(const T&, T&, bool) { } \ inline CUDA_CALLABLE void adj_isfinite(const T&, T&, bool) { } inline CUDA_CALLABLE int8 abs(int8 x) { return ::abs(x); } inline CUDA_CALLABLE int16 abs(int16 x) { return ::abs(x); } inline CUDA_CALLABLE int32 abs(int32 x) { return ::abs(x); } inline CUDA_CALLABLE int64 abs(int64 x) { return ::llabs(x); } inline 
CUDA_CALLABLE uint8 abs(uint8 x) { return x; } inline CUDA_CALLABLE uint16 abs(uint16 x) { return x; } inline CUDA_CALLABLE uint32 abs(uint32 x) { return x; } inline CUDA_CALLABLE uint64 abs(uint64 x) { return x; } DECLARE_INT_OPS(int8) DECLARE_INT_OPS(int16) DECLARE_INT_OPS(int32) DECLARE_INT_OPS(int64) DECLARE_INT_OPS(uint8) DECLARE_INT_OPS(uint16) DECLARE_INT_OPS(uint32) DECLARE_INT_OPS(uint64) inline CUDA_CALLABLE int8 step(int8 x) { return x < 0 ? 1 : 0; } inline CUDA_CALLABLE int16 step(int16 x) { return x < 0 ? 1 : 0; } inline CUDA_CALLABLE int32 step(int32 x) { return x < 0 ? 1 : 0; } inline CUDA_CALLABLE int64 step(int64 x) { return x < 0 ? 1 : 0; } inline CUDA_CALLABLE uint8 step(uint8 x) { return 0; } inline CUDA_CALLABLE uint16 step(uint16 x) { return 0; } inline CUDA_CALLABLE uint32 step(uint32 x) { return 0; } inline CUDA_CALLABLE uint64 step(uint64 x) { return 0; } inline CUDA_CALLABLE int8 sign(int8 x) { return x < 0 ? -1 : 1; } inline CUDA_CALLABLE int8 sign(int16 x) { return x < 0 ? -1 : 1; } inline CUDA_CALLABLE int8 sign(int32 x) { return x < 0 ? -1 : 1; } inline CUDA_CALLABLE int8 sign(int64 x) { return x < 0 ? -1 : 1; } inline CUDA_CALLABLE uint8 sign(uint8 x) { return 1; } inline CUDA_CALLABLE uint16 sign(uint16 x) { return 1; } inline CUDA_CALLABLE uint32 sign(uint32 x) { return 1; } inline CUDA_CALLABLE uint64 sign(uint64 x) { return 1; } // Catch-all for non-float, non-integer types template<typename T> inline bool CUDA_CALLABLE isfinite(const T&) { return true; } inline bool CUDA_CALLABLE isfinite(half x) { return ::isfinite(float(x)); } inline bool CUDA_CALLABLE isfinite(float x) { return ::isfinite(x); } inline bool CUDA_CALLABLE isfinite(double x) { return ::isfinite(x); } inline bool CUDA_CALLABLE isnan(half x) { return ::isnan(float(x)); } inline bool CUDA_CALLABLE isnan(float x) { return ::isnan(x); } inline bool CUDA_CALLABLE isnan(double x) { return ::isnan(x); } inline bool CUDA_CALLABLE isinf(half x) { return ::isinf(float(x)); } inline bool CUDA_CALLABLE isinf(float x) { return ::isinf(x); } inline bool CUDA_CALLABLE isinf(double x) { return ::isinf(x); } template<typename T> inline CUDA_CALLABLE void print(const T&) { printf("<type without print implementation>\n"); } inline CUDA_CALLABLE void print(float16 f) { printf("%g\n", half_to_float(f)); } inline CUDA_CALLABLE void print(float f) { printf("%g\n", f); } inline CUDA_CALLABLE void print(double f) { printf("%g\n", f); } // basic ops for float types #define DECLARE_FLOAT_OPS(T) \ inline CUDA_CALLABLE T mul(T a, T b) { return a*b; } \ inline CUDA_CALLABLE T add(T a, T b) { return a+b; } \ inline CUDA_CALLABLE T sub(T a, T b) { return a-b; } \ inline CUDA_CALLABLE T min(T a, T b) { return a<b?a:b; } \ inline CUDA_CALLABLE T max(T a, T b) { return a>b?a:b; } \ inline CUDA_CALLABLE T sign(T x) { return x < T(0) ? -1 : 1; } \ inline CUDA_CALLABLE T step(T x) { return x < T(0) ? T(1) : T(0); }\ inline CUDA_CALLABLE T nonzero(T x) { return x == T(0) ? 
T(0) : T(1); }\ inline CUDA_CALLABLE T clamp(T x, T a, T b) { return min(max(a, x), b); }\ inline CUDA_CALLABLE void adj_abs(T x, T& adj_x, T adj_ret) \ {\ if (x < T(0))\ adj_x -= adj_ret;\ else\ adj_x += adj_ret;\ }\ inline CUDA_CALLABLE void adj_mul(T a, T b, T& adj_a, T& adj_b, T adj_ret) { adj_a += b*adj_ret; adj_b += a*adj_ret; } \ inline CUDA_CALLABLE void adj_add(T a, T b, T& adj_a, T& adj_b, T adj_ret) { adj_a += adj_ret; adj_b += adj_ret; } \ inline CUDA_CALLABLE void adj_sub(T a, T b, T& adj_a, T& adj_b, T adj_ret) { adj_a += adj_ret; adj_b -= adj_ret; } \ inline CUDA_CALLABLE void adj_min(T a, T b, T& adj_a, T& adj_b, T adj_ret) \ { \ if (a < b) \ adj_a += adj_ret; \ else \ adj_b += adj_ret; \ } \ inline CUDA_CALLABLE void adj_max(T a, T b, T& adj_a, T& adj_b, T adj_ret) \ { \ if (a > b) \ adj_a += adj_ret; \ else \ adj_b += adj_ret; \ } \ inline CUDA_CALLABLE void adj_floordiv(T a, T b, T& adj_a, T& adj_b, T adj_ret) { } \ inline CUDA_CALLABLE void adj_mod(T a, T b, T& adj_a, T& adj_b, T adj_ret){ adj_a += adj_ret; }\ inline CUDA_CALLABLE void adj_sign(T x, T adj_x, T& adj_ret) { }\ inline CUDA_CALLABLE void adj_step(T x, T& adj_x, T adj_ret) { }\ inline CUDA_CALLABLE void adj_nonzero(T x, T& adj_x, T adj_ret) { }\ inline CUDA_CALLABLE void adj_clamp(T x, T a, T b, T& adj_x, T& adj_a, T& adj_b, T adj_ret)\ {\ if (x < a)\ adj_a += adj_ret;\ else if (x > b)\ adj_b += adj_ret;\ else\ adj_x += adj_ret;\ }\ inline CUDA_CALLABLE T div(T a, T b)\ {\ DO_IF_FPCHECK(\ if (!isfinite(a) || !isfinite(b) || b == T(0))\ {\ printf("%s:%d div(%f, %f)\n", __FILE__, __LINE__, float(a), float(b));\ assert(0);\ })\ return a/b;\ }\ inline CUDA_CALLABLE void adj_div(T a, T b, T ret, T& adj_a, T& adj_b, T adj_ret)\ {\ adj_a += adj_ret/b;\ adj_b -= adj_ret*(ret)/b;\ DO_IF_FPCHECK(\ if (!isfinite(adj_a) || !isfinite(adj_b))\ {\ printf("%s:%d - adj_div(%f, %f, %f, %f, %f)\n", __FILE__, __LINE__, float(a), float(b), float(adj_a), float(adj_b), float(adj_ret));\ assert(0);\ })\ }\ inline CUDA_CALLABLE void adj_isnan(const T&, T&, bool) { }\ inline CUDA_CALLABLE void adj_isinf(const T&, T&, bool) { }\ inline CUDA_CALLABLE void adj_isfinite(const T&, T&, bool) { } DECLARE_FLOAT_OPS(float16) DECLARE_FLOAT_OPS(float32) DECLARE_FLOAT_OPS(float64) // basic ops for float types inline CUDA_CALLABLE float16 mod(float16 a, float16 b) { #if FP_CHECK if (!isfinite(a) || !isfinite(b) || float(b) == 0.0f) { printf("%s:%d mod(%f, %f)\n", __FILE__, __LINE__, float(a), float(b)); assert(0); } #endif return fmodf(float(a), float(b)); } inline CUDA_CALLABLE float32 mod(float32 a, float32 b) { #if FP_CHECK if (!isfinite(a) || !isfinite(b) || b == 0.0f) { printf("%s:%d mod(%f, %f)\n", __FILE__, __LINE__, a, b); assert(0); } #endif return fmodf(a, b); } inline CUDA_CALLABLE double mod(double a, double b) { #if FP_CHECK if (!isfinite(a) || !isfinite(b) || b == 0.0f) { printf("%s:%d mod(%f, %f)\n", __FILE__, __LINE__, a, b); assert(0); } #endif return fmod(a, b); } inline CUDA_CALLABLE half log(half a) { #if FP_CHECK if (!isfinite(a) || float(a) < 0.0f) { printf("%s:%d log(%f)\n", __FILE__, __LINE__, float(a)); assert(0); } #endif return ::logf(a); } inline CUDA_CALLABLE float log(float a) { #if FP_CHECK if (!isfinite(a) || a < 0.0f) { printf("%s:%d log(%f)\n", __FILE__, __LINE__, a); assert(0); } #endif return ::logf(a); } inline CUDA_CALLABLE double log(double a) { #if FP_CHECK if (!isfinite(a) || a < 0.0) { printf("%s:%d log(%f)\n", __FILE__, __LINE__, a); assert(0); } #endif return ::log(a); } inline CUDA_CALLABLE half 
log2(half a) { #if FP_CHECK if (!isfinite(a) || float(a) < 0.0f) { printf("%s:%d log2(%f)\n", __FILE__, __LINE__, float(a)); assert(0); } #endif return ::log2f(float(a)); } inline CUDA_CALLABLE float log2(float a) { #if FP_CHECK if (!isfinite(a) || a < 0.0f) { printf("%s:%d log2(%f)\n", __FILE__, __LINE__, a); assert(0); } #endif return ::log2f(a); } inline CUDA_CALLABLE double log2(double a) { #if FP_CHECK if (!isfinite(a) || a < 0.0) { printf("%s:%d log2(%f)\n", __FILE__, __LINE__, a); assert(0); } #endif return ::log2(a); } inline CUDA_CALLABLE half log10(half a) { #if FP_CHECK if (!isfinite(a) || float(a) < 0.0f) { printf("%s:%d log10(%f)\n", __FILE__, __LINE__, float(a)); assert(0); } #endif return ::log10f(float(a)); } inline CUDA_CALLABLE float log10(float a) { #if FP_CHECK if (!isfinite(a) || a < 0.0f) { printf("%s:%d log10(%f)\n", __FILE__, __LINE__, a); assert(0); } #endif return ::log10f(a); } inline CUDA_CALLABLE double log10(double a) { #if FP_CHECK if (!isfinite(a) || a < 0.0) { printf("%s:%d log10(%f)\n", __FILE__, __LINE__, a); assert(0); } #endif return ::log10(a); } inline CUDA_CALLABLE half exp(half a) { half result = ::expf(float(a)); #if FP_CHECK if (!isfinite(a) || !isfinite(result)) { printf("%s:%d exp(%f) = %f\n", __FILE__, __LINE__, float(a), float(result)); assert(0); } #endif return result; } inline CUDA_CALLABLE float exp(float a) { float result = ::expf(a); #if FP_CHECK if (!isfinite(a) || !isfinite(result)) { printf("%s:%d exp(%f) = %f\n", __FILE__, __LINE__, a, result); assert(0); } #endif return result; } inline CUDA_CALLABLE double exp(double a) { double result = ::exp(a); #if FP_CHECK if (!isfinite(a) || !isfinite(result)) { printf("%s:%d exp(%f) = %f\n", __FILE__, __LINE__, a, result); assert(0); } #endif return result; } inline CUDA_CALLABLE half pow(half a, half b) { float result = ::powf(float(a), float(b)); #if FP_CHECK if (!isfinite(float(a)) || !isfinite(float(b)) || !isfinite(result)) { printf("%s:%d pow(%f, %f) = %f\n", __FILE__, __LINE__, float(a), float(b), result); assert(0); } #endif return result; } inline CUDA_CALLABLE float pow(float a, float b) { float result = ::powf(a, b); #if FP_CHECK if (!isfinite(a) || !isfinite(b) || !isfinite(result)) { printf("%s:%d pow(%f, %f) = %f\n", __FILE__, __LINE__, a, b, result); assert(0); } #endif return result; } inline CUDA_CALLABLE double pow(double a, double b) { double result = ::pow(a, b); #if FP_CHECK if (!isfinite(a) || !isfinite(b) || !isfinite(result)) { printf("%s:%d pow(%f, %f) = %f\n", __FILE__, __LINE__, a, b, result); assert(0); } #endif return result; } inline CUDA_CALLABLE half floordiv(half a, half b) { #if FP_CHECK if (!isfinite(a) || !isfinite(b) || float(b) == 0.0f) { printf("%s:%d mod(%f, %f)\n", __FILE__, __LINE__, float(a), float(b)); assert(0); } #endif return floorf(float(a/b)); } inline CUDA_CALLABLE float floordiv(float a, float b) { #if FP_CHECK if (!isfinite(a) || !isfinite(b) || b == 0.0f) { printf("%s:%d mod(%f, %f)\n", __FILE__, __LINE__, a, b); assert(0); } #endif return floorf(a/b); } inline CUDA_CALLABLE double floordiv(double a, double b) { #if FP_CHECK if (!isfinite(a) || !isfinite(b) || b == 0.0) { printf("%s:%d mod(%f, %f)\n", __FILE__, __LINE__, a, b); assert(0); } #endif return ::floor(a/b); } inline CUDA_CALLABLE float leaky_min(float a, float b, float r) { return min(a, b); } inline CUDA_CALLABLE float leaky_max(float a, float b, float r) { return max(a, b); } inline CUDA_CALLABLE half abs(half x) { return ::fabsf(float(x)); } inline CUDA_CALLABLE float 
abs(float x) { return ::fabsf(x); } inline CUDA_CALLABLE double abs(double x) { return ::fabs(x); } inline CUDA_CALLABLE float acos(float x){ return ::acosf(min(max(x, -1.0f), 1.0f)); } inline CUDA_CALLABLE float asin(float x){ return ::asinf(min(max(x, -1.0f), 1.0f)); } inline CUDA_CALLABLE float atan(float x) { return ::atanf(x); } inline CUDA_CALLABLE float atan2(float y, float x) { return ::atan2f(y, x); } inline CUDA_CALLABLE float sin(float x) { return ::sinf(x); } inline CUDA_CALLABLE float cos(float x) { return ::cosf(x); } inline CUDA_CALLABLE double acos(double x){ return ::acos(min(max(x, -1.0), 1.0)); } inline CUDA_CALLABLE double asin(double x){ return ::asin(min(max(x, -1.0), 1.0)); } inline CUDA_CALLABLE double atan(double x) { return ::atan(x); } inline CUDA_CALLABLE double atan2(double y, double x) { return ::atan2(y, x); } inline CUDA_CALLABLE double sin(double x) { return ::sin(x); } inline CUDA_CALLABLE double cos(double x) { return ::cos(x); } inline CUDA_CALLABLE half acos(half x){ return ::acosf(min(max(float(x), -1.0f), 1.0f)); } inline CUDA_CALLABLE half asin(half x){ return ::asinf(min(max(float(x), -1.0f), 1.0f)); } inline CUDA_CALLABLE half atan(half x) { return ::atanf(float(x)); } inline CUDA_CALLABLE half atan2(half y, half x) { return ::atan2f(float(y), float(x)); } inline CUDA_CALLABLE half sin(half x) { return ::sinf(float(x)); } inline CUDA_CALLABLE half cos(half x) { return ::cosf(float(x)); } inline CUDA_CALLABLE float sqrt(float x) { #if FP_CHECK if (x < 0.0f) { printf("%s:%d sqrt(%f)\n", __FILE__, __LINE__, x); assert(0); } #endif return ::sqrtf(x); } inline CUDA_CALLABLE double sqrt(double x) { #if FP_CHECK if (x < 0.0) { printf("%s:%d sqrt(%f)\n", __FILE__, __LINE__, x); assert(0); } #endif return ::sqrt(x); } inline CUDA_CALLABLE half sqrt(half x) { #if FP_CHECK if (float(x) < 0.0f) { printf("%s:%d sqrt(%f)\n", __FILE__, __LINE__, float(x)); assert(0); } #endif return ::sqrtf(float(x)); } inline CUDA_CALLABLE float cbrt(float x) { return ::cbrtf(x); } inline CUDA_CALLABLE double cbrt(double x) { return ::cbrt(x); } inline CUDA_CALLABLE half cbrt(half x) { return ::cbrtf(float(x)); } inline CUDA_CALLABLE float tan(float x) { return ::tanf(x); } inline CUDA_CALLABLE float sinh(float x) { return ::sinhf(x);} inline CUDA_CALLABLE float cosh(float x) { return ::coshf(x);} inline CUDA_CALLABLE float tanh(float x) { return ::tanhf(x);} inline CUDA_CALLABLE float degrees(float x) { return x * RAD_TO_DEG;} inline CUDA_CALLABLE float radians(float x) { return x * DEG_TO_RAD;} inline CUDA_CALLABLE double tan(double x) { return ::tan(x); } inline CUDA_CALLABLE double sinh(double x) { return ::sinh(x);} inline CUDA_CALLABLE double cosh(double x) { return ::cosh(x);} inline CUDA_CALLABLE double tanh(double x) { return ::tanh(x);} inline CUDA_CALLABLE double degrees(double x) { return x * RAD_TO_DEG;} inline CUDA_CALLABLE double radians(double x) { return x * DEG_TO_RAD;} inline CUDA_CALLABLE half tan(half x) { return ::tanf(float(x)); } inline CUDA_CALLABLE half sinh(half x) { return ::sinhf(float(x));} inline CUDA_CALLABLE half cosh(half x) { return ::coshf(float(x));} inline CUDA_CALLABLE half tanh(half x) { return ::tanhf(float(x));} inline CUDA_CALLABLE half degrees(half x) { return x * RAD_TO_DEG;} inline CUDA_CALLABLE half radians(half x) { return x * DEG_TO_RAD;} inline CUDA_CALLABLE float round(float x) { return ::roundf(x); } inline CUDA_CALLABLE float rint(float x) { return ::rintf(x); } inline CUDA_CALLABLE float trunc(float x) { return ::truncf(x); } 
inline CUDA_CALLABLE float floor(float x) { return ::floorf(x); } inline CUDA_CALLABLE float ceil(float x) { return ::ceilf(x); } inline CUDA_CALLABLE float frac(float x) { return x - trunc(x); } inline CUDA_CALLABLE double round(double x) { return ::round(x); } inline CUDA_CALLABLE double rint(double x) { return ::rint(x); } inline CUDA_CALLABLE double trunc(double x) { return ::trunc(x); } inline CUDA_CALLABLE double floor(double x) { return ::floor(x); } inline CUDA_CALLABLE double ceil(double x) { return ::ceil(x); } inline CUDA_CALLABLE double frac(double x) { return x - trunc(x); } inline CUDA_CALLABLE half round(half x) { return ::roundf(float(x)); } inline CUDA_CALLABLE half rint(half x) { return ::rintf(float(x)); } inline CUDA_CALLABLE half trunc(half x) { return ::truncf(float(x)); } inline CUDA_CALLABLE half floor(half x) { return ::floorf(float(x)); } inline CUDA_CALLABLE half ceil(half x) { return ::ceilf(float(x)); } inline CUDA_CALLABLE half frac(half x) { return float(x) - trunc(float(x)); } #define DECLARE_ADJOINTS(T)\ inline CUDA_CALLABLE void adj_log(T a, T& adj_a, T adj_ret)\ {\ adj_a += (T(1)/a)*adj_ret;\ DO_IF_FPCHECK(if (!isfinite(adj_a))\ {\ printf("%s:%d - adj_log(%f, %f, %f)\n", __FILE__, __LINE__, float(a), float(adj_a), float(adj_ret));\ assert(0);\ })\ }\ inline CUDA_CALLABLE void adj_log2(T a, T& adj_a, T adj_ret)\ { \ adj_a += (T(1)/a)*(T(1)/log(T(2)))*adj_ret; \ DO_IF_FPCHECK(if (!isfinite(adj_a))\ {\ printf("%s:%d - adj_log2(%f, %f, %f)\n", __FILE__, __LINE__, float(a), float(adj_a), float(adj_ret));\ assert(0);\ }) \ }\ inline CUDA_CALLABLE void adj_log10(T a, T& adj_a, T adj_ret)\ {\ adj_a += (T(1)/a)*(T(1)/log(T(10)))*adj_ret; \ DO_IF_FPCHECK(if (!isfinite(adj_a))\ {\ printf("%s:%d - adj_log10(%f, %f, %f)\n", __FILE__, __LINE__, float(a), float(adj_a), float(adj_ret));\ assert(0);\ })\ }\ inline CUDA_CALLABLE void adj_exp(T a, T ret, T& adj_a, T adj_ret) { adj_a += ret*adj_ret; }\ inline CUDA_CALLABLE void adj_pow(T a, T b, T ret, T& adj_a, T& adj_b, T adj_ret)\ { \ adj_a += b*pow(a, b-T(1))*adj_ret;\ adj_b += log(a)*ret*adj_ret;\ DO_IF_FPCHECK(if (!isfinite(adj_a) || !isfinite(adj_b))\ {\ printf("%s:%d - adj_pow(%f, %f, %f, %f, %f)\n", __FILE__, __LINE__, float(a), float(b), float(adj_a), float(adj_b), float(adj_ret));\ assert(0);\ })\ }\ inline CUDA_CALLABLE void adj_leaky_min(T a, T b, T r, T& adj_a, T& adj_b, T& adj_r, T adj_ret)\ {\ if (a < b)\ adj_a += adj_ret;\ else\ {\ adj_a += r*adj_ret;\ adj_b += adj_ret;\ }\ }\ inline CUDA_CALLABLE void adj_leaky_max(T a, T b, T r, T& adj_a, T& adj_b, T& adj_r, T adj_ret)\ {\ if (a > b)\ adj_a += adj_ret;\ else\ {\ adj_a += r*adj_ret;\ adj_b += adj_ret;\ }\ }\ inline CUDA_CALLABLE void adj_acos(T x, T& adj_x, T adj_ret)\ {\ T d = sqrt(T(1)-x*x);\ DO_IF_FPCHECK(adj_x -= (T(1)/d)*adj_ret;\ if (!isfinite(d) || !isfinite(adj_x))\ {\ printf("%s:%d - adj_acos(%f, %f, %f)\n", __FILE__, __LINE__, float(x), float(adj_x), float(adj_ret)); \ assert(0);\ })\ DO_IF_NO_FPCHECK(if (d > T(0))\ adj_x -= (T(1)/d)*adj_ret;)\ }\ inline CUDA_CALLABLE void adj_asin(T x, T& adj_x, T adj_ret)\ {\ T d = sqrt(T(1)-x*x);\ DO_IF_FPCHECK(adj_x += (T(1)/d)*adj_ret;\ if (!isfinite(d) || !isfinite(adj_x))\ {\ printf("%s:%d - adj_asin(%f, %f, %f)\n", __FILE__, __LINE__, float(x), float(adj_x), float(adj_ret)); \ assert(0);\ })\ DO_IF_NO_FPCHECK(if (d > T(0))\ adj_x += (T(1)/d)*adj_ret;)\ }\ inline CUDA_CALLABLE void adj_tan(T x, T& adj_x, T adj_ret)\ {\ T cos_x = cos(x);\ DO_IF_FPCHECK(adj_x += (T(1)/(cos_x*cos_x))*adj_ret;\ if 
(!isfinite(adj_x) || cos_x == T(0))\ {\ printf("%s:%d - adj_tan(%f, %f, %f)\n", __FILE__, __LINE__, float(x), float(adj_x), float(adj_ret));\ assert(0);\ })\ DO_IF_NO_FPCHECK(if (cos_x != T(0))\ adj_x += (T(1)/(cos_x*cos_x))*adj_ret;)\ }\ inline CUDA_CALLABLE void adj_atan(T x, T& adj_x, T adj_ret)\ {\ adj_x += adj_ret /(x*x + T(1));\ }\ inline CUDA_CALLABLE void adj_atan2(T y, T x, T& adj_y, T& adj_x, T adj_ret)\ {\ T d = x*x + y*y;\ DO_IF_FPCHECK(adj_x -= y/d*adj_ret;\ adj_y += x/d*adj_ret;\ if (!isfinite(adj_x) || !isfinite(adj_y) || d == T(0))\ {\ printf("%s:%d - adj_atan2(%f, %f, %f, %f, %f)\n", __FILE__, __LINE__, float(y), float(x), float(adj_y), float(adj_x), float(adj_ret));\ assert(0);\ })\ DO_IF_NO_FPCHECK(if (d > T(0))\ {\ adj_x -= (y/d)*adj_ret;\ adj_y += (x/d)*adj_ret;\ })\ }\ inline CUDA_CALLABLE void adj_sin(T x, T& adj_x, T adj_ret)\ {\ adj_x += cos(x)*adj_ret;\ }\ inline CUDA_CALLABLE void adj_cos(T x, T& adj_x, T adj_ret)\ {\ adj_x -= sin(x)*adj_ret;\ }\ inline CUDA_CALLABLE void adj_sinh(T x, T& adj_x, T adj_ret)\ {\ adj_x += cosh(x)*adj_ret;\ }\ inline CUDA_CALLABLE void adj_cosh(T x, T& adj_x, T adj_ret)\ {\ adj_x += sinh(x)*adj_ret;\ }\ inline CUDA_CALLABLE void adj_tanh(T x, T ret, T& adj_x, T adj_ret)\ {\ adj_x += (T(1) - ret*ret)*adj_ret;\ }\ inline CUDA_CALLABLE void adj_sqrt(T x, T ret, T& adj_x, T adj_ret)\ {\ adj_x += T(0.5)*(T(1)/ret)*adj_ret;\ DO_IF_FPCHECK(if (!isfinite(adj_x))\ {\ printf("%s:%d - adj_sqrt(%f, %f, %f)\n", __FILE__, __LINE__, float(x), float(adj_x), float(adj_ret));\ assert(0);\ })\ }\ inline CUDA_CALLABLE void adj_cbrt(T x, T ret, T& adj_x, T adj_ret)\ {\ adj_x += (T(1)/T(3))*(T(1)/(ret*ret))*adj_ret;\ DO_IF_FPCHECK(if (!isfinite(adj_x))\ {\ printf("%s:%d - adj_cbrt(%f, %f, %f)\n", __FILE__, __LINE__, float(x), float(adj_x), float(adj_ret));\ assert(0);\ })\ }\ inline CUDA_CALLABLE void adj_degrees(T x, T& adj_x, T adj_ret)\ {\ adj_x += RAD_TO_DEG * adj_ret;\ }\ inline CUDA_CALLABLE void adj_radians(T x, T& adj_x, T adj_ret)\ {\ adj_x += DEG_TO_RAD * adj_ret;\ }\ inline CUDA_CALLABLE void adj_round(T x, T& adj_x, T adj_ret){ }\ inline CUDA_CALLABLE void adj_rint(T x, T& adj_x, T adj_ret){ }\ inline CUDA_CALLABLE void adj_trunc(T x, T& adj_x, T adj_ret){ }\ inline CUDA_CALLABLE void adj_floor(T x, T& adj_x, T adj_ret){ }\ inline CUDA_CALLABLE void adj_ceil(T x, T& adj_x, T adj_ret){ }\ inline CUDA_CALLABLE void adj_frac(T x, T& adj_x, T adj_ret){ } DECLARE_ADJOINTS(float16) DECLARE_ADJOINTS(float32) DECLARE_ADJOINTS(float64) template <typename C, typename T> CUDA_CALLABLE inline T select(const C& cond, const T& a, const T& b) { // The double NOT operator !! casts to bool without compiler warnings. return (!!cond) ? b : a; } template <typename C, typename T> CUDA_CALLABLE inline void adj_select(const C& cond, const T& a, const T& b, C& adj_cond, T& adj_a, T& adj_b, const T& adj_ret) { // The double NOT operator !! casts to bool without compiler warnings. 
if (!!cond) adj_b += adj_ret; else adj_a += adj_ret; } template <typename T> CUDA_CALLABLE inline T copy(const T& src) { return src; } template <typename T> CUDA_CALLABLE inline void adj_copy(const T& src, T& adj_src, T& adj_dest) { adj_src += adj_dest; adj_dest = T{}; } template <typename T> CUDA_CALLABLE inline void assign(T& dest, const T& src) { dest = src; } template <typename T> CUDA_CALLABLE inline void adj_assign(T& dest, const T& src, T& adj_dest, T& adj_src) { // this is generally a non-differentiable operation since it violates SSA, // except in read-modify-write statements which are reversible through backpropagation adj_src = adj_dest; adj_dest = T{}; } // some helpful operator overloads (just for C++ use, these are not adjointed) template <typename T> CUDA_CALLABLE inline T& operator += (T& a, const T& b) { a = add(a, b); return a; } template <typename T> CUDA_CALLABLE inline T& operator -= (T& a, const T& b) { a = sub(a, b); return a; } template <typename T> CUDA_CALLABLE inline T operator+(const T& a, const T& b) { return add(a, b); } template <typename T> CUDA_CALLABLE inline T operator-(const T& a, const T& b) { return sub(a, b); } template <typename T> CUDA_CALLABLE inline T pos(const T& x) { return x; } template <typename T> CUDA_CALLABLE inline void adj_pos(const T& x, T& adj_x, const T& adj_ret) { adj_x += T(adj_ret); } // unary negation implemented as negative multiply, not sure the fp implications of this // may be better as 0.0 - x? template <typename T> CUDA_CALLABLE inline T neg(const T& x) { return T(0.0) - x; } template <typename T> CUDA_CALLABLE inline void adj_neg(const T& x, T& adj_x, const T& adj_ret) { adj_x += T(-adj_ret); } // unary boolean negation template <typename T> CUDA_CALLABLE inline bool unot(const T& b) { return !b; } template <typename T> CUDA_CALLABLE inline void adj_unot(const T& b, T& adj_b, const bool& adj_ret) { } const int LAUNCH_MAX_DIMS = 4; // should match types.py struct launch_bounds_t { int shape[LAUNCH_MAX_DIMS]; // size of each dimension int ndim; // number of valid dimension size_t size; // total number of threads }; #ifndef __CUDACC__ static size_t s_threadIdx; #endif inline CUDA_CALLABLE size_t grid_index() { #ifdef __CUDACC__ // Need to cast at least one of the variables being multiplied so that type promotion happens before the multiplication size_t grid_index = static_cast<size_t>(blockDim.x) * static_cast<size_t>(blockIdx.x) + static_cast<size_t>(threadIdx.x); return grid_index; #else return s_threadIdx; #endif } inline CUDA_CALLABLE int tid(size_t index) { // For the 1-D tid() we need to warn the user if we're about to provide a truncated index // Only do this in _DEBUG when called from device to avoid excessive register allocation #if defined(_DEBUG) || !defined(__CUDA_ARCH__) if (index > 2147483647) { printf("Warp warning: tid() is returning an overflowed int\n"); } #endif return static_cast<int>(index); } inline CUDA_CALLABLE_DEVICE void tid(int& i, int& j, size_t index, const launch_bounds_t& launch_bounds) { const size_t n = launch_bounds.shape[1]; // convert to work item i = index/n; j = index%n; } inline CUDA_CALLABLE_DEVICE void tid(int& i, int& j, int& k, size_t index, const launch_bounds_t& launch_bounds) { const size_t n = launch_bounds.shape[1]; const size_t o = launch_bounds.shape[2]; // convert to work item i = index/(n*o); j = index%(n*o)/o; k = index%o; } inline CUDA_CALLABLE_DEVICE void tid(int& i, int& j, int& k, int& l, size_t index, const launch_bounds_t& launch_bounds) { const size_t n = 
launch_bounds.shape[1]; const size_t o = launch_bounds.shape[2]; const size_t p = launch_bounds.shape[3]; // convert to work item i = index/(n*o*p); j = index%(n*o*p)/(o*p); k = index%(o*p)/p; l = index%p; } template<typename T> inline CUDA_CALLABLE T atomic_add(T* buf, T value) { #if !defined(__CUDA_ARCH__) T old = buf[0]; buf[0] += value; return old; #else return atomicAdd(buf, value); #endif } template<> inline CUDA_CALLABLE float16 atomic_add(float16* buf, float16 value) { #if !defined(__CUDA_ARCH__) float16 old = buf[0]; buf[0] += value; return old; #elif defined(__clang__) // CUDA compiled by Clang __half r = atomicAdd(reinterpret_cast<__half*>(buf), *reinterpret_cast<__half*>(&value)); return *reinterpret_cast<float16*>(&r); #else // CUDA compiled by NVRTC //return atomicAdd(buf, value); /* Define __PTR for atomicAdd prototypes below, undef after done */ #if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__) #define __PTR "l" #else #define __PTR "r" #endif /*(defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)*/ half r = 0.0; #if __CUDA_ARCH__ >= 700 asm volatile ("{ atom.add.noftz.f16 %0,[%1],%2; }\n" : "=h"(r.u) : __PTR(buf), "h"(value.u) : "memory"); #endif return r; #undef __PTR #endif // CUDA compiled by NVRTC } // emulate atomic float max inline CUDA_CALLABLE float atomic_max(float* address, float val) { #if defined(__CUDA_ARCH__) int *address_as_int = (int*)address; int old = *address_as_int, assumed; while (val > __int_as_float(old)) { assumed = old; old = atomicCAS(address_as_int, assumed, __float_as_int(val)); } return __int_as_float(old); #else float old = *address; *address = max(old, val); return old; #endif } // emulate atomic float min/max with atomicCAS() inline CUDA_CALLABLE float atomic_min(float* address, float val) { #if defined(__CUDA_ARCH__) int *address_as_int = (int*)address; int old = *address_as_int, assumed; while (val < __int_as_float(old)) { assumed = old; old = atomicCAS(address_as_int, assumed, __float_as_int(val)); } return __int_as_float(old); #else float old = *address; *address = min(old, val); return old; #endif } inline CUDA_CALLABLE int atomic_max(int* address, int val) { #if defined(__CUDA_ARCH__) return atomicMax(address, val); #else int old = *address; *address = max(old, val); return old; #endif } // atomic int min inline CUDA_CALLABLE int atomic_min(int* address, int val) { #if defined(__CUDA_ARCH__) return atomicMin(address, val); #else int old = *address; *address = min(old, val); return old; #endif } // default behavior for adjoint of atomic min/max operation that accumulates gradients for all elements matching the min/max value template <typename T> CUDA_CALLABLE inline void adj_atomic_minmax(T *addr, T *adj_addr, const T &value, T &adj_value) { if (value == *addr) adj_value += *adj_addr; } // for integral types we do not accumulate gradients CUDA_CALLABLE inline void adj_atomic_minmax(int8* buf, int8* adj_buf, const int8 &value, int8 &adj_value) { } CUDA_CALLABLE inline void adj_atomic_minmax(uint8* buf, uint8* adj_buf, const uint8 &value, uint8 &adj_value) { } CUDA_CALLABLE inline void adj_atomic_minmax(int16* buf, int16* adj_buf, const int16 &value, int16 &adj_value) { } CUDA_CALLABLE inline void adj_atomic_minmax(uint16* buf, uint16* adj_buf, const uint16 &value, uint16 &adj_value) { } CUDA_CALLABLE inline void adj_atomic_minmax(int32* buf, int32* adj_buf, const int32 &value, int32 &adj_value) { } CUDA_CALLABLE inline void adj_atomic_minmax(uint32* buf, uint32* 
adj_buf, const uint32 &value, uint32 &adj_value) { } CUDA_CALLABLE inline void adj_atomic_minmax(int64* buf, int64* adj_buf, const int64 &value, int64 &adj_value) { } CUDA_CALLABLE inline void adj_atomic_minmax(uint64* buf, uint64* adj_buf, const uint64 &value, uint64 &adj_value) { } CUDA_CALLABLE inline void adj_atomic_minmax(bool* buf, bool* adj_buf, const bool &value, bool &adj_value) { } } // namespace wp // bool and printf are defined outside of the wp namespace in crt.h, hence // their adjoint counterparts are also defined in the global namespace. template <typename T> CUDA_CALLABLE inline void adj_bool(T, T&, bool) {} inline CUDA_CALLABLE void adj_printf(const char* fmt, ...) {} #include "vec.h" #include "mat.h" #include "quat.h" #include "spatial.h" #include "intersect.h" #include "intersect_adj.h" //-------------- namespace wp { // dot for scalar types just to make some templates compile for scalar/vector inline CUDA_CALLABLE float dot(float a, float b) { return mul(a, b); } inline CUDA_CALLABLE void adj_dot(float a, float b, float& adj_a, float& adj_b, float adj_ret) { adj_mul(a, b, adj_a, adj_b, adj_ret); } inline CUDA_CALLABLE float tensordot(float a, float b) { return mul(a, b); } #define DECLARE_INTERP_FUNCS(T) \ CUDA_CALLABLE inline T smoothstep(T edge0, T edge1, T x)\ {\ x = clamp((x - edge0) / (edge1 - edge0), T(0), T(1));\ return x * x * (T(3) - T(2) * x);\ }\ CUDA_CALLABLE inline void adj_smoothstep(T edge0, T edge1, T x, T& adj_edge0, T& adj_edge1, T& adj_x, T adj_ret)\ {\ T ab = edge0 - edge1;\ T ax = edge0 - x;\ T bx = edge1 - x;\ T xb = x - edge1;\ \ if (bx / ab >= T(0) || ax / ab <= T(0))\ {\ return;\ }\ \ T ab3 = ab * ab * ab;\ T ab4 = ab3 * ab;\ adj_edge0 += adj_ret * ((T(6) * ax * bx * bx) / ab4);\ adj_edge1 += adj_ret * ((T(6) * ax * ax * xb) / ab4);\ adj_x += adj_ret * ((T(6) * ax * bx ) / ab3);\ }\ CUDA_CALLABLE inline T lerp(const T& a, const T& b, T t)\ {\ return a*(T(1)-t) + b*t;\ }\ CUDA_CALLABLE inline void adj_lerp(const T& a, const T& b, T t, T& adj_a, T& adj_b, T& adj_t, const T& adj_ret)\ {\ adj_a += adj_ret*(T(1)-t);\ adj_b += adj_ret*t;\ adj_t += b*adj_ret - a*adj_ret;\ } DECLARE_INTERP_FUNCS(float16) DECLARE_INTERP_FUNCS(float32) DECLARE_INTERP_FUNCS(float64) inline CUDA_CALLABLE void print(const str s) { printf("%s\n", s); } inline CUDA_CALLABLE void print(int i) { printf("%d\n", i); } inline CUDA_CALLABLE void print(short i) { printf("%hd\n", i); } inline CUDA_CALLABLE void print(long i) { printf("%ld\n", i); } inline CUDA_CALLABLE void print(long long i) { printf("%lld\n", i); } inline CUDA_CALLABLE void print(unsigned i) { printf("%u\n", i); } inline CUDA_CALLABLE void print(unsigned short i) { printf("%hu\n", i); } inline CUDA_CALLABLE void print(unsigned long i) { printf("%lu\n", i); } inline CUDA_CALLABLE void print(unsigned long long i) { printf("%llu\n", i); } template<unsigned Length, typename Type> inline CUDA_CALLABLE void print(vec_t<Length, Type> v) { for( unsigned i=0; i < Length; ++i ) { printf("%g ", float(v[i])); } printf("\n"); } template<typename Type> inline CUDA_CALLABLE void print(quat_t<Type> i) { printf("%g %g %g %g\n", float(i.x), float(i.y), float(i.z), float(i.w)); } template<unsigned Rows,unsigned Cols,typename Type> inline CUDA_CALLABLE void print(const mat_t<Rows,Cols,Type> &m) { for( unsigned i=0; i< Rows; ++i ) { for( unsigned j=0; j< Cols; ++j ) { printf("%g ",float(m.data[i][j])); } printf("\n"); } } template<typename Type> inline CUDA_CALLABLE void print(transform_t<Type> t) { printf("(%g %g %g) (%g %g %g %g)\n", 
float(t.p[0]), float(t.p[1]), float(t.p[2]), float(t.q.x), float(t.q.y), float(t.q.z), float(t.q.w)); } inline CUDA_CALLABLE void adj_print(int i, int adj_i) { printf("%d adj: %d\n", i, adj_i); } inline CUDA_CALLABLE void adj_print(float f, float adj_f) { printf("%g adj: %g\n", f, adj_f); } inline CUDA_CALLABLE void adj_print(short f, short adj_f) { printf("%hd adj: %hd\n", f, adj_f); } inline CUDA_CALLABLE void adj_print(long f, long adj_f) { printf("%ld adj: %ld\n", f, adj_f); } inline CUDA_CALLABLE void adj_print(long long f, long long adj_f) { printf("%lld adj: %lld\n", f, adj_f); } inline CUDA_CALLABLE void adj_print(unsigned f, unsigned adj_f) { printf("%u adj: %u\n", f, adj_f); } inline CUDA_CALLABLE void adj_print(unsigned short f, unsigned short adj_f) { printf("%hu adj: %hu\n", f, adj_f); } inline CUDA_CALLABLE void adj_print(unsigned long f, unsigned long adj_f) { printf("%lu adj: %lu\n", f, adj_f); } inline CUDA_CALLABLE void adj_print(unsigned long long f, unsigned long long adj_f) { printf("%llu adj: %llu\n", f, adj_f); } inline CUDA_CALLABLE void adj_print(half h, half adj_h) { printf("%g adj: %g\n", half_to_float(h), half_to_float(adj_h)); } inline CUDA_CALLABLE void adj_print(double f, double adj_f) { printf("%g adj: %g\n", f, adj_f); } template<unsigned Length, typename Type> inline CUDA_CALLABLE void adj_print(vec_t<Length, Type> v, vec_t<Length, Type>& adj_v) { printf("%g %g adj: %g %g \n", v[0], v[1], adj_v[0], adj_v[1]); } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE void adj_print(mat_t<Rows, Cols, Type> m, mat_t<Rows, Cols, Type>& adj_m) { } template<typename Type> inline CUDA_CALLABLE void adj_print(quat_t<Type> q, quat_t<Type>& adj_q) { printf("%g %g %g %g adj: %g %g %g %g\n", q.x, q.y, q.z, q.w, adj_q.x, adj_q.y, adj_q.z, adj_q.w); } template<typename Type> inline CUDA_CALLABLE void adj_print(transform_t<Type> t, transform_t<Type>& adj_t) {} inline CUDA_CALLABLE void adj_print(str t, str& adj_t) {} template <typename T> inline CUDA_CALLABLE void expect_eq(const T& actual, const T& expected) { if (!(actual == expected)) { printf("Error, expect_eq() failed:\n"); printf("\t Expected: "); print(expected); printf("\t Actual: "); print(actual); } } template <typename T> inline CUDA_CALLABLE void adj_expect_eq(const T& a, const T& b, T& adj_a, T& adj_b) { // nop } template <typename T> inline CUDA_CALLABLE void expect_neq(const T& actual, const T& expected) { if (actual == expected) { printf("Error, expect_neq() failed:\n"); printf("\t Expected: "); print(expected); printf("\t Actual: "); print(actual); } } template <typename T> inline CUDA_CALLABLE void adj_expect_neq(const T& a, const T& b, T& adj_a, T& adj_b) { // nop } template <typename T> inline CUDA_CALLABLE void expect_near(const T& actual, const T& expected, const T& tolerance) { if (abs(actual - expected) > tolerance) { printf("Error, expect_near() failed with tolerance "); print(tolerance); printf("\t Expected: "); print(expected); printf("\t Actual: "); print(actual); } } inline CUDA_CALLABLE void expect_near(const vec3& actual, const vec3& expected, const float& tolerance) { const float diff = max(max(abs(actual[0] - expected[0]), abs(actual[1] - expected[1])), abs(actual[2] - expected[2])); if (diff > tolerance) { printf("Error, expect_near() failed with tolerance "); print(tolerance); printf("\t Expected: "); print(expected); printf("\t Actual: "); print(actual); } } template <typename T> inline CUDA_CALLABLE void adj_expect_near(const T& actual, const T& expected, const T& 
tolerance, T& adj_actual, T& adj_expected, T& adj_tolerance) { // nop } inline CUDA_CALLABLE void adj_expect_near(const vec3& actual, const vec3& expected, float tolerance, vec3& adj_actual, vec3& adj_expected, float adj_tolerance) { // nop } } // namespace wp // include array.h so we have the print, isfinite functions for the inner array types defined #include "array.h" #include "mesh.h" #include "bvh.h" #include "svd.h" #include "hashgrid.h" #include "volume.h" #include "range.h" #include "rand.h" #include "noise.h" #include "matnn.h"
48,739
C
29.386534
174
0.614005
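The DECLARE_INTERP_FUNCS block in the builtin.h excerpt above defines smoothstep/lerp together with hand-derived adjoints. The following standalone sketch (plain C++, no Warp headers, not part of the repository) restates the same smoothstep formula and checks the adj_x term against a central finite difference; the edge values and step size are picked arbitrarily for the demo.

#include <algorithm>
#include <cstdio>

// Same forward formula as DECLARE_INTERP_FUNCS: clamp t to [0, 1], then 3t^2 - 2t^3.
static float smoothstep(float edge0, float edge1, float x)
{
    float t = std::min(std::max((x - edge0) / (edge1 - edge0), 0.0f), 1.0f);
    return t * t * (3.0f - 2.0f * t);
}

// Analytic d(smoothstep)/dx, matching the adj_x term in the macro:
// 6 * (edge0 - x) * (edge1 - x) / (edge0 - edge1)^3, zero in the clamped regions.
static float smoothstep_grad_x(float edge0, float edge1, float x)
{
    float ab = edge0 - edge1;
    if ((edge1 - x) / ab >= 0.0f || (edge0 - x) / ab <= 0.0f)
        return 0.0f;
    return 6.0f * (edge0 - x) * (edge1 - x) / (ab * ab * ab);
}

int main()
{
    const float e0 = -1.0f, e1 = 2.0f, x = 0.4f, h = 1e-3f;
    float analytic = smoothstep_grad_x(e0, e1, x);
    float numeric = (smoothstep(e0, e1, x + h) - smoothstep(e0, e1, x - h)) / (2.0f * h);
    std::printf("analytic %g numeric %g\n", analytic, numeric);   // both ~0.498
    return 0;
}

Both values come out to roughly 6 * 1.4 * 1.6 / 27, which confirms the sign convention ab = edge0 - edge1 used by the adjoint.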
NVIDIA/warp/warp/native/range.h
#pragma once

namespace wp
{

// All iterable types should implement 3 methods:
//
//    T iter_next(iter)       - returns the current value and moves iterator to next state
//    int iter_cmp(iter)      - returns 0 if finished
//    iter iter_reverse(iter) - return an iterator of the same type representing the reverse order
//
// iter_next() should also be registered as a built-in hidden function so that code-gen
// can call it and generate the appropriate variable storage

// represents a built-in Python range() loop
struct range_t
{
    CUDA_CALLABLE range_t() : start(0), end(0), step(0), i(0) {}

    int start;
    int end;
    int step;

    int i;
};

CUDA_CALLABLE inline range_t range(int end)
{
    range_t r;
    r.start = 0;
    r.end = end;
    r.step = 1;

    r.i = r.start;

    return r;
}

CUDA_CALLABLE inline range_t range(int start, int end)
{
    range_t r;
    r.start = start;
    r.end = end;
    r.step = 1;

    r.i = r.start;

    return r;
}

CUDA_CALLABLE inline range_t range(int start, int end, int step)
{
    range_t r;
    r.start = start;
    r.end = end;
    r.step = step;

    r.i = r.start;

    return r;
}

CUDA_CALLABLE inline void adj_range(int end, int adj_end, range_t& adj_ret) {}
CUDA_CALLABLE inline void adj_range(int start, int end, int adj_start, int adj_end, range_t& adj_ret) {}
CUDA_CALLABLE inline void adj_range(int start, int end, int step, int adj_start, int adj_end, int adj_step, range_t& adj_ret) {}

CUDA_CALLABLE inline int iter_next(range_t& r)
{
    int iter = r.i;
    r.i += r.step;
    return iter;
}

CUDA_CALLABLE inline bool iter_cmp(const range_t& r)
{
    // implements for-loop comparison to emulate Python range() loops with negative arguments
    if (r.step == 0)
        // degenerate case where step == 0
        return false;
    if (r.step > 0)
        // normal case where step > 0
        return r.i < r.end;
    else
        // reverse case where step < 0
        return r.i > r.end;
}

CUDA_CALLABLE inline range_t iter_reverse(const range_t& r)
{
    // generates a reverse range, equivalent to reversed(range())
    range_t rev;
    rev.start = r.end-1;
    rev.end = r.start-1;
    rev.step = -r.step;

    rev.i = rev.start;

    return rev;
}

} // namespace wp
2,315
C
20.247706
128
0.609071
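range.h above defines the iterator protocol (iter_next / iter_cmp / iter_reverse) that Warp's code generator uses to lower Python range() loops. The host-only mirror below is an illustration of how a generated loop drives that protocol; it is not the generated code itself, and it drops the CUDA_CALLABLE qualifiers so it compiles as ordinary C++.

#include <cstdio>

// Minimal host-only mirror of wp::range_t and its iterator protocol.
struct range_t
{
    int start, end, step, i;
};

static range_t range(int start, int end, int step)
{
    return range_t{start, end, step, start};
}

static int iter_next(range_t& r)        // returns the current value, advances the state
{
    int v = r.i;
    r.i += r.step;
    return v;
}

static bool iter_cmp(const range_t& r)  // false once the loop has finished
{
    if (r.step == 0) return false;
    return r.step > 0 ? r.i < r.end : r.i > r.end;
}

static range_t iter_reverse(const range_t& r)   // reversed(range(...)) as defined above
{
    return range_t{r.end - 1, r.start - 1, -r.step, r.end - 1};
}

int main()
{
    // `for i in range(0, 10, 3)` lowers to roughly this loop shape:
    for (range_t it = range(0, 10, 3); iter_cmp(it); )
        std::printf("%d ", iter_next(it));       // 0 3 6 9
    std::printf("\n");

    // `for i in reversed(range(0, 10, 3))`:
    for (range_t it = iter_reverse(range(0, 10, 3)); iter_cmp(it); )
        std::printf("%d ", iter_next(it));       // 9 6 3 0
    std::printf("\n");
    return 0;
}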
NVIDIA/warp/warp/native/intersect_adj.h
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once #include "builtin.h" namespace wp { //---------------------------------------------------------------- // Generated adjoint for closest_point_edge_edge. // See intersect.h for forward mode and comments. static CUDA_CALLABLE void adj_closest_point_edge_edge(vec3 var_p1, vec3 var_q1, vec3 var_p2, vec3 var_q2, float32 var_epsilon, vec3 & adj_p1, vec3 & adj_q1, vec3 & adj_p2, vec3 & adj_q2, float32 & adj_epsilon, vec3 & adj_ret) { //--------- // primal vars vec3 var_0; vec3 var_1; vec3 var_2; float32 var_3; float32 var_4; float32 var_5; const float32 var_6 = 0.0; float32 var_7; float32 var_8; vec3 var_9; float32 var_10; bool var_11; bool var_12; bool var_13; // vec3 var_14; bool var_15; float32 var_16; float32 var_17; float32 var_18; float32 var_19; float32 var_20; float32 var_21; bool var_22; float32 var_23; float32 var_24; const float32 var_25 = 1.0; float32 var_26; float32 var_27; float32 var_28; float32 var_29; float32 var_30; float32 var_31; float32 var_32; float32 var_33; bool var_34; float32 var_35; float32 var_36; float32 var_37; float32 var_38; float32 var_39; float32 var_40; float32 var_41; float32 var_42; float32 var_43; float32 var_44; bool var_45; float32 var_46; float32 var_47; float32 var_48; float32 var_49; float32 var_50; bool var_51; float32 var_52; float32 var_53; float32 var_54; float32 var_55; float32 var_56; float32 var_57; float32 var_58; float32 var_59; float32 var_60; float32 var_61; float32 var_62; vec3 var_63; vec3 var_64; vec3 var_65; vec3 var_66; vec3 var_67; vec3 var_68; vec3 var_69; float32 var_70; // vec3 var_71; //--------- // dual vars vec3 adj_0 = 0; vec3 adj_1 = 0; vec3 adj_2 = 0; float32 adj_3 = 0; float32 adj_4 = 0; float32 adj_5 = 0; float32 adj_6 = 0; float32 adj_7 = 0; float32 adj_8 = 0; vec3 adj_9 = 0; float32 adj_10 = 0; //bool adj_11 = 0; //bool adj_12 = 0; //bool adj_13 = 0; vec3 adj_14 = 0; bool adj_15 = 0; float32 adj_16 = 0; float32 adj_17 = 0; float32 adj_18 = 0; float32 adj_19 = 0; float32 adj_20 = 0; float32 adj_21 = 0; bool adj_22 = 0; float32 adj_23 = 0; float32 adj_24 = 0; float32 adj_25 = 0; float32 adj_26 = 0; float32 adj_27 = 0; float32 adj_28 = 0; float32 adj_29 = 0; float32 adj_30 = 0; float32 adj_31 = 0; float32 adj_32 = 0; float32 adj_33 = 0; bool adj_34 = 0; float32 adj_35 = 0; float32 adj_36 = 0; float32 adj_37 = 0; float32 adj_38 = 0; float32 adj_39 = 0; float32 adj_40 = 0; float32 adj_41 = 0; float32 adj_42 = 0; float32 adj_43 = 0; float32 adj_44 = 0; bool adj_45 = 0; float32 adj_46 = 0; float32 adj_47 = 0; float32 adj_48 = 0; float32 adj_49 = 0; float32 adj_50 = 0; bool adj_51 = 0; float32 adj_52 = 0; float32 adj_53 = 0; float32 adj_54 = 0; float32 adj_55 = 0; float32 adj_56 = 0; float32 adj_57 = 0; float32 adj_58 = 0; float32 adj_59 = 0; float32 adj_60 = 0; float32 adj_61 = 0; float32 adj_62 = 0; vec3 adj_63 = 0; vec3 adj_64 = 0; vec3 adj_65 = 0; vec3 adj_66 = 0; vec3 adj_67 = 0; vec3 adj_68 = 0; vec3 adj_69 = 0; float32 adj_70 = 0; vec3 adj_71 = 0; //--------- // forward var_0 = wp::sub(var_q1, var_p1); var_1 = wp::sub(var_q2, var_p2); var_2 = wp::sub(var_p1, var_p2); var_3 = wp::dot(var_0, 
var_0); var_4 = wp::dot(var_1, var_1); var_5 = wp::dot(var_1, var_2); var_7 = wp::cast_float(var_6); var_8 = wp::cast_float(var_6); var_9 = wp::sub(var_p2, var_p1); var_10 = wp::length(var_9); var_11 = (var_3 <= var_epsilon); var_12 = (var_4 <= var_epsilon); var_13 = var_11 && var_12; if (var_13) { // var_14 = wp::vec3(var_7, var_8, var_10); goto label0; } var_15 = (var_3 <= var_epsilon); if (var_15) { var_16 = wp::cast_float(var_6); var_17 = wp::div(var_5, var_4); var_18 = wp::cast_float(var_17); } var_19 = wp::select(var_15, var_7, var_16); var_20 = wp::select(var_15, var_8, var_18); if (!var_15) { var_21 = wp::dot(var_0, var_2); var_22 = (var_4 <= var_epsilon); if (var_22) { var_23 = wp::neg(var_21); var_24 = wp::div(var_23, var_3); var_26 = wp::clamp(var_24, var_6, var_25); var_27 = wp::cast_float(var_6); } var_28 = wp::select(var_22, var_19, var_26); var_29 = wp::select(var_22, var_20, var_27); if (!var_22) { var_30 = wp::dot(var_0, var_1); var_31 = wp::mul(var_3, var_4); var_32 = wp::mul(var_30, var_30); var_33 = wp::sub(var_31, var_32); var_34 = (var_33 != var_6); if (var_34) { var_35 = wp::mul(var_30, var_5); var_36 = wp::mul(var_21, var_4); var_37 = wp::sub(var_35, var_36); var_38 = wp::div(var_37, var_33); var_39 = wp::clamp(var_38, var_6, var_25); } var_40 = wp::select(var_34, var_28, var_39); if (!var_34) { } var_41 = wp::select(var_34, var_6, var_40); var_42 = wp::mul(var_30, var_41); var_43 = wp::add(var_42, var_5); var_44 = wp::div(var_43, var_4); var_45 = (var_44 < var_6); if (var_45) { var_46 = wp::neg(var_21); var_47 = wp::div(var_46, var_3); var_48 = wp::clamp(var_47, var_6, var_25); } var_49 = wp::select(var_45, var_41, var_48); var_50 = wp::select(var_45, var_44, var_6); if (!var_45) { var_51 = (var_50 > var_25); if (var_51) { var_52 = wp::sub(var_30, var_21); var_53 = wp::div(var_52, var_3); var_54 = wp::clamp(var_53, var_6, var_25); } var_55 = wp::select(var_51, var_49, var_54); var_56 = wp::select(var_51, var_50, var_25); } var_57 = wp::select(var_45, var_55, var_49); var_58 = wp::select(var_45, var_56, var_50); } var_59 = wp::select(var_22, var_57, var_28); var_60 = wp::select(var_22, var_58, var_29); } var_61 = wp::select(var_15, var_59, var_19); var_62 = wp::select(var_15, var_60, var_20); var_63 = wp::sub(var_q1, var_p1); var_64 = wp::mul(var_63, var_61); var_65 = wp::add(var_p1, var_64); var_66 = wp::sub(var_q2, var_p2); var_67 = wp::mul(var_66, var_62); var_68 = wp::add(var_p2, var_67); var_69 = wp::sub(var_68, var_65); var_70 = wp::length(var_69); // var_71 = wp::vec3(var_61, var_62, var_70); goto label1; //--------- // reverse label1:; adj_71 += adj_ret; wp::adj_vec3(var_61, var_62, var_70, adj_61, adj_62, adj_70, adj_71); wp::adj_length(var_69, var_70, adj_69, adj_70); wp::adj_sub(var_68, var_65, adj_68, adj_65, adj_69); wp::adj_add(var_p2, var_67, adj_p2, adj_67, adj_68); wp::adj_mul(var_66, var_62, adj_66, adj_62, adj_67); wp::adj_sub(var_q2, var_p2, adj_q2, adj_p2, adj_66); wp::adj_add(var_p1, var_64, adj_p1, adj_64, adj_65); wp::adj_mul(var_63, var_61, adj_63, adj_61, adj_64); wp::adj_sub(var_q1, var_p1, adj_q1, adj_p1, adj_63); wp::adj_select(var_15, var_60, var_20, adj_15, adj_60, adj_20, adj_62); wp::adj_select(var_15, var_59, var_19, adj_15, adj_59, adj_19, adj_61); if (!var_15) { wp::adj_select(var_22, var_58, var_29, adj_22, adj_58, adj_29, adj_60); wp::adj_select(var_22, var_57, var_28, adj_22, adj_57, adj_28, adj_59); if (!var_22) { wp::adj_select(var_45, var_56, var_50, adj_45, adj_56, adj_50, adj_58); wp::adj_select(var_45, var_55, var_49, 
adj_45, adj_55, adj_49, adj_57); if (!var_45) { wp::adj_select(var_51, var_50, var_25, adj_51, adj_50, adj_25, adj_56); wp::adj_select(var_51, var_49, var_54, adj_51, adj_49, adj_54, adj_55); if (var_51) { wp::adj_clamp(var_53, var_6, var_25, adj_53, adj_6, adj_25, adj_54); wp::adj_div(var_52, var_3, var_53, adj_52, adj_3, adj_53); wp::adj_sub(var_30, var_21, adj_30, adj_21, adj_52); } } wp::adj_select(var_45, var_44, var_6, adj_45, adj_44, adj_6, adj_50); wp::adj_select(var_45, var_41, var_48, adj_45, adj_41, adj_48, adj_49); if (var_45) { wp::adj_clamp(var_47, var_6, var_25, adj_47, adj_6, adj_25, adj_48); wp::adj_div(var_46, var_3, var_47, adj_46, adj_3, adj_47); wp::adj_neg(var_21, adj_21, adj_46); } wp::adj_div(var_43, var_4, var_44, adj_43, adj_4, adj_44); wp::adj_add(var_42, var_5, adj_42, adj_5, adj_43); wp::adj_mul(var_30, var_41, adj_30, adj_41, adj_42); wp::adj_select(var_34, var_6, var_40, adj_34, adj_6, adj_40, adj_41); if (!var_34) { } wp::adj_select(var_34, var_28, var_39, adj_34, adj_28, adj_39, adj_40); if (var_34) { wp::adj_clamp(var_38, var_6, var_25, adj_38, adj_6, adj_25, adj_39); wp::adj_div(var_37, var_33, var_38, adj_37, adj_33, adj_38); wp::adj_sub(var_35, var_36, adj_35, adj_36, adj_37); wp::adj_mul(var_21, var_4, adj_21, adj_4, adj_36); wp::adj_mul(var_30, var_5, adj_30, adj_5, adj_35); } wp::adj_sub(var_31, var_32, adj_31, adj_32, adj_33); wp::adj_mul(var_30, var_30, adj_30, adj_30, adj_32); wp::adj_mul(var_3, var_4, adj_3, adj_4, adj_31); wp::adj_dot(var_0, var_1, adj_0, adj_1, adj_30); } wp::adj_select(var_22, var_20, var_27, adj_22, adj_20, adj_27, adj_29); wp::adj_select(var_22, var_19, var_26, adj_22, adj_19, adj_26, adj_28); if (var_22) { wp::adj_cast_float(var_6, adj_6, adj_27); wp::adj_clamp(var_24, var_6, var_25, adj_24, adj_6, adj_25, adj_26); wp::adj_div(var_23, var_3, var_24, adj_23, adj_3, adj_24); wp::adj_neg(var_21, adj_21, adj_23); } wp::adj_dot(var_0, var_2, adj_0, adj_2, adj_21); } wp::adj_select(var_15, var_8, var_18, adj_15, adj_8, adj_18, adj_20); wp::adj_select(var_15, var_7, var_16, adj_15, adj_7, adj_16, adj_19); if (var_15) { wp::adj_cast_float(var_17, adj_17, adj_18); wp::adj_div(var_5, var_4, var_17, adj_5, adj_4, adj_17); wp::adj_cast_float(var_6, adj_6, adj_16); } if (var_13) { label0:; adj_14 += adj_ret; wp::adj_vec3(var_7, var_8, var_10, adj_7, adj_8, adj_10, adj_14); } wp::adj_length(var_9, var_10, adj_9, adj_10); wp::adj_sub(var_p2, var_p1, adj_p2, adj_p1, adj_9); wp::adj_cast_float(var_6, adj_6, adj_8); wp::adj_cast_float(var_6, adj_6, adj_7); wp::adj_dot(var_1, var_2, adj_1, adj_2, adj_5); wp::adj_dot(var_1, var_1, adj_1, adj_1, adj_4); wp::adj_dot(var_0, var_0, adj_0, adj_0, adj_3); wp::adj_sub(var_p1, var_p2, adj_p1, adj_p2, adj_2); wp::adj_sub(var_q2, var_p2, adj_q2, adj_p2, adj_1); wp::adj_sub(var_q1, var_p1, adj_q1, adj_p1, adj_0); return; } } // namespace wp
11,563
C
30.595628
78
0.540344
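The generated adjoint above follows a fixed source-transformation pattern: replay the forward computation into numbered primal variables (var_0, var_1, ...), then walk the recorded operations in reverse while accumulating into matching adjoint variables (adj_0, adj_1, ...). The hand-written miniature below applies the same pattern to a toy function and checks it against finite differences; it only illustrates the transformation style and is not taken from the repository.

#include <cstdio>

// Toy primal function in the same op-per-variable style: f(p, q) = (q - p)^2.
static float f(float p, float q)
{
    float var_0 = q - p;          // sub
    float var_1 = var_0 * var_0;  // mul
    return var_1;
}

// Adjoint: forward replay, then reverse-order accumulation into adj_ variables.
static void adj_f(float p, float q, float& adj_p, float& adj_q, float adj_ret)
{
    // forward (primal) pass
    float var_0 = q - p;

    // dual vars
    float adj_0 = 0.0f;

    // reverse pass
    adj_0 += 2.0f * var_0 * adj_ret;   // adjoint of var_1 = var_0 * var_0
    adj_q += adj_0;                    // adjoint of var_0 = q - p
    adj_p -= adj_0;
}

int main()
{
    float p = 1.0f, q = 3.5f;
    float adj_p = 0.0f, adj_q = 0.0f;
    adj_f(p, q, adj_p, adj_q, 1.0f);

    const float h = 1e-3f;
    float dp = (f(p + h, q) - f(p - h, q)) / (2.0f * h);
    float dq = (f(p, q + h) - f(p, q - h)) / (2.0f * h);
    std::printf("adj_p %g (fd %g), adj_q %g (fd %g)\n", adj_p, dp, adj_q, dq);  // -5 and 5
    return 0;
}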
NVIDIA/warp/warp/native/crt.h
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once // This file declares a subset of the C runtime (CRT) functions and macros for // use by compute kernel modules. There are three environments in which this // file gets included: // - CUDA kernel modules (WP_NO_CRT and __CUDACC__). CUDA already has implicitly // declared builtins for most functions. printf() and macro definitions are // the notable exceptions. // - C++ kernel modules (WP_NO_CRT and !__CUDACC__). These can't use the CRT // directly when using a standalone compiler. The functions get obtained from // the compiler library instead (clang.dll). // - Warp runtime (!WP_NO_CRT). When building warp.dll it's fine to include the // standard C library headers, and it avoids mismatched redefinitions. #if !defined(__CUDA_ARCH__) #if defined(_WIN32) #define WP_API __declspec(dllexport) #else #define WP_API __attribute__ ((visibility ("default"))) #endif #else #define WP_API #endif #if !defined(__CUDA_ARCH__) // Helper for implementing assert() macro extern "C" WP_API void _wp_assert(const char* message, const char* file, unsigned int line); // Helper for implementing isfinite() extern "C" WP_API int _wp_isfinite(double); // Helper for implementing isnan() extern "C" WP_API int _wp_isnan(double); // Helper for implementing isinf() extern "C" WP_API int _wp_isinf(double); #endif // !__CUDA_ARCH__ #if !defined(WP_NO_CRT) #include <stdint.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <assert.h> #include <float.h> #include <string.h> #else // These definitions are taken from Jitify: https://github.com/NVIDIA/jitify /// float.h #define FLT_RADIX 2 #define FLT_MANT_DIG 24 #define DBL_MANT_DIG 53 #define FLT_DIG 6 #define DBL_DIG 15 #define FLT_MIN_EXP -125 #define DBL_MIN_EXP -1021 #define FLT_MIN_10_EXP -37 #define DBL_MIN_10_EXP -307 #define FLT_MAX_EXP 128 #define DBL_MAX_EXP 1024 #define FLT_MAX_10_EXP 38 #define DBL_MAX_10_EXP 308 #define FLT_MAX 3.4028234e38f #define DBL_MAX 1.7976931348623157e308 #define FLT_EPSILON 1.19209289e-7f #define DBL_EPSILON 2.220440492503130e-16 #define FLT_MIN 1.1754943e-38f #define DBL_MIN 2.2250738585072013e-308 #define FLT_ROUNDS 1 #if defined __cplusplus && __cplusplus >= 201103L #define FLT_EVAL_METHOD 0 #define DECIMAL_DIG 21 #endif /// limits.h #if defined _WIN32 || defined _WIN64 #define __WORDSIZE 32 #else #if defined __x86_64__ && !defined __ILP32__ #define __WORDSIZE 64 #else #define __WORDSIZE 32 #endif #endif #define MB_LEN_MAX 16 #define CHAR_BIT 8 #define SCHAR_MIN (-128) #define SCHAR_MAX 127 #define UCHAR_MAX 255 enum { _JITIFY_CHAR_IS_UNSIGNED = (char)-1 >= 0, CHAR_MIN = _JITIFY_CHAR_IS_UNSIGNED ? 0 : SCHAR_MIN, CHAR_MAX = _JITIFY_CHAR_IS_UNSIGNED ? 
UCHAR_MAX : SCHAR_MAX, }; #define SHRT_MIN (-32768) #define SHRT_MAX 32767 #define USHRT_MAX 65535 #define INT_MIN (-INT_MAX - 1) #define INT_MAX 2147483647 #define UINT_MAX 4294967295U #if __WORDSIZE == 64 # define LONG_MAX 9223372036854775807L #else # define LONG_MAX 2147483647L #endif #define LONG_MIN (-LONG_MAX - 1L) #if __WORDSIZE == 64 #define ULONG_MAX 18446744073709551615UL #else #define ULONG_MAX 4294967295UL #endif #define LLONG_MAX 9223372036854775807LL #define LLONG_MIN (-LLONG_MAX - 1LL) #define ULLONG_MAX 18446744073709551615ULL #define INFINITY ((float)(DBL_MAX * DBL_MAX)) #define HUGE_VAL ((double)INFINITY) #define HUGE_VALF ((float)INFINITY) #define NAN ((float)(0.0 / 0.0)) /// stdint.h typedef signed char int8_t; typedef signed short int16_t; typedef signed int int32_t; typedef signed long long int64_t; //typedef signed char int_fast8_t; //typedef signed short int_fast16_t; //typedef signed int int_fast32_t; //typedef signed long long int_fast64_t; //typedef signed char int_least8_t; //typedef signed short int_least16_t; //typedef signed int int_least32_t; //typedef signed long long int_least64_t; //typedef signed long long intmax_t; //typedef signed long intptr_t; typedef unsigned char uint8_t; typedef unsigned short uint16_t; typedef unsigned int uint32_t; typedef unsigned long long uint64_t; //typedef unsigned char uint_fast8_t; //typedef unsigned short uint_fast16_t; //typedef unsigned int uint_fast32_t; //typedef unsigned long long uint_fast64_t; //typedef unsigned char uint_least8_t; //typedef unsigned short uint_least16_t; //typedef unsigned int uint_least32_t; //typedef unsigned long long uint_least64_t; //typedef unsigned long long uintmax_t; /// math.h // #if __cplusplus >= 201103L // #define DEFINE_MATH_UNARY_FUNC_WRAPPER(f) \ // inline double f(double x) { return ::f(x); } \ // inline float f##f(float x) { return ::f(x); } \ // /*inline long double f##l(long double x) { return ::f(x); }*/ \ // inline float f(float x) { return ::f(x); } \ // /*inline long double f(long double x) { return ::f(x); }*/ // #else // #define DEFINE_MATH_UNARY_FUNC_WRAPPER(f) \ // inline double f(double x) { return ::f(x); } \ // inline float f##f(float x) { return ::f(x); } \ // /*inline long double f##l(long double x) { return ::f(x); }*/ // #endif // DEFINE_MATH_UNARY_FUNC_WRAPPER(cos) // DEFINE_MATH_UNARY_FUNC_WRAPPER(sin) // DEFINE_MATH_UNARY_FUNC_WRAPPER(tan) // DEFINE_MATH_UNARY_FUNC_WRAPPER(acos) // DEFINE_MATH_UNARY_FUNC_WRAPPER(asin) // DEFINE_MATH_UNARY_FUNC_WRAPPER(atan) // template<typename T> inline T atan2(T y, T x) { return ::atan2(y, x); } // DEFINE_MATH_UNARY_FUNC_WRAPPER(cosh) // DEFINE_MATH_UNARY_FUNC_WRAPPER(sinh) // DEFINE_MATH_UNARY_FUNC_WRAPPER(tanh) // DEFINE_MATH_UNARY_FUNC_WRAPPER(exp) // template<typename T> inline T frexp(T x, int* exp) { return ::frexp(x, exp); } // template<typename T> inline T ldexp(T x, int exp) { return ::ldexp(x, exp); } // DEFINE_MATH_UNARY_FUNC_WRAPPER(log) // DEFINE_MATH_UNARY_FUNC_WRAPPER(log10) // template<typename T> inline T modf(T x, T* intpart) { return ::modf(x, intpart); } // template<typename T> inline T pow(T x, T y) { return ::pow(x, y); } // DEFINE_MATH_UNARY_FUNC_WRAPPER(sqrt) // template<typename T> inline T fmod(T n, T d) { return ::fmod(n, d); } // DEFINE_MATH_UNARY_FUNC_WRAPPER(fabs) // template<typename T> inline T abs(T x) { return ::abs(x); } // #if __cplusplus >= 201103L // DEFINE_MATH_UNARY_FUNC_WRAPPER(acosh) // DEFINE_MATH_UNARY_FUNC_WRAPPER(asinh) // DEFINE_MATH_UNARY_FUNC_WRAPPER(atanh) // 
DEFINE_MATH_UNARY_FUNC_WRAPPER(exp2) // DEFINE_MATH_UNARY_FUNC_WRAPPER(expm1) // template<typename T> inline int ilogb(T x) { return ::ilogb(x); } // DEFINE_MATH_UNARY_FUNC_WRAPPER(log1p) // DEFINE_MATH_UNARY_FUNC_WRAPPER(log2) // DEFINE_MATH_UNARY_FUNC_WRAPPER(logb) // template<typename T> inline T scalbn (T x, int n) { return ::scalbn(x, n); } // template<typename T> inline T scalbln(T x, long n) { return ::scalbn(x, n); } // DEFINE_MATH_UNARY_FUNC_WRAPPER(cbrt) // template<typename T> inline T hypot(T x, T y) { return ::hypot(x, y); } // DEFINE_MATH_UNARY_FUNC_WRAPPER(erf) // DEFINE_MATH_UNARY_FUNC_WRAPPER(erfc) // DEFINE_MATH_UNARY_FUNC_WRAPPER(tgamma) // DEFINE_MATH_UNARY_FUNC_WRAPPER(lgamma) // DEFINE_MATH_UNARY_FUNC_WRAPPER(round) // DEFINE_MATH_UNARY_FUNC_WRAPPER(rint) // DEFINE_MATH_UNARY_FUNC_WRAPPER(trunc) // DEFINE_MATH_UNARY_FUNC_WRAPPER(floor) // DEFINE_MATH_UNARY_FUNC_WRAPPER(ceil) // template<typename T> inline long lround(T x) { return ::lround(x); } // template<typename T> inline long long llround(T x) { return ::llround(x); } // DEFINE_MATH_UNARY_FUNC_WRAPPER(rint) // template<typename T> inline long lrint(T x) { return ::lrint(x); } // template<typename T> inline long long llrint(T x) { return ::llrint(x); } // DEFINE_MATH_UNARY_FUNC_WRAPPER(nearbyint) // //DEFINE_MATH_UNARY_FUNC_WRAPPER(isfinite) // // TODO: remainder, remquo, copysign, nan, nextafter, nexttoward, fdim, // // fmax, fmin, fma // #endif // #undef DEFINE_MATH_UNARY_FUNC_WRAPPER #define M_PI 3.14159265358979323846 #if defined(__CUDACC__) #if defined(__clang__) // When compiling CUDA with barebones Clang we need to define its builtins and runtime functions ourselves. #include "cuda_crt.h" #endif #else extern "C" { // stdio.h int printf(const char * format, ... ); // stdlib.h int abs(int); long long llabs(long long); // math.h float fmodf(float, float); double fmod(double, double); float logf(float); double log(double); float log2f(float); double log2(double); float log10f(float); double log10(double); float expf(float); double exp(double); float sqrtf(float); double sqrt(double); float cbrtf(float); double cbrt(double); float powf(float, float); double pow(double, double); float floorf(float); double floor(double); float ceilf(float); double ceil(double); float fabsf(float); double fabs(double); float roundf(float); double round(double); float truncf(float); double trunc(double); float rintf(float); double rint(double); float acosf(float); double acos(double); float asinf(float); double asin(double); float atanf(float); double atan(double); float atan2f(float, float); double atan2(double, double); float cosf(float); double cos(double); float sinf(float); double sin(double); float tanf(float); double tan(double); float sinhf(float); double sinh(double); float coshf(float); double cosh(double); float tanhf(float); double tanh(double); float fmaf(float, float, float); // stddef.h #if defined(_WIN32) using size_t = unsigned __int64; #else using size_t = unsigned long; #endif // string.h void* memset(void*, int, size_t); void* memcpy(void*, const void*, size_t); // stdlib.h void* malloc(size_t); void free(void*); } // extern "C" // cmath inline bool isfinite(double x) { return _wp_isfinite(x); } inline bool isnan(double x) { return _wp_isnan(x); } inline bool isinf(double x) { return _wp_isinf(x); } // assert.h #ifdef NDEBUG #define assert(expression) ((void)0) #else #define assert(expression) (void)( \ (!!(expression)) || \ (_wp_assert((#expression), (__FILE__), (unsigned)(__LINE__)), 0) \ ) #endif #endif // 
!__CUDACC__ #endif // WP_NO_CRT
10,788
C
29.563739
107
0.678346
NVIDIA/warp/warp/native/rand.h
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ # pragma once #include "array.h" #ifndef M_PI_F #define M_PI_F 3.14159265358979323846f #endif namespace wp { inline CUDA_CALLABLE uint32 rand_pcg(uint32 state) { uint32 b = state * 747796405u + 2891336453u; uint32 c = ((b >> ((b >> 28u) + 4u)) ^ b) * 277803737u; return (c >> 22u) ^ c; } inline CUDA_CALLABLE uint32 rand_init(int seed) { return rand_pcg(uint32(seed)); } inline CUDA_CALLABLE uint32 rand_init(int seed, int offset) { return rand_pcg(uint32(seed) + rand_pcg(uint32(offset))); } inline CUDA_CALLABLE int randi(uint32& state) { state = rand_pcg(state); return int(state); } inline CUDA_CALLABLE int randi(uint32& state, int min, int max) { state = rand_pcg(state); return state % (max - min) + min; } inline CUDA_CALLABLE float randf(uint32& state) { state = rand_pcg(state); return (state >> 8) * (1.0f / 16777216.0f); } inline CUDA_CALLABLE float randf(uint32& state, float min, float max) { return (max - min) * randf(state) + min; } // Box-Muller method inline CUDA_CALLABLE float randn(uint32& state) { return sqrt(-2.f * log(randf(state))) * cos(2.f * M_PI_F * randf(state)); } inline CUDA_CALLABLE void adj_rand_init(int seed, int& adj_seed, float adj_ret) {} inline CUDA_CALLABLE void adj_rand_init(int seed, int offset, int& adj_seed, int& adj_offset, float adj_ret) {} inline CUDA_CALLABLE void adj_randi(uint32& state, uint32& adj_state, float adj_ret) {} inline CUDA_CALLABLE void adj_randi(uint32& state, int min, int max, uint32& adj_state, int& adj_min, int& adj_max, float adj_ret) {} inline CUDA_CALLABLE void adj_randf(uint32& state, uint32& adj_state, float adj_ret) {} inline CUDA_CALLABLE void adj_randf(uint32& state, float min, float max, uint32& adj_state, float& adj_min, float& adj_max, float adj_ret) {} inline CUDA_CALLABLE void adj_randn(uint32& state, uint32& adj_state, float adj_ret) {} inline CUDA_CALLABLE int sample_cdf(uint32& state, const array_t<float>& cdf) { float u = randf(state); return lower_bound<float>(cdf, u); } inline CUDA_CALLABLE vec2 sample_triangle(uint32& state) { float r = sqrt(randf(state)); float u = 1.f - r; float v = randf(state) * r; return vec2(u, v); } inline CUDA_CALLABLE vec2 sample_unit_ring(uint32& state) { float theta = randf(state, 0.f, 2.f*M_PI_F); float x = cos(theta); float y = sin(theta); return vec2(x, y); } inline CUDA_CALLABLE vec2 sample_unit_disk(uint32& state) { float r = sqrt(randf(state)); float theta = randf(state, 0.f, 2.f*M_PI_F); float x = r * cos(theta); float y = r * sin(theta); return vec2(x, y); } inline CUDA_CALLABLE vec3 sample_unit_sphere_surface(uint32& state) { float phi = acos(1.f - 2.f * randf(state)); float theta = randf(state, 0.f, 2.f*M_PI_F); float x = cos(theta) * sin(phi); float y = sin(theta) * sin(phi); float z = cos(phi); return vec3(x, y, z); } inline CUDA_CALLABLE vec3 sample_unit_sphere(uint32& state) { float phi = acos(1.f - 2.f * randf(state)); float theta = randf(state, 0.f, 2.f*M_PI_F); float r = pow(randf(state), 1.f/3.f); float x = r * cos(theta) * sin(phi); float y = r * sin(theta) * sin(phi); float z = r * cos(phi); return vec3(x, y, z); } inline CUDA_CALLABLE vec3 
sample_unit_hemisphere_surface(uint32& state) { float phi = acos(1.f - randf(state)); float theta = randf(state, 0.f, 2.f*M_PI_F); float x = cos(theta) * sin(phi); float y = sin(theta) * sin(phi); float z = cos(phi); return vec3(x, y, z); } inline CUDA_CALLABLE vec3 sample_unit_hemisphere(uint32& state) { float phi = acos(1.f - randf(state)); float theta = randf(state, 0.f, 2.f*M_PI_F); float r = pow(randf(state), 1.f/3.f); float x = r * cos(theta) * sin(phi); float y = r * sin(theta) * sin(phi); float z = r * cos(phi); return vec3(x, y, z); } inline CUDA_CALLABLE vec2 sample_unit_square(uint32& state) { float x = randf(state) - 0.5f; float y = randf(state) - 0.5f; return vec2(x, y); } inline CUDA_CALLABLE vec3 sample_unit_cube(uint32& state) { float x = randf(state) - 0.5f; float y = randf(state) - 0.5f; float z = randf(state) - 0.5f; return vec3(x, y, z); } inline CUDA_CALLABLE vec4 sample_unit_hypercube(uint32& state) { float a = randf(state) - 0.5f; float b = randf(state) - 0.5f; float c = randf(state) - 0.5f; float d = randf(state) - 0.5f; return vec4(a, b, c, d); } inline CUDA_CALLABLE void adj_sample_cdf(uint32& state, const array_t<float>& cdf, uint32& adj_state, array_t<float>& adj_cdf, const int& adj_ret) {} inline CUDA_CALLABLE void adj_sample_triangle(uint32& state, uint32& adj_state, const vec2& adj_ret) {} inline CUDA_CALLABLE void adj_sample_unit_ring(uint32& state, uint32& adj_state, const vec2& adj_ret) {} inline CUDA_CALLABLE void adj_sample_unit_disk(uint32& state, uint32& adj_state, const vec2& adj_ret) {} inline CUDA_CALLABLE void adj_sample_unit_sphere_surface(uint32& state, uint32& adj_state, const vec3& adj_ret) {} inline CUDA_CALLABLE void adj_sample_unit_sphere(uint32& state, uint32& adj_state, const vec3& adj_ret) {} inline CUDA_CALLABLE void adj_sample_unit_hemisphere_surface(uint32& state, uint32& adj_state, const vec3& adj_ret) {} inline CUDA_CALLABLE void adj_sample_unit_hemisphere(uint32& state, uint32& adj_state, const vec3& adj_ret) {} inline CUDA_CALLABLE void adj_sample_unit_square(uint32& state, uint32& adj_state, const vec2& adj_ret) {} inline CUDA_CALLABLE void adj_sample_unit_cube(uint32& state, uint32& adj_state, const vec3& adj_ret) {} inline CUDA_CALLABLE void adj_sample_unit_hypercube(uint32& state, uint32& adj_state, const vec3& adj_ret) {} /* * log-gamma function to support some of these distributions. The * algorithm comes from SPECFUN by Shanjie Zhang and Jianming Jin and their * book "Computation of Special Functions", 1996, John Wiley & Sons, Inc. * * If random_loggam(k+1) is being used to compute log(k!) for an integer k, consider * using logfactorial(k) instead. 
*/
inline CUDA_CALLABLE float random_loggam(float x)
{
    float x0, x2, lg2pi, gl, gl0;
    uint32 n;

    const float a[10] = {8.333333333333333e-02f, -2.777777777777778e-03f,
                         7.936507936507937e-04f, -5.952380952380952e-04f,
                         8.417508417508418e-04f, -1.917526917526918e-03f,
                         6.410256410256410e-03f, -2.955065359477124e-02f,
                         1.796443723688307e-01f, -1.39243221690590e+00f};

    if ((x == 1.f) || (x == 2.f))
    {
        return 0.f;
    }
    else if (x < 7.f)
    {
        n = uint32((7 - x));
    }
    else
    {
        n = 0;
    }

    x0 = x + float(n);
    x2 = (1.f / x0) * (1.f / x0);
    // log(2 * M_PI_F)
    lg2pi = 1.8378770664093453f;
    gl0 = a[9];

    for (int i = 8; i >= 0; i--)
    {
        gl0 *= x2;
        gl0 += a[i];
    }
    gl = gl0 / x0 + 0.5f * lg2pi + (x0 - 0.5f) * log(x0) - x0;
    if (x < 7.f)
    {
        for (uint32 k = 1; k <= n; k++)
        {
            gl -= log(x0 - 1.f);
            x0 -= 1.f;
        }
    }
    return gl;
}

inline CUDA_CALLABLE uint32 random_poisson_mult(uint32& state, float lam)
{
    uint32 X;
    float prod, U, enlam;

    enlam = exp(-lam);
    X = 0;
    prod = 1.f;
    while (1)
    {
        U = randf(state);
        prod *= U;
        if (prod > enlam)
        {
            X += 1;
        }
        else
        {
            return X;
        }
    }
}

/*
 * The transformed rejection method for generating Poisson random variables
 * W. Hoermann
 * Insurance: Mathematics and Economics 12, 39-45 (1993)
 */
inline CUDA_CALLABLE uint32 random_poisson(uint32& state, float lam)
{
    uint32 k;
    float U, V, slam, loglam, a, b, invalpha, vr, us;

    slam = sqrt(lam);
    loglam = log(lam);
    b = 0.931f + 2.53f * slam;
    a = -0.059f + 0.02483f * b;
    invalpha = 1.1239f + 1.1328f / (b - 3.4f);
    vr = 0.9277f - 3.6224f / (b - 2.f);

    while (1)
    {
        U = randf(state) - 0.5f;
        V = randf(state);
        us = 0.5f - abs(U);
        k = uint32(floor((2.f * a / us + b) * U + lam + 0.43f));
        if ((us >= 0.07f) && (V <= vr))
        {
            return k;
        }
        if ((us < 0.013f) && (V > us))
        {
            continue;
        }
        if ((log(V) + log(invalpha) - log(a / (us * us) + b)) <=
            (-lam + k * loglam - random_loggam(k + 1)))
        {
            return k;
        }
    }
}

/*
 * Adapted from NumPy's implementation
 * Warp's state variable is half the precision of NumPy's so
 * poisson implementation uses half the precision used in NumPy's implementation
 * both precisions appear to converge in the statistical limit
 */
inline CUDA_CALLABLE uint32 poisson(uint32& state, float lam)
{
    if (lam >= 10.f)
    {
        return random_poisson(state, lam);
    }
    else if (lam == 0.f)
    {
        return 0;
    }
    else
    {
        return random_poisson_mult(state, lam);
    }
}

inline CUDA_CALLABLE void adj_random_loggam(float x, float& adj_x, const float adj_ret) {}
inline CUDA_CALLABLE void adj_random_poisson_mult(uint32& state, float lam, uint32& adj_state, float& adj_lam, const uint32& adj_ret) {}
inline CUDA_CALLABLE void adj_random_poisson(uint32& state, float lam, uint32& adj_state, float& adj_lam, const uint32& adj_ret) {}
inline CUDA_CALLABLE void adj_poisson(uint32& state, float lam, uint32& adj_state, float& adj_lam, const uint32& adj_ret) {}

} // namespace wp
9,904
C
32.016667
149
0.622476
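All of the samplers in rand.h are built on a small PCG-style hash. The standalone mirror below reproduces rand_pcg, the [0, 1) mapping of randf, and the Box-Muller randn so the stream can be inspected on the host without the Warp headers; the log(0) guard is added for this demo only and is not present in the header.

#include <cstdint>
#include <cstdio>
#include <cmath>

// Hash-based RNG step, identical constants to wp::rand_pcg.
static uint32_t rand_pcg(uint32_t state)
{
    uint32_t b = state * 747796405u + 2891336453u;
    uint32_t c = ((b >> ((b >> 28u) + 4u)) ^ b) * 277803737u;
    return (c >> 22u) ^ c;
}

// Uniform in [0, 1) from the top 24 bits, as in wp::randf.
static float randf(uint32_t& state)
{
    state = rand_pcg(state);
    return (state >> 8) * (1.0f / 16777216.0f);
}

// Box-Muller transform, as in wp::randn.
static float randn(uint32_t& state)
{
    float u1 = randf(state);
    float u2 = randf(state);
    if (u1 <= 0.0f)
        u1 = 1.0f / 16777216.0f;   // demo-only guard against log(0)
    return std::sqrt(-2.0f * std::log(u1)) * std::cos(6.2831853f * u2);
}

int main()
{
    uint32_t state = rand_pcg(42u);   // equivalent to rand_init(42)

    double sum = 0.0, sum_sq = 0.0;
    const int n = 100000;
    for (int i = 0; i < n; ++i)
    {
        double x = randn(state);
        sum += x;
        sum_sq += x * x;
    }
    double mean = sum / n;
    std::printf("mean %f var %f\n", mean, sum_sq / n - mean * mean);   // ~0 and ~1
    return 0;
}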
NVIDIA/warp/warp/native/marching.cpp
// not implemented yet
24
C++
7.333331
22
0.708333
NVIDIA/warp/warp/native/bvh.h
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once #include "builtin.h" #include "intersect.h" namespace wp { struct bounds3 { CUDA_CALLABLE inline bounds3() : lower( FLT_MAX) , upper(-FLT_MAX) {} CUDA_CALLABLE inline bounds3(const vec3& lower, const vec3& upper) : lower(lower), upper(upper) {} CUDA_CALLABLE inline vec3 center() const { return 0.5f*(lower+upper); } CUDA_CALLABLE inline vec3 edges() const { return upper-lower; } CUDA_CALLABLE inline void expand(float r) { lower -= vec3(r); upper += vec3(r); } CUDA_CALLABLE inline void expand(const vec3& r) { lower -= r; upper += r; } CUDA_CALLABLE inline bool empty() const { return lower[0] >= upper[0] || lower[1] >= upper[1] || lower[2] >= upper[2]; } CUDA_CALLABLE inline bool overlaps(const vec3& p) const { if (p[0] < lower[0] || p[1] < lower[1] || p[2] < lower[2] || p[0] > upper[0] || p[1] > upper[1] || p[2] > upper[2]) { return false; } else { return true; } } CUDA_CALLABLE inline bool overlaps(const bounds3& b) const { if (lower[0] > b.upper[0] || lower[1] > b.upper[1] || lower[2] > b.upper[2] || upper[0] < b.lower[0] || upper[1] < b.lower[1] || upper[2] < b.lower[2]) { return false; } else { return true; } } CUDA_CALLABLE inline void add_point(const vec3& p) { lower = min(lower, p); upper = max(upper, p); } CUDA_CALLABLE inline float area() const { vec3 e = upper-lower; return 2.0f*(e[0]*e[1] + e[0]*e[2] + e[1]*e[2]); } vec3 lower; vec3 upper; }; CUDA_CALLABLE inline bounds3 bounds_union(const bounds3& a, const vec3& b) { return bounds3(min(a.lower, b), max(a.upper, b)); } CUDA_CALLABLE inline bounds3 bounds_union(const bounds3& a, const bounds3& b) { return bounds3(min(a.lower, b.lower), max(a.upper, b.upper)); } CUDA_CALLABLE inline bounds3 bounds_intersection(const bounds3& a, const bounds3& b) { return bounds3(max(a.lower, b.lower), min(a.upper, b.upper)); } struct BVHPackedNodeHalf { float x; float y; float z; unsigned int i : 31; unsigned int b : 1; }; struct BVH { BVHPackedNodeHalf* node_lowers; BVHPackedNodeHalf* node_uppers; // used for fast refits int* node_parents; int* node_counts; int max_depth; int max_nodes; int num_nodes; // pointer (CPU or GPU) to a single integer index in node_lowers, node_uppers // representing the root of the tree, this is not always the first node // for bottom-up builders int* root; // item bounds are not owned by the BVH but by the caller vec3* item_lowers; vec3* item_uppers; int num_items; // cuda context void* context; }; CUDA_CALLABLE inline BVHPackedNodeHalf make_node(const vec3& bound, int child, bool leaf) { BVHPackedNodeHalf n; n.x = bound[0]; n.y = bound[1]; n.z = bound[2]; n.i = (unsigned int)child; n.b = (unsigned int)(leaf?1:0); return n; } // variation of make_node through volatile pointers used in build_hierarchy CUDA_CALLABLE inline void make_node(volatile BVHPackedNodeHalf* n, const vec3& bound, int child, bool leaf) { n->x = bound[0]; n->y = bound[1]; n->z = bound[2]; n->i = (unsigned int)child; n->b = (unsigned int)(leaf?1:0); } CUDA_CALLABLE inline int clz(int x) { int n; if (x == 0) return 32; for (n = 0; ((x & 0x80000000) == 0); n++, x <<= 1); return n; } CUDA_CALLABLE inline uint32_t 
part1by2(uint32_t n) { n = (n ^ (n << 16)) & 0xff0000ff; n = (n ^ (n << 8)) & 0x0300f00f; n = (n ^ (n << 4)) & 0x030c30c3; n = (n ^ (n << 2)) & 0x09249249; return n; } // Takes values in the range [0, 1] and assigns an index based Morton codes of length 3*lwp2(dim) bits template <int dim> CUDA_CALLABLE inline uint32_t morton3(float x, float y, float z) { uint32_t ux = clamp(int(x*dim), 0, dim-1); uint32_t uy = clamp(int(y*dim), 0, dim-1); uint32_t uz = clamp(int(z*dim), 0, dim-1); return (part1by2(uz) << 2) | (part1by2(uy) << 1) | part1by2(ux); } // making the class accessible from python CUDA_CALLABLE inline BVH bvh_get(uint64_t id) { return *(BVH*)(id); } CUDA_CALLABLE inline int bvh_get_num_bounds(uint64_t id) { BVH bvh = bvh_get(id); return bvh.num_items; } // stores state required to traverse the BVH nodes that // overlap with a query AABB. struct bvh_query_t { CUDA_CALLABLE bvh_query_t() : bvh(), stack(), count(0), is_ray(false), input_lower(), input_upper(), bounds_nr(0) {} // Required for adjoint computations. CUDA_CALLABLE inline bvh_query_t& operator+=(const bvh_query_t& other) { return *this; } BVH bvh; // BVH traversal stack: int stack[32]; int count; // inputs bool is_ray; wp::vec3 input_lower; // start for ray wp::vec3 input_upper; // dir for ray int bounds_nr; }; CUDA_CALLABLE inline bvh_query_t bvh_query( uint64_t id, bool is_ray, const vec3& lower, const vec3& upper) { // This routine traverses the BVH tree until it finds // the first overlapping bound. // initialize empty bvh_query_t query; query.bounds_nr = -1; BVH bvh = bvh_get(id); query.bvh = bvh; query.is_ray = is_ray; // optimization: make the latest query.stack[0] = *bvh.root; query.count = 1; query.input_lower = lower; query.input_upper = upper; wp::bounds3 input_bounds(query.input_lower, query.input_upper); // Navigate through the bvh, find the first overlapping leaf node. while (query.count) { const int node_index = query.stack[--query.count]; BVHPackedNodeHalf node_lower = bvh.node_lowers[node_index]; BVHPackedNodeHalf node_upper = bvh.node_uppers[node_index]; wp::vec3 lower_pos(node_lower.x, node_lower.y, node_lower.z); wp::vec3 upper_pos(node_upper.x, node_upper.y, node_upper.z); wp::bounds3 current_bounds(lower_pos, upper_pos); if (query.is_ray) { float t = 0.0f; if (!intersect_ray_aabb(query.input_lower, query.input_upper, current_bounds.lower, current_bounds.upper, t)) // Skip this box, it doesn't overlap with our ray. continue; } else { if (!input_bounds.overlaps(current_bounds)) // Skip this box, it doesn't overlap with our target box. continue; } const int left_index = node_lower.i; const int right_index = node_upper.i; // Make bounds from this AABB if (node_lower.b) { // found very first leaf index. 
// Back up one level and return query.stack[query.count++] = node_index; return query; } else { query.stack[query.count++] = left_index; query.stack[query.count++] = right_index; } } return query; } CUDA_CALLABLE inline bvh_query_t bvh_query_aabb( uint64_t id, const vec3& lower, const vec3& upper) { return bvh_query(id, false, lower, upper); } CUDA_CALLABLE inline bvh_query_t bvh_query_ray( uint64_t id, const vec3& start, const vec3& dir) { return bvh_query(id, true, start, dir); } //Stub CUDA_CALLABLE inline void adj_bvh_query_aabb(uint64_t id, const vec3& lower, const vec3& upper, uint64_t, vec3&, vec3&, bvh_query_t&) { } CUDA_CALLABLE inline void adj_bvh_query_ray(uint64_t id, const vec3& start, const vec3& dir, uint64_t, vec3&, vec3&, bvh_query_t&) { } CUDA_CALLABLE inline bool bvh_query_next(bvh_query_t& query, int& index) { BVH bvh = query.bvh; wp::bounds3 input_bounds(query.input_lower, query.input_upper); // Navigate through the bvh, find the first overlapping leaf node. while (query.count) { const int node_index = query.stack[--query.count]; BVHPackedNodeHalf node_lower = bvh.node_lowers[node_index]; BVHPackedNodeHalf node_upper = bvh.node_uppers[node_index]; wp::vec3 lower_pos(node_lower.x, node_lower.y, node_lower.z); wp::vec3 upper_pos(node_upper.x, node_upper.y, node_upper.z); wp::bounds3 current_bounds(lower_pos, upper_pos); if (query.is_ray) { float t = 0.0f; if (!intersect_ray_aabb(query.input_lower, query.input_upper, current_bounds.lower, current_bounds.upper, t)) // Skip this box, it doesn't overlap with our ray. continue; } else { if (!input_bounds.overlaps(current_bounds)) // Skip this box, it doesn't overlap with our target box. continue; } const int left_index = node_lower.i; const int right_index = node_upper.i; if (node_lower.b) { // found leaf query.bounds_nr = left_index; index = left_index; return true; } else { query.stack[query.count++] = left_index; query.stack[query.count++] = right_index; } } return false; } CUDA_CALLABLE inline int iter_next(bvh_query_t& query) { return query.bounds_nr; } CUDA_CALLABLE inline bool iter_cmp(bvh_query_t& query) { bool finished = bvh_query_next(query, query.bounds_nr); return finished; } CUDA_CALLABLE inline bvh_query_t iter_reverse(const bvh_query_t& query) { // can't reverse BVH queries, users should not rely on traversal ordering return query; } // stub CUDA_CALLABLE inline void adj_bvh_query_next(bvh_query_t& query, int& index, bvh_query_t&, int&, bool&) { } CUDA_CALLABLE bool bvh_get_descriptor(uint64_t id, BVH& bvh); CUDA_CALLABLE void bvh_add_descriptor(uint64_t id, const BVH& bvh); CUDA_CALLABLE void bvh_rem_descriptor(uint64_t id); #if !__CUDA_ARCH__ void bvh_create_host(vec3* lowers, vec3* uppers, int num_items, BVH& bvh); void bvh_destroy_host(wp::BVH& bvh); void bvh_refit_host(wp::BVH& bvh); void bvh_destroy_device(wp::BVH& bvh); void bvh_refit_device(uint64_t id); #endif } // namespace wp
10,181
C
22.62413
121
0.636873
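bvh.h also contains the Morton-code helpers (part1by2 / morton3) used to order items along a space-filling curve before building the hierarchy. The standalone mirror below reproduces the bit interleaving; the remark that nearby points tend to receive nearby codes is the usual property of Morton order, not something the header itself asserts.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Spread the low 10 bits of n so that two zero bits separate each input bit.
static uint32_t part1by2(uint32_t n)
{
    n = (n ^ (n << 16)) & 0xff0000ff;
    n = (n ^ (n << 8))  & 0x0300f00f;
    n = (n ^ (n << 4))  & 0x030c30c3;
    n = (n ^ (n << 2))  & 0x09249249;
    return n;
}

// Quantize x, y, z in [0, 1] onto a dim^3 grid and interleave the cell coordinates.
template <int dim>
static uint32_t morton3(float x, float y, float z)
{
    uint32_t ux = std::min(std::max(int(x * dim), 0), dim - 1);
    uint32_t uy = std::min(std::max(int(y * dim), 0), dim - 1);
    uint32_t uz = std::min(std::max(int(z * dim), 0), dim - 1);
    return (part1by2(uz) << 2) | (part1by2(uy) << 1) | part1by2(ux);
}

int main()
{
    // nearby cells tend to get nearby codes, distant cells get distant ones
    std::printf("%u\n", morton3<1024>(0.10f, 0.10f, 0.10f));
    std::printf("%u\n", morton3<1024>(0.11f, 0.10f, 0.10f));
    std::printf("%u\n", morton3<1024>(0.90f, 0.90f, 0.90f));
    return 0;
}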
NVIDIA/warp/warp/native/runlength_encode.cpp
#include "warp.h"

#include <cstdint>

template <typename T>
void runlength_encode_host(int n,
                           const T *values,
                           T *run_values, int *run_lengths, int *run_count)
{
    if (n == 0)
    {
        *run_count = 0;
        return;
    }

    const T *end = values + n;

    *run_count = 1;
    *run_lengths = 1;
    *run_values = *values;

    while (++values != end)
    {
        if (*values == *run_values)
        {
            ++*run_lengths;
        }
        else
        {
            ++*run_count;
            *(++run_lengths) = 1;
            *(++run_values) = *values;
        }
    }
}

void runlength_encode_int_host(
    uint64_t values,
    uint64_t run_values,
    uint64_t run_lengths,
    uint64_t run_count,
    int n)
{
    runlength_encode_host<int>(n,
                               reinterpret_cast<const int *>(values),
                               reinterpret_cast<int *>(run_values),
                               reinterpret_cast<int *>(run_lengths),
                               reinterpret_cast<int *>(run_count));
}

#if !WP_ENABLE_CUDA
void runlength_encode_int_device(
    uint64_t values,
    uint64_t run_values,
    uint64_t run_lengths,
    uint64_t run_count,
    int n)
{
}
#endif
1,325
C++
20.387096
69
0.453585
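The following is a small illustration of the output convention of runlength_encode_host (run values, run lengths, run count). It re-implements the same merge logic with std::vector outputs purely so that the example compiles on its own; the Warp entry point above operates on raw pointers instead.

#include <cstdio>
#include <vector>

// run_values[i] holds the value of run i and run_lengths[i] its length.
static void runlength_encode(const std::vector<int>& values,
                             std::vector<int>& run_values,
                             std::vector<int>& run_lengths)
{
    for (int v : values)
    {
        if (!run_values.empty() && v == run_values.back())
        {
            ++run_lengths.back();          // extend the current run
        }
        else
        {
            run_values.push_back(v);       // start a new run
            run_lengths.push_back(1);
        }
    }
}

int main()
{
    std::vector<int> values = {7, 7, 7, 2, 2, 9, 9, 9, 9};
    std::vector<int> run_values, run_lengths;
    runlength_encode(values, run_values, run_lengths);

    // prints: (7 x 3) (2 x 2) (9 x 4), i.e. run_count == 3
    for (size_t i = 0; i < run_values.size(); ++i)
        std::printf("(%d x %d) ", run_values[i], run_lengths[i]);
    std::printf("\n");
    return 0;
}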
NVIDIA/warp/warp/native/scan.h
#pragma once

template<typename T>
void scan_host(const T* values_in, T* values_out, int n, bool inclusive = true);

template<typename T>
void scan_device(const T* values_in, T* values_out, int n, bool inclusive = true);
221
C
26.749997
82
0.723982
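scan.h only declares the interface, so the exclusive-scan convention used below (result shifted right by one, starting at zero) is an assumption based on the usual prefix-sum definition rather than something the header states. A serial reference of both modes:

#include <cstdio>

// Inclusive scan: out[i] = a[0] + ... + a[i].  Exclusive scan: out[i] = a[0] + ... + a[i-1].
template <typename T>
static void scan_reference(const T* values_in, T* values_out, int n, bool inclusive = true)
{
    T running = T(0);
    for (int i = 0; i < n; ++i)
    {
        if (inclusive)
        {
            running += values_in[i];
            values_out[i] = running;
        }
        else
        {
            values_out[i] = running;
            running += values_in[i];
        }
    }
}

int main()
{
    const int in[5] = {1, 2, 3, 4, 5};
    int inc[5], exc[5];
    scan_reference(in, inc, 5, true);    // 1 3 6 10 15
    scan_reference(in, exc, 5, false);   // 0 1 3 6 10
    for (int i = 0; i < 5; ++i) std::printf("%d ", inc[i]);
    std::printf("\n");
    for (int i = 0; i < 5; ++i) std::printf("%d ", exc[i]);
    std::printf("\n");
    return 0;
}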
NVIDIA/warp/warp/native/reduce.cpp
#include "warp.h" namespace { // Specialized accumulation functions for common type sizes template <int N, typename T> void fixed_len_sum(const T *val, T *sum, int value_size) { for (int i = 0; i < N; ++i, ++val, ++sum) { *sum += *val; } } template <typename T> void dyn_len_sum(const T *val, T *sum, int value_size) { for (int i = 0; i < value_size; ++i, ++val, ++sum) { *sum += *val; } } template <int N, typename T> void fixed_len_inner(const T *a, const T *b, T *dot, int value_size) { for (int i = 0; i < N; ++i, ++a, ++b) { *dot += *a * *b; } } template <typename T> void dyn_len_inner(const T *a, const T *b, T *dot, int value_size) { for (int i = 0; i < value_size; ++i, ++a, ++b) { *dot += *a * *b; } } } // namespace template <typename T> void array_inner_host(const T *ptr_a, const T *ptr_b, T *ptr_out, int count, int byte_stride_a, int byte_stride_b, int type_length) { assert((byte_stride_a % sizeof(T)) == 0); assert((byte_stride_b % sizeof(T)) == 0); const int stride_a = byte_stride_a / sizeof(T); const int stride_b = byte_stride_b / sizeof(T); void (*inner_func)(const T *, const T *, T *, int); switch (type_length) { case 1: inner_func = fixed_len_inner<1, T>; break; case 2: inner_func = fixed_len_inner<2, T>; break; case 3: inner_func = fixed_len_inner<3, T>; break; case 4: inner_func = fixed_len_inner<4, T>; break; default: inner_func = dyn_len_inner<T>; } *ptr_out = 0.0f; for (int i = 0; i < count; ++i) { inner_func(ptr_a + i * stride_a, ptr_b + i * stride_b, ptr_out, type_length); } } template <typename T> void array_sum_host(const T *ptr_a, T *ptr_out, int count, int byte_stride, int type_length) { assert((byte_stride % sizeof(T)) == 0); const int stride = byte_stride / sizeof(T); void (*accumulate_func)(const T *, T *, int); switch (type_length) { case 1: accumulate_func = fixed_len_sum<1, T>; break; case 2: accumulate_func = fixed_len_sum<2, T>; break; case 3: accumulate_func = fixed_len_sum<3, T>; break; case 4: accumulate_func = fixed_len_sum<4, T>; break; default: accumulate_func = dyn_len_sum<T>; } memset(ptr_out, 0, sizeof(T)*type_length); for (int i = 0; i < count; ++i) accumulate_func(ptr_a + i * stride, ptr_out, type_length); } void array_inner_float_host(uint64_t a, uint64_t b, uint64_t out, int count, int byte_stride_a, int byte_stride_b, int type_length) { const float *ptr_a = (const float *)(a); const float *ptr_b = (const float *)(b); float *ptr_out = (float *)(out); array_inner_host(ptr_a, ptr_b, ptr_out, count, byte_stride_a, byte_stride_b, type_length); } void array_inner_double_host(uint64_t a, uint64_t b, uint64_t out, int count, int byte_stride_a, int byte_stride_b, int type_length) { const double *ptr_a = (const double *)(a); const double *ptr_b = (const double *)(b); double *ptr_out = (double *)(out); array_inner_host(ptr_a, ptr_b, ptr_out, count, byte_stride_a, byte_stride_b, type_length); } void array_sum_float_host(uint64_t a, uint64_t out, int count, int byte_stride_a, int type_length) { const float *ptr_a = (const float *)(a); float *ptr_out = (float *)(out); array_sum_host(ptr_a, ptr_out, count, byte_stride_a, type_length); } void array_sum_double_host(uint64_t a, uint64_t out, int count, int byte_stride_a, int type_length) { const double *ptr_a = (const double *)(a); double *ptr_out = (double *)(out); array_sum_host(ptr_a, ptr_out, count, byte_stride_a, type_length); } #if !WP_ENABLE_CUDA void array_inner_float_device(uint64_t a, uint64_t b, uint64_t out, int count, int byte_stride_a, int byte_stride_b, int type_length) { } void 
array_inner_double_device(uint64_t a, uint64_t b, uint64_t out, int count, int byte_stride_a, int byte_stride_b, int type_length) { } void array_sum_float_device(uint64_t a, uint64_t out, int count, int byte_stride_a, int type_length) { } void array_sum_double_device(uint64_t a, uint64_t out, int count, int byte_stride_a, int type_length) { } #endif
4,460
C++
27.414013
117
0.574439
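array_inner_host and array_sum_host above address elements through a byte stride plus a per-element type_length, which allows padded layouts. The standalone sketch below reproduces that addressing for an inner product over vec3-like elements stored with one float of padding each; the layout and values are invented for the example.

#include <cassert>
#include <cstdio>

// Each of `count` elements is `type_length` floats wide; consecutive elements start
// `byte_stride` bytes apart, so padding between elements is skipped.
static float strided_inner(const float* a, const float* b,
                           int count, int byte_stride_a, int byte_stride_b, int type_length)
{
    assert(byte_stride_a % int(sizeof(float)) == 0 && byte_stride_b % int(sizeof(float)) == 0);
    const int stride_a = byte_stride_a / int(sizeof(float));
    const int stride_b = byte_stride_b / int(sizeof(float));

    float dot = 0.0f;
    for (int i = 0; i < count; ++i)
        for (int j = 0; j < type_length; ++j)
            dot += a[i * stride_a + j] * b[i * stride_b + j];
    return dot;
}

int main()
{
    // three vec3s per array, each padded to 16 bytes with a -1 sentinel
    const float a[12] = {1, 0, 0, -1,   0, 1, 0, -1,   0, 0, 1, -1};
    const float b[12] = {2, 0, 0, -1,   0, 3, 0, -1,   0, 0, 4, -1};

    // byte stride 16 (4 floats), type_length 3: the padding must not contribute
    std::printf("%g\n", strided_inner(a, b, 3, 16, 16, 3));   // 1*2 + 1*3 + 1*4 = 9
    return 0;
}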
NVIDIA/warp/warp/native/sparse.cpp
#include "warp.h" #include <algorithm> #include <numeric> #include <vector> namespace { // Specialized is_zero and accumulation function for common block sizes // Rely on compiler to unroll loops when block size is known template <int N, typename T> bool bsr_fixed_block_is_zero(const T *val, int value_size) { return std::all_of(val, val + N, [](float v) { return v == T(0); }); } template <typename T> bool bsr_dyn_block_is_zero(const T *val, int value_size) { return std::all_of(val, val + value_size, [](float v) { return v == T(0); }); } template <int N, typename T> void bsr_fixed_block_accumulate(const T *val, T *sum, int value_size) { for (int i = 0; i < N; ++i, ++val, ++sum) { *sum += *val; } } template <typename T> void bsr_dyn_block_accumulate(const T *val, T *sum, int value_size) { for (int i = 0; i < value_size; ++i, ++val, ++sum) { *sum += *val; } } template <int Rows, int Cols, typename T> void bsr_fixed_block_transpose(const T *src, T *dest, int row_count, int col_count) { for (int r = 0; r < Rows; ++r) { for (int c = 0; c < Cols; ++c) { dest[c * Rows + r] = src[r * Cols + c]; } } } template <typename T> void bsr_dyn_block_transpose(const T *src, T *dest, int row_count, int col_count) { for (int r = 0; r < row_count; ++r) { for (int c = 0; c < col_count; ++c) { dest[c * row_count + r] = src[r * col_count + c]; } } } } // namespace template <typename T> int bsr_matrix_from_triplets_host(const int rows_per_block, const int cols_per_block, const int row_count, const int nnz, const int *tpl_rows, const int *tpl_columns, const T *tpl_values, int *bsr_offsets, int *bsr_columns, T *bsr_values) { // get specialized accumulator for common block sizes (1,1), (1,2), (1,3), // (2,2), (2,3), (3,3) const int block_size = rows_per_block * cols_per_block; void (*block_accumulate_func)(const T *, T *, int); bool (*block_is_zero_func)(const T *, int); switch (block_size) { case 1: block_accumulate_func = bsr_fixed_block_accumulate<1, T>; block_is_zero_func = bsr_fixed_block_is_zero<1, T>; break; case 2: block_accumulate_func = bsr_fixed_block_accumulate<2, T>; block_is_zero_func = bsr_fixed_block_is_zero<2, T>; break; case 3: block_accumulate_func = bsr_fixed_block_accumulate<3, T>; block_is_zero_func = bsr_fixed_block_is_zero<3, T>; break; case 4: block_accumulate_func = bsr_fixed_block_accumulate<4, T>; block_is_zero_func = bsr_fixed_block_is_zero<4, T>; break; case 6: block_accumulate_func = bsr_fixed_block_accumulate<6, T>; block_is_zero_func = bsr_fixed_block_is_zero<6, T>; break; case 9: block_accumulate_func = bsr_fixed_block_accumulate<9, T>; block_is_zero_func = bsr_fixed_block_is_zero<9, T>; break; default: block_accumulate_func = bsr_dyn_block_accumulate<T>; block_is_zero_func = bsr_dyn_block_is_zero<T>; } std::vector<int> block_indices(nnz); std::iota(block_indices.begin(), block_indices.end(), 0); // remove zero block indices if (tpl_values) { block_indices.erase(std::remove_if(block_indices.begin(), block_indices.end(), [block_is_zero_func, tpl_values, block_size](int i) { return block_is_zero_func(tpl_values + i * block_size, block_size); }), block_indices.end()); } // sort block indices according to lexico order std::sort(block_indices.begin(), block_indices.end(), [tpl_rows, tpl_columns](int i, int j) -> bool { return tpl_rows[i] < tpl_rows[j] || (tpl_rows[i] == tpl_rows[j] && tpl_columns[i] < tpl_columns[j]); }); // accumulate blocks at same locations, count blocks per row std::fill_n(bsr_offsets, row_count + 1, 0); int current_row = -1; int current_col = -1; // so that we get 
back to the start for the first block if (bsr_values) { bsr_values -= block_size; } for (int i = 0; i < block_indices.size(); ++i) { int idx = block_indices[i]; int row = tpl_rows[idx]; int col = tpl_columns[idx]; const T *val = tpl_values + idx * block_size; if (row == current_row && col == current_col) { if (bsr_values) { block_accumulate_func(val, bsr_values, block_size); } } else { *(bsr_columns++) = col; if (bsr_values) { bsr_values += block_size; std::copy_n(val, block_size, bsr_values); } bsr_offsets[row + 1]++; current_row = row; current_col = col; } } // build postfix sum of row counts std::partial_sum(bsr_offsets, bsr_offsets + row_count + 1, bsr_offsets); return bsr_offsets[row_count]; } template <typename T> void bsr_transpose_host(int rows_per_block, int cols_per_block, int row_count, int col_count, int nnz, const int *bsr_offsets, const int *bsr_columns, const T *bsr_values, int *transposed_bsr_offsets, int *transposed_bsr_columns, T *transposed_bsr_values) { const int block_size = rows_per_block * cols_per_block; void (*block_transpose_func)(const T *, T *, int, int) = bsr_dyn_block_transpose<T>; switch (rows_per_block) { case 1: switch (cols_per_block) { case 1: block_transpose_func = bsr_fixed_block_transpose<1, 1, T>; break; case 2: block_transpose_func = bsr_fixed_block_transpose<1, 2, T>; break; case 3: block_transpose_func = bsr_fixed_block_transpose<1, 3, T>; break; } break; case 2: switch (cols_per_block) { case 1: block_transpose_func = bsr_fixed_block_transpose<2, 1, T>; break; case 2: block_transpose_func = bsr_fixed_block_transpose<2, 2, T>; break; case 3: block_transpose_func = bsr_fixed_block_transpose<2, 3, T>; break; } break; case 3: switch (cols_per_block) { case 1: block_transpose_func = bsr_fixed_block_transpose<3, 1, T>; break; case 2: block_transpose_func = bsr_fixed_block_transpose<3, 2, T>; break; case 3: block_transpose_func = bsr_fixed_block_transpose<3, 3, T>; break; } break; } std::vector<int> block_indices(nnz), bsr_rows(nnz); std::iota(block_indices.begin(), block_indices.end(), 0); // Fill row indices from offsets for (int row = 0; row < row_count; ++row) { std::fill(bsr_rows.begin() + bsr_offsets[row], bsr_rows.begin() + bsr_offsets[row + 1], row); } // sort block indices according to (transposed) lexico order std::sort(block_indices.begin(), block_indices.end(), [&bsr_rows, bsr_columns](int i, int j) -> bool { return bsr_columns[i] < bsr_columns[j] || (bsr_columns[i] == bsr_columns[j] && bsr_rows[i] < bsr_rows[j]); }); // Count blocks per column and transpose blocks std::fill_n(transposed_bsr_offsets, col_count + 1, 0); for (int i = 0; i < nnz; ++i) { int idx = block_indices[i]; int row = bsr_rows[idx]; int col = bsr_columns[idx]; ++transposed_bsr_offsets[col + 1]; transposed_bsr_columns[i] = row; const T *src_block = bsr_values + idx * block_size; T *dst_block = transposed_bsr_values + i * block_size; block_transpose_func(src_block, dst_block, rows_per_block, cols_per_block); } // build postfix sum of column counts std::partial_sum(transposed_bsr_offsets, transposed_bsr_offsets + col_count + 1, transposed_bsr_offsets); } WP_API int bsr_matrix_from_triplets_float_host(int rows_per_block, int cols_per_block, int row_count, int nnz, uint64_t tpl_rows, uint64_t tpl_columns, uint64_t tpl_values, uint64_t bsr_offsets, uint64_t bsr_columns, uint64_t bsr_values) { return bsr_matrix_from_triplets_host( rows_per_block, cols_per_block, row_count, nnz, reinterpret_cast<const int *>(tpl_rows), reinterpret_cast<const int *>(tpl_columns), 
reinterpret_cast<const float *>(tpl_values), reinterpret_cast<int *>(bsr_offsets), reinterpret_cast<int *>(bsr_columns), reinterpret_cast<float *>(bsr_values)); } WP_API int bsr_matrix_from_triplets_double_host(int rows_per_block, int cols_per_block, int row_count, int nnz, uint64_t tpl_rows, uint64_t tpl_columns, uint64_t tpl_values, uint64_t bsr_offsets, uint64_t bsr_columns, uint64_t bsr_values) { return bsr_matrix_from_triplets_host( rows_per_block, cols_per_block, row_count, nnz, reinterpret_cast<const int *>(tpl_rows), reinterpret_cast<const int *>(tpl_columns), reinterpret_cast<const double *>(tpl_values), reinterpret_cast<int *>(bsr_offsets), reinterpret_cast<int *>(bsr_columns), reinterpret_cast<double *>(bsr_values)); } WP_API void bsr_transpose_float_host(int rows_per_block, int cols_per_block, int row_count, int col_count, int nnz, uint64_t bsr_offsets, uint64_t bsr_columns, uint64_t bsr_values, uint64_t transposed_bsr_offsets, uint64_t transposed_bsr_columns, uint64_t transposed_bsr_values) { bsr_transpose_host(rows_per_block, cols_per_block, row_count, col_count, nnz, reinterpret_cast<const int *>(bsr_offsets), reinterpret_cast<const int *>(bsr_columns), reinterpret_cast<const float *>(bsr_values), reinterpret_cast<int *>(transposed_bsr_offsets), reinterpret_cast<int *>(transposed_bsr_columns), reinterpret_cast<float *>(transposed_bsr_values)); } WP_API void bsr_transpose_double_host(int rows_per_block, int cols_per_block, int row_count, int col_count, int nnz, uint64_t bsr_offsets, uint64_t bsr_columns, uint64_t bsr_values, uint64_t transposed_bsr_offsets, uint64_t transposed_bsr_columns, uint64_t transposed_bsr_values) { bsr_transpose_host(rows_per_block, cols_per_block, row_count, col_count, nnz, reinterpret_cast<const int *>(bsr_offsets), reinterpret_cast<const int *>(bsr_columns), reinterpret_cast<const double *>(bsr_values), reinterpret_cast<int *>(transposed_bsr_offsets), reinterpret_cast<int *>(transposed_bsr_columns), reinterpret_cast<double *>(transposed_bsr_values)); } #if !WP_ENABLE_CUDA WP_API int bsr_matrix_from_triplets_float_device(int rows_per_block, int cols_per_block, int row_count, int nnz, uint64_t tpl_rows, uint64_t tpl_columns, uint64_t tpl_values, uint64_t bsr_offsets, uint64_t bsr_columns, uint64_t bsr_values) { return 0; } WP_API int bsr_matrix_from_triplets_double_device(int rows_per_block, int cols_per_block, int row_count, int nnz, uint64_t tpl_rows, uint64_t tpl_columns, uint64_t tpl_values, uint64_t bsr_offsets, uint64_t bsr_columns, uint64_t bsr_values) { return 0; } WP_API void bsr_transpose_float_device(int rows_per_block, int cols_per_block, int row_count, int col_count, int nnz, uint64_t bsr_offsets, uint64_t bsr_columns, uint64_t bsr_values, uint64_t transposed_bsr_offsets, uint64_t transposed_bsr_columns, uint64_t transposed_bsr_values) { } WP_API void bsr_transpose_double_device(int rows_per_block, int cols_per_block, int row_count, int col_count, int nnz, uint64_t bsr_offsets, uint64_t bsr_columns, uint64_t bsr_values, uint64_t transposed_bsr_offsets, uint64_t transposed_bsr_columns, uint64_t transposed_bsr_values) { } #endif
12,954
C++
37.328402
118
0.558669
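For 1x1 blocks, bsr_matrix_from_triplets_host reduces to building a CSR matrix: drop explicit zeros, sort the triplets by (row, column), accumulate duplicates, and prefix-sum the per-row counts into the offsets array. The standalone walk-through below runs that pipeline on a tiny 3x3 example; it is illustrative only and does not call the Warp entry points.

#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

int main()
{
    const int row_count = 3;
    std::vector<int>   rows = {2, 0, 0, 1, 2, 0};
    std::vector<int>   cols = {2, 1, 0, 1, 2, 1};
    std::vector<float> vals = {5.f, 1.f, 2.f, 0.f, 3.f, 4.f};   // entry (1, 1) is an explicit zero

    std::vector<int> order(rows.size());
    std::iota(order.begin(), order.end(), 0);

    // drop zero entries
    order.erase(std::remove_if(order.begin(), order.end(),
                               [&](int i) { return vals[i] == 0.f; }),
                order.end());

    // sort the remaining triplets lexicographically by (row, col)
    std::sort(order.begin(), order.end(), [&](int i, int j)
              { return rows[i] < rows[j] || (rows[i] == rows[j] && cols[i] < cols[j]); });

    // merge duplicates and count entries per row
    std::vector<int>   csr_offsets(row_count + 1, 0);
    std::vector<int>   csr_columns;
    std::vector<float> csr_values;
    int last_row = -1;

    for (int idx : order)
    {
        if (!csr_columns.empty() && rows[idx] == last_row && cols[idx] == csr_columns.back())
        {
            csr_values.back() += vals[idx];          // duplicate at (0, 1): 1 + 4
        }
        else
        {
            csr_columns.push_back(cols[idx]);
            csr_values.push_back(vals[idx]);
            csr_offsets[rows[idx] + 1]++;
            last_row = rows[idx];
        }
    }

    // offsets become the running sum of per-row counts
    std::partial_sum(csr_offsets.begin(), csr_offsets.end(), csr_offsets.begin());

    std::printf("offsets:");
    for (int o : csr_offsets) std::printf(" %d", o);                   // 0 2 2 3
    std::printf("\nentries:");
    for (size_t i = 0; i < csr_columns.size(); ++i)
        std::printf(" (col %d, %g)", csr_columns[i], csr_values[i]);   // (0, 2) (1, 5) (2, 8)
    std::printf("\n");
    return 0;
}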
NVIDIA/warp/warp/native/fabric.h
#pragma once #include "builtin.h" namespace wp { struct fabricbucket_t { size_t index_start; size_t index_end; void* ptr; size_t* lengths; }; template <typename T> struct fabricarray_t { CUDA_CALLABLE inline fabricarray_t() : nbuckets(0), size(0) {} CUDA_CALLABLE inline bool empty() const { return !size; } fabricbucket_t* buckets; // array of fabricbucket_t on the correct device size_t nbuckets; size_t size; }; template <typename T> struct indexedfabricarray_t { CUDA_CALLABLE inline indexedfabricarray_t() : indices(), size(0) {} CUDA_CALLABLE inline bool empty() const { return !size; } fabricarray_t<T> fa; // TODO: we use 32-bit indices for consistency with other Warp indexed arrays, // but Fabric uses 64-bit indexing. int* indices; size_t size; }; #ifndef FABRICARRAY_USE_BINARY_SEARCH #define FABRICARRAY_USE_BINARY_SEARCH 1 #endif template <typename T> CUDA_CALLABLE inline const fabricbucket_t* fabricarray_find_bucket(const fabricarray_t<T>& fa, size_t i) { #if FABRICARRAY_USE_BINARY_SEARCH // use binary search to find the right bucket const fabricbucket_t* bucket = nullptr; size_t lo = 0; size_t hi = fa.nbuckets - 1; while (hi >= lo) { size_t mid = (lo + hi) >> 1; bucket = fa.buckets + mid; if (i >= bucket->index_end) lo = mid + 1; else if (i < bucket->index_start) hi = mid - 1; else return bucket; } return nullptr; #else // use linear search to find the right bucket const fabricbucket_t* bucket = fa.buckets; const fabricbucket_t* bucket_end = bucket + fa.nbuckets; for (; bucket < bucket_end; ++bucket) { if (i < bucket->index_end) return bucket; } return nullptr; #endif } // Compute the pointer to a fabricarray element at index i. // This function is similar to wp::index(), but the array data type doesn't need to be known at compile time. CUDA_CALLABLE inline void* fabricarray_element_ptr(const fabricarray_t<void>& fa, size_t i, size_t elem_size) { const fabricbucket_t* bucket = fabricarray_find_bucket(fa, i); size_t index_in_bucket = i - bucket->index_start; return (char*)bucket->ptr + index_in_bucket * elem_size; } template <typename T> CUDA_CALLABLE inline T& index(const fabricarray_t<T>& fa, size_t i) { const fabricbucket_t* bucket = fabricarray_find_bucket(fa, i); assert(bucket && "Fabric array index out of range"); size_t index_in_bucket = i - bucket->index_start; T& result = *((T*)bucket->ptr + index_in_bucket); FP_VERIFY_FWD_1(result) return result; } // indexing for fabric array of arrays template <typename T> CUDA_CALLABLE inline T& index(const fabricarray_t<T>& fa, size_t i, size_t j) { const fabricbucket_t* bucket = fabricarray_find_bucket(fa, i); assert(bucket && "Fabric array index out of range"); assert(bucket->lengths && "Missing inner array lengths"); size_t index_in_bucket = i - bucket->index_start; void* ptr = *((void**)bucket->ptr + index_in_bucket); size_t length = *((size_t*)bucket->lengths + index_in_bucket); assert(j < length && "Fabric array inner index out of range"); T& result = *((T*)ptr + j); FP_VERIFY_FWD_1(result) return result; } template <typename T> CUDA_CALLABLE inline array_t<T> view(fabricarray_t<T>& fa, size_t i) { const fabricbucket_t* bucket = fabricarray_find_bucket(fa, i); assert(bucket && "Fabric array index out of range"); assert(bucket->lengths && "Missing inner array lengths"); size_t index_in_bucket = i - bucket->index_start; void* ptr = *((void**)bucket->ptr + index_in_bucket); size_t length = *((size_t*)bucket->lengths + index_in_bucket); return array_t<T>((T*)ptr, int(length)); } template <typename T> CUDA_CALLABLE inline T& index(const 
indexedfabricarray_t<T>& ifa, size_t i) { // index lookup assert(i < ifa.size); i = size_t(ifa.indices[i]); const fabricbucket_t* bucket = fabricarray_find_bucket(ifa.fa, i); assert(bucket && "Fabric array index out of range"); size_t index_in_bucket = i - bucket->index_start; T& result = *((T*)bucket->ptr + index_in_bucket); FP_VERIFY_FWD_1(result) return result; } // indexing for fabric array of arrays template <typename T> CUDA_CALLABLE inline T& index(const indexedfabricarray_t<T>& ifa, size_t i, size_t j) { // index lookup assert(i < ifa.size); i = size_t(ifa.indices[i]); const fabricbucket_t* bucket = fabricarray_find_bucket(ifa.fa, i); assert(bucket && "Fabric array index out of range"); assert(bucket->lengths && "Missing inner array lengths"); size_t index_in_bucket = i - bucket->index_start; void* ptr = *((void**)bucket->ptr + index_in_bucket); size_t length = *((size_t*)bucket->lengths + index_in_bucket); assert(j < length && "Fabric array inner index out of range"); T& result = *((T*)ptr + j); FP_VERIFY_FWD_1(result) return result; } template <typename T> CUDA_CALLABLE inline array_t<T> view(indexedfabricarray_t<T>& ifa, size_t i) { // index lookup assert(i < ifa.size); i = size_t(ifa.indices[i]); const fabricbucket_t* bucket = fabricarray_find_bucket(ifa.fa, i); assert(bucket && "Fabric array index out of range"); assert(bucket->lengths && "Missing inner array lengths"); size_t index_in_bucket = i - bucket->index_start; void* ptr = *((void**)bucket->ptr + index_in_bucket); size_t length = *((size_t*)bucket->lengths + index_in_bucket); return array_t<T>((T*)ptr, int(length)); } } // namespace wp
5,750
C++
24.113537
109
0.63913
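fabricarray_t stores its elements in buckets, each covering a contiguous global index range [index_start, index_end), and fabricarray_find_bucket() locates the owning bucket with a binary (or optionally linear) search before computing the offset inside it. The sketch below shows the same lookup as a standalone function using std::upper_bound; the Bucket struct and the sample ranges are illustrative assumptions, not the Warp types.

// Standalone sketch of the bucket lookup performed by fabricarray_find_bucket():
// given buckets that cover disjoint, ascending index ranges, find the bucket
// that owns a global index, or return nullptr if the index is out of range.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

struct Bucket
{
    size_t index_start;
    size_t index_end; // one past the last index owned by this bucket
};

const Bucket* find_bucket(const std::vector<Bucket>& buckets, size_t i)
{
    // binary search on the (ascending) bucket end indices
    auto it = std::upper_bound(buckets.begin(), buckets.end(), i,
                               [](size_t idx, const Bucket& b) { return idx < b.index_end; });
    if (it != buckets.end() && i >= it->index_start)
        return &*it;
    return nullptr;
}

int main()
{
    const std::vector<Bucket> buckets = {{0, 4}, {4, 10}, {10, 16}};
    for (size_t i : {0, 3, 4, 9, 15, 16})
    {
        const Bucket* b = find_bucket(buckets, i);
        printf("index %zu -> bucket start %ld\n", i, b ? (long)b->index_start : -1L);
    }
    return 0;
}

Returning nullptr for an out-of-range index mirrors the header, whose index() and view() helpers assert that a bucket was found before dereferencing it.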
NVIDIA/warp/warp/native/bvh.cpp
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #include <vector> #include <algorithm> #include "bvh.h" #include "warp.h" #include "cuda_util.h" #include <map> using namespace wp; namespace wp { ///////////////////////////////////////////////////////////////////////////////////////////// class MedianBVHBuilder { public: void build(BVH& bvh, const vec3* lowers, const vec3* uppers, int n); private: bounds3 calc_bounds(const vec3* lowers, const vec3* uppers, const int* indices, int start, int end); int partition_median(const vec3* lowers, const vec3* uppers, int* indices, int start, int end, bounds3 range_bounds); int partition_midpoint(const vec3* lowers, const vec3* uppers, int* indices, int start, int end, bounds3 range_bounds); int partition_sah(const vec3* lowers, const vec3* uppers, int* indices, int start, int end, bounds3 range_bounds); int build_recursive(BVH& bvh, const vec3* lowers, const vec3* uppers, int* indices, int start, int end, int depth, int parent); }; ////////////////////////////////////////////////////////////////////// void MedianBVHBuilder::build(BVH& bvh, const vec3* lowers, const vec3* uppers, int n) { bvh.max_depth = 0; bvh.max_nodes = 2*n-1; bvh.node_lowers = new BVHPackedNodeHalf[bvh.max_nodes]; bvh.node_uppers = new BVHPackedNodeHalf[bvh.max_nodes]; bvh.node_parents = new int[bvh.max_nodes]; bvh.node_counts = NULL; // root is always in first slot for top down builders bvh.root = new int[1]; bvh.root[0] = 0; if (n == 0) return; std::vector<int> indices(n); for (int i=0; i < n; ++i) indices[i] = i; build_recursive(bvh, lowers, uppers, &indices[0], 0, n, 0, -1); } bounds3 MedianBVHBuilder::calc_bounds(const vec3* lowers, const vec3* uppers, const int* indices, int start, int end) { bounds3 u; for (int i=start; i < end; ++i) { u.add_point(lowers[indices[i]]); u.add_point(uppers[indices[i]]); } return u; } struct PartitionPredicateMedian { PartitionPredicateMedian(const vec3* lowers, const vec3* uppers, int a) : lowers(lowers), uppers(uppers), axis(a) {} bool operator()(int a, int b) const { vec3 a_center = 0.5f*(lowers[a] + uppers[a]); vec3 b_center = 0.5f*(lowers[b] + uppers[b]); return a_center[axis] < b_center[axis]; } const vec3* lowers; const vec3* uppers; int axis; }; int MedianBVHBuilder::partition_median(const vec3* lowers, const vec3* uppers, int* indices, int start, int end, bounds3 range_bounds) { assert(end-start >= 2); vec3 edges = range_bounds.edges(); int axis = longest_axis(edges); const int k = (start+end)/2; std::nth_element(&indices[start], &indices[k], &indices[end], PartitionPredicateMedian(lowers, uppers, axis)); return k; } struct PartitionPredictateMidPoint { PartitionPredictateMidPoint(const vec3* lowers, const vec3* uppers, int a, float m) : lowers(lowers), uppers(uppers), axis(a), mid(m) {} bool operator()(int index) const { vec3 center = 0.5f*(lowers[index] + uppers[index]); return center[axis] <= mid; } const vec3* lowers; const vec3* uppers; int axis; float mid; }; int MedianBVHBuilder::partition_midpoint(const vec3* lowers, const vec3* uppers, int* indices, int start, int end, bounds3 range_bounds) { assert(end-start >= 2); vec3 edges = range_bounds.edges(); vec3 
center = range_bounds.center(); int axis = longest_axis(edges); float mid = center[axis]; int* upper = std::partition(indices+start, indices+end, PartitionPredictateMidPoint(lowers, uppers, axis, mid)); int k = upper-indices; // if we failed to split items then just split in the middle if (k == start || k == end) k = (start+end)/2; return k; } // disable std::sort workaround for macOS error #if 0 int MedianBVHBuilder::partition_sah(const bounds3* bounds, int* indices, int start, int end, bounds3 range_bounds) { assert(end-start >= 2); int n = end-start; vec3 edges = range_bounds.edges(); int longestAxis = longest_axis(edges); // sort along longest axis std::sort(&indices[0]+start, &indices[0]+end, PartitionPredicateMedian(&bounds[0], longestAxis)); // total area for range from [0, split] std::vector<float> left_areas(n); // total area for range from (split, end] std::vector<float> right_areas(n); bounds3 left; bounds3 right; // build cumulative bounds and area from left and right for (int i=0; i < n; ++i) { left = bounds_union(left, bounds[indices[start+i]]); right = bounds_union(right, bounds[indices[end-i-1]]); left_areas[i] = left.area(); right_areas[n-i-1] = right.area(); } float invTotalArea = 1.0f/range_bounds.area(); // find split point i that minimizes area(left[i]) + area(right[i]) int minSplit = 0; float minCost = FLT_MAX; for (int i=0; i < n; ++i) { float pBelow = left_areas[i]*invTotalArea; float pAbove = right_areas[i]*invTotalArea; float cost = pBelow*i + pAbove*(n-i); if (cost < minCost) { minCost = cost; minSplit = i; } } return start + minSplit + 1; } #endif int MedianBVHBuilder::build_recursive(BVH& bvh, const vec3* lowers, const vec3* uppers, int* indices, int start, int end, int depth, int parent) { assert(start < end); const int n = end-start; const int node_index = bvh.num_nodes++; assert(node_index < bvh.max_nodes); if (depth > bvh.max_depth) bvh.max_depth = depth; bounds3 b = calc_bounds(lowers, uppers, indices, start, end); const int kMaxItemsPerLeaf = 1; if (n <= kMaxItemsPerLeaf) { bvh.node_lowers[node_index] = make_node(b.lower, indices[start], true); bvh.node_uppers[node_index] = make_node(b.upper, indices[start], false); bvh.node_parents[node_index] = parent; } else { //int split = partition_midpoint(bounds, indices, start, end, b); int split = partition_median(lowers, uppers, indices, start, end, b); //int split = partition_sah(bounds, indices, start, end, b); if (split == start || split == end) { // partitioning failed, split down the middle split = (start+end)/2; } int left_child = build_recursive(bvh, lowers, uppers, indices, start, split, depth+1, node_index); int right_child = build_recursive(bvh, lowers, uppers, indices, split, end, depth+1, node_index); bvh.node_lowers[node_index] = make_node(b.lower, left_child, false); bvh.node_uppers[node_index] = make_node(b.upper, right_child, false); bvh.node_parents[node_index] = parent; } return node_index; } void bvh_refit_recursive(BVH& bvh, int index) { BVHPackedNodeHalf& lower = bvh.node_lowers[index]; BVHPackedNodeHalf& upper = bvh.node_uppers[index]; if (lower.b) { const int leaf_index = lower.i; // update leaf from items (vec3&)lower = bvh.item_lowers[leaf_index]; (vec3&)upper = bvh.item_uppers[leaf_index]; } else { int left_index = lower.i; int right_index = upper.i; bvh_refit_recursive(bvh, left_index); bvh_refit_recursive(bvh, right_index); // compute union of children const vec3& left_lower = (vec3&)bvh.node_lowers[left_index]; const vec3& left_upper = (vec3&)bvh.node_uppers[left_index]; const vec3& 
right_lower = (vec3&)bvh.node_lowers[right_index]; const vec3& right_upper = (vec3&)bvh.node_uppers[right_index]; // union of child bounds vec3 new_lower = min(left_lower, right_lower); vec3 new_upper = max(left_upper, right_upper); // write new BVH nodes (vec3&)lower = new_lower; (vec3&)upper = new_upper; } } void bvh_refit_host(BVH& bvh) { bvh_refit_recursive(bvh, 0); } } // namespace wp // making the class accessible from python namespace { // host-side copy of bvh descriptors, maps GPU bvh address (id) to a CPU desc std::map<uint64_t, BVH> g_bvh_descriptors; } // anonymous namespace namespace wp { bool bvh_get_descriptor(uint64_t id, BVH& bvh) { const auto& iter = g_bvh_descriptors.find(id); if (iter == g_bvh_descriptors.end()) return false; else bvh = iter->second; return true; } void bvh_add_descriptor(uint64_t id, const BVH& bvh) { g_bvh_descriptors[id] = bvh; } void bvh_rem_descriptor(uint64_t id) { g_bvh_descriptors.erase(id); } // create in-place given existing descriptor void bvh_create_host(vec3* lowers, vec3* uppers, int num_items, BVH& bvh) { memset(&bvh, 0, sizeof(BVH)); bvh.item_lowers = lowers; bvh.item_uppers = uppers; bvh.num_items = num_items; MedianBVHBuilder builder; builder.build(bvh, lowers, uppers, num_items); } void bvh_destroy_host(BVH& bvh) { delete[] bvh.node_lowers; delete[] bvh.node_uppers; delete[] bvh.node_parents; delete[] bvh.root; bvh.node_lowers = NULL; bvh.node_uppers = NULL; bvh.node_parents = NULL; bvh.root = NULL; bvh.max_nodes = 0; bvh.num_items = 0; } } // namespace wp uint64_t bvh_create_host(vec3* lowers, vec3* uppers, int num_items) { BVH* bvh = new BVH(); wp::bvh_create_host(lowers, uppers, num_items, *bvh); return (uint64_t)bvh; } void bvh_refit_host(uint64_t id) { BVH* bvh = (BVH*)(id); bvh_refit_host(*bvh); } void bvh_destroy_host(uint64_t id) { BVH* bvh = (BVH*)(id); bvh_destroy_host(*bvh); delete bvh; } // stubs for non-CUDA platforms #if !WP_ENABLE_CUDA uint64_t bvh_create_device(void* context, wp::vec3* lowers, wp::vec3* uppers, int num_items) { return 0; } void bvh_refit_device(uint64_t id) {} void bvh_destroy_device(uint64_t id) {} #endif // !WP_ENABLE_CUDA
10,474
C++
24.992556
144
0.621253
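MedianBVHBuilder splits each node's item range at the median centroid along the longest axis of the range bounds (partition_median), and falls back to splitting the index range down the middle if the partition degenerates. The sketch below reproduces that split step as a standalone function using std::nth_element in the same way; the Aabb struct and the sample boxes are illustrative assumptions rather than the Warp bounds3/vec3 types.

// Standalone sketch of the median split used by the top-down builder:
// pick the longest axis of the combined bounds, then partially sort the
// index range so the median-centroid item sits at the midpoint.
#include <algorithm>
#include <array>
#include <cstdio>
#include <numeric>
#include <vector>

struct Aabb
{
    std::array<float, 3> lower;
    std::array<float, 3> upper;
};

int median_split(const std::vector<Aabb>& items, std::vector<int>& indices, int start, int end)
{
    // combined bounds of the range
    std::array<float, 3> lo = {1e30f, 1e30f, 1e30f};
    std::array<float, 3> hi = {-1e30f, -1e30f, -1e30f};
    for (int i = start; i < end; ++i)
    {
        for (int a = 0; a < 3; ++a)
        {
            lo[a] = std::min(lo[a], items[indices[i]].lower[a]);
            hi[a] = std::max(hi[a], items[indices[i]].upper[a]);
        }
    }

    // longest axis of the range bounds
    int axis = 0;
    for (int a = 1; a < 3; ++a)
        if (hi[a] - lo[a] > hi[axis] - lo[axis])
            axis = a;

    // partial sort: every index left of k has a smaller centroid along 'axis'
    const int k = (start + end) / 2;
    auto centroid = [&](int idx) { return 0.5f * (items[idx].lower[axis] + items[idx].upper[axis]); };
    std::nth_element(indices.begin() + start, indices.begin() + k, indices.begin() + end,
                     [&](int a, int b) { return centroid(a) < centroid(b); });
    return k;
}

int main()
{
    const std::vector<Aabb> items = {
        {{5, 0, 0}, {6, 1, 1}}, {{0, 0, 0}, {1, 1, 1}}, {{9, 0, 0}, {10, 1, 1}}, {{2, 0, 0}, {3, 1, 1}}};
    std::vector<int> indices(items.size());
    std::iota(indices.begin(), indices.end(), 0);

    const int split = median_split(items, indices, 0, (int)items.size());
    printf("split at %d, left items:", split);
    for (int i = 0; i < split; ++i)
        printf(" %d", indices[i]); // expected: items 1 and 3 (smallest x centroids), in some order
    printf("\n");
    return 0;
}

std::nth_element runs in linear time on average, which keeps the recursive build close to O(n log n) overall; the one-item-per-leaf policy (kMaxItemsPerLeaf) then yields 2n-1 nodes, matching the max_nodes allocation above.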
NVIDIA/warp/warp/native/mat.h
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once #include "initializer_array.h" namespace wp { //---------------------------------------------------------- // mat template<typename T> struct quat_t; template<unsigned Rows, unsigned Cols, typename Type> struct mat_t { inline CUDA_CALLABLE mat_t() : data() {} inline CUDA_CALLABLE mat_t(Type s) { for (unsigned i=0; i < Rows; ++i) for (unsigned j=0; j < Cols; ++j) data[i][j] = s; } template <typename OtherType> inline explicit CUDA_CALLABLE mat_t(const mat_t<Rows, Cols, OtherType>& other) { for (unsigned i=0; i < Rows; ++i) for (unsigned j=0; j < Cols; ++j) data[i][j] = other.data[i][j]; } inline CUDA_CALLABLE mat_t(vec_t<2,Type> c0, vec_t<2,Type> c1) { data[0][0] = c0[0]; data[1][0] = c0[1]; data[0][1] = c1[0]; data[1][1] = c1[1]; } inline CUDA_CALLABLE mat_t(vec_t<3,Type> c0, vec_t<3,Type> c1, vec_t<3,Type> c2) { data[0][0] = c0[0]; data[1][0] = c0[1]; data[2][0] = c0[2]; data[0][1] = c1[0]; data[1][1] = c1[1]; data[2][1] = c1[2]; data[0][2] = c2[0]; data[1][2] = c2[1]; data[2][2] = c2[2]; } inline CUDA_CALLABLE mat_t(vec_t<4,Type> c0, vec_t<4,Type> c1, vec_t<4,Type> c2, vec_t<4,Type> c3) { data[0][0] = c0[0]; data[1][0] = c0[1]; data[2][0] = c0[2]; data[3][0] = c0[3]; data[0][1] = c1[0]; data[1][1] = c1[1]; data[2][1] = c1[2]; data[3][1] = c1[3]; data[0][2] = c2[0]; data[1][2] = c2[1]; data[2][2] = c2[2]; data[3][2] = c2[3]; data[0][3] = c3[0]; data[1][3] = c3[1]; data[2][3] = c3[2]; data[3][3] = c3[3]; } inline CUDA_CALLABLE mat_t(Type m00, Type m01, Type m10, Type m11) { data[0][0] = m00; data[1][0] = m10; data[0][1] = m01; data[1][1] = m11; } inline CUDA_CALLABLE mat_t( Type m00, Type m01, Type m02, Type m10, Type m11, Type m12, Type m20, Type m21, Type m22) { data[0][0] = m00; data[1][0] = m10; data[2][0] = m20; data[0][1] = m01; data[1][1] = m11; data[2][1] = m21; data[0][2] = m02; data[1][2] = m12; data[2][2] = m22; } inline CUDA_CALLABLE mat_t( Type m00, Type m01, Type m02, Type m03, Type m10, Type m11, Type m12, Type m13, Type m20, Type m21, Type m22, Type m23, Type m30, Type m31, Type m32, Type m33) { data[0][0] = m00; data[1][0] = m10; data[2][0] = m20; data[3][0] = m30; data[0][1] = m01; data[1][1] = m11; data[2][1] = m21; data[3][1] = m31; data[0][2] = m02; data[1][2] = m12; data[2][2] = m22; data[3][2] = m32; data[0][3] = m03; data[1][3] = m13; data[2][3] = m23; data[3][3] = m33; } // implemented in quat.h inline CUDA_CALLABLE mat_t(const vec_t<3,Type>& pos, const quat_t<Type>& rot, const vec_t<3,Type>& scale); inline CUDA_CALLABLE mat_t(const initializer_array<Rows * Cols, Type> &l) { for (unsigned i=0; i < Rows; ++i) { for (unsigned j=0; j < Cols; ++j) { data[i][j] = l[i * Cols + j]; } } } inline CUDA_CALLABLE mat_t(const initializer_array<Cols, vec_t<Rows,Type> > &l) { for (unsigned j=0; j < Cols; ++j) { for (unsigned i=0; i < Rows; ++i) { data[i][j] = l[j][i]; } } } CUDA_CALLABLE vec_t<Cols,Type> get_row(int index) const { return (vec_t<Cols,Type>&)data[index]; } CUDA_CALLABLE void set_row(int index, const vec_t<Cols,Type>& v) { (vec_t<Cols,Type>&)data[index] = v; } CUDA_CALLABLE vec_t<Rows,Type> get_col(int index) const { vec_t<Rows,Type> 
ret; for( unsigned i=0;i < Rows; ++i ) { ret[i] = data[i][index]; } return ret; } CUDA_CALLABLE void set_col(int index, const vec_t<Rows,Type>& v) { for( unsigned i=0;i < Rows; ++i ) { data[i][index] = v[i]; } } // row major storage assumed to be compatible with PyTorch Type data[Rows][Cols]; }; template<unsigned Rows, typename Type> inline CUDA_CALLABLE mat_t<Rows, Rows, Type> identity() { mat_t<Rows, Rows, Type> m; for( unsigned i=0; i < Rows; ++i ) { m.data[i][i] = Type(1); } return m; } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE bool operator==(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b) { for (unsigned i=0; i < Rows; ++i) for (unsigned j=0; j < Cols; ++j) if (a.data[i][j] != b.data[i][j]) return false; return true; } // negation: template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE mat_t<Rows,Cols,Type> operator - (mat_t<Rows,Cols,Type> a) { // NB: this constructor will initialize all ret's components to 0, which is // unnecessary... mat_t<Rows,Cols,Type> ret; for (unsigned i=0; i < Rows; ++i) for (unsigned j=0; j < Cols; ++j) ret.data[i][j] = -a.data[i][j]; // Wonder if this does a load of copying when it returns... hopefully not as it's inlined? return ret; } template<unsigned Rows, unsigned Cols, typename Type> CUDA_CALLABLE inline mat_t<Rows,Cols,Type> pos(const mat_t<Rows,Cols,Type>& x) { return x; } template<unsigned Rows, unsigned Cols, typename Type> CUDA_CALLABLE inline void adj_pos(const mat_t<Rows,Cols,Type>& x, mat_t<Rows,Cols,Type>& adj_x, const mat_t<Rows,Cols,Type>& adj_ret) { adj_x += adj_ret; } template<unsigned Rows, unsigned Cols, typename Type> CUDA_CALLABLE inline mat_t<Rows,Cols,Type> neg(const mat_t<Rows,Cols,Type>& x) { return -x; } template<unsigned Rows, unsigned Cols, typename Type> CUDA_CALLABLE inline void adj_neg(const mat_t<Rows,Cols,Type>& x, mat_t<Rows,Cols,Type>& adj_x, const mat_t<Rows,Cols,Type>& adj_ret) { adj_x -= adj_ret; } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE mat_t<Rows,Cols,Type> atomic_add(mat_t<Rows,Cols,Type> * addr, mat_t<Rows,Cols,Type> value) { mat_t<Rows,Cols,Type> m; for (unsigned i=0; i < Rows; ++i) for (unsigned j=0; j < Cols; ++j) m.data[i][j] = atomic_add(&addr->data[i][j], value.data[i][j]); return m; } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE mat_t<Rows,Cols,Type> atomic_min(mat_t<Rows,Cols,Type> * addr, mat_t<Rows,Cols,Type> value) { mat_t<Rows,Cols,Type> m; for (unsigned i=0; i < Rows; ++i) for (unsigned j=0; j < Cols; ++j) m.data[i][j] = atomic_min(&addr->data[i][j], value.data[i][j]); return m; } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE mat_t<Rows,Cols,Type> atomic_max(mat_t<Rows,Cols,Type> * addr, mat_t<Rows,Cols,Type> value) { mat_t<Rows,Cols,Type> m; for (unsigned i=0; i < Rows; ++i) for (unsigned j=0; j < Cols; ++j) m.data[i][j] = atomic_max(&addr->data[i][j], value.data[i][j]); return m; } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE void adj_atomic_minmax( mat_t<Rows,Cols,Type> *addr, mat_t<Rows,Cols,Type> *adj_addr, const mat_t<Rows,Cols,Type> &value, mat_t<Rows,Cols,Type> &adj_value) { for (unsigned i=0; i < Rows; ++i) for (unsigned j=0; j < Cols; ++j) adj_atomic_minmax(&addr->data[i][j], &adj_addr->data[i][j], value.data[i][j], adj_value.data[i][j]); } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE vec_t<Cols,Type> extract(const mat_t<Rows,Cols,Type>& m, int row) { vec_t<Cols,Type> 
ret; for(unsigned i=0; i < Cols; ++i) { ret.c[i] = m.data[row][i]; } return ret; } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE Type extract(const mat_t<Rows,Cols,Type>& m, int row, int col) { #ifndef NDEBUG if (row < 0 || row >= Rows) { printf("mat row index %d out of bounds at %s %d\n", row, __FILE__, __LINE__); assert(0); } if (col < 0 || col >= Cols) { printf("mat col index %d out of bounds at %s %d\n", col, __FILE__, __LINE__); assert(0); } #endif return m.data[row][col]; } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE vec_t<Cols, Type>* index(mat_t<Rows,Cols,Type>& m, int row) { #ifndef NDEBUG if (row < 0 || row >= Rows) { printf("mat row index %d out of bounds at %s %d\n", row, __FILE__, __LINE__); assert(0); } #endif return reinterpret_cast<vec_t<Cols, Type>*>(&m.data[row]); } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE Type* index(mat_t<Rows,Cols,Type>& m, int row, int col) { #ifndef NDEBUG if (row < 0 || row >= Rows) { printf("mat row index %d out of bounds at %s %d\n", row, __FILE__, __LINE__); assert(0); } if (col < 0 || col >= Cols) { printf("mat col index %d out of bounds at %s %d\n", col, __FILE__, __LINE__); assert(0); } #endif return &m.data[row][col]; } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE void adj_index(const mat_t<Rows,Cols,Type>& m, int row, const mat_t<Rows,Cols,Type>& adj_m, int adj_row, const vec_t<Cols, Type>& adj_value) { // nop } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE void adj_index(const mat_t<Rows,Cols,Type>& m, int row, int col, const mat_t<Rows,Cols,Type>& adj_m, int adj_row, int adj_col, Type adj_value) { // nop } template<unsigned Rows, unsigned Cols, typename Type> inline bool CUDA_CALLABLE isfinite(const mat_t<Rows,Cols,Type>& m) { for (unsigned i=0; i < Rows; ++i) for (unsigned j=0; j < Cols; ++j) if (!isfinite(m.data[i][j])) return false; return true; } template<unsigned Rows, unsigned Cols, typename Type> inline void CUDA_CALLABLE adj_isfinite(const mat_t<Rows,Cols,Type>& m, mat_t<Rows,Cols,Type>& adj_m, const bool &adj_ret) { } template<unsigned Rows, unsigned Cols, typename Type> inline bool CUDA_CALLABLE isnan(const mat_t<Rows,Cols,Type>& m) { for (unsigned i=0; i < Rows; ++i) for (unsigned j=0; j < Cols; ++j) if (isnan(m.data[i][j])) return true; return false; } template<unsigned Rows, unsigned Cols, typename Type> inline void CUDA_CALLABLE adj_isnan(const mat_t<Rows,Cols,Type>& m, mat_t<Rows,Cols,Type>& adj_m, const bool &adj_ret) { } template<unsigned Rows, unsigned Cols, typename Type> inline bool CUDA_CALLABLE isinf(const mat_t<Rows,Cols,Type>& m) { for (unsigned i=0; i < Rows; ++i) for (unsigned j=0; j < Cols; ++j) if (isinf(m.data[i][j])) return true; return false; } template<unsigned Rows, unsigned Cols, typename Type> inline void CUDA_CALLABLE adj_isinf(const mat_t<Rows,Cols,Type>& m, mat_t<Rows,Cols,Type>& adj_m, const bool &adj_ret) { } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE mat_t<Rows,Cols,Type> add(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b) { mat_t<Rows,Cols,Type> t; for (unsigned i=0; i < Rows; ++i) { for (unsigned j=0; j < Cols; ++j) { t.data[i][j] = a.data[i][j] + b.data[i][j]; } } return t; } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE mat_t<Rows,Cols,Type> sub(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b) { mat_t<Rows,Cols,Type> t; for (unsigned i=0; i < Rows; ++i) { 
for (unsigned j=0; j < Cols; ++j) { t.data[i][j] = a.data[i][j] - b.data[i][j]; } } return t; } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE mat_t<Rows,Cols,Type> div(const mat_t<Rows,Cols,Type>& a, Type b) { mat_t<Rows,Cols,Type> t; for (unsigned i=0; i < Rows; ++i) { for (unsigned j=0; j < Cols; ++j) { t.data[i][j] = a.data[i][j]/b; } } return t; } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE mat_t<Rows,Cols,Type> div(Type b, const mat_t<Rows,Cols,Type>& a) { mat_t<Rows,Cols,Type> t; for (unsigned i=0; i < Rows; ++i) { for (unsigned j=0; j < Cols; ++j) { t.data[i][j] = b / a.data[i][j]; } } return t; } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE mat_t<Rows,Cols,Type> mul(const mat_t<Rows,Cols,Type>& a, Type b) { mat_t<Rows,Cols,Type> t; for (unsigned i=0; i < Rows; ++i) { for (unsigned j=0; j < Cols; ++j) { t.data[i][j] = a.data[i][j]*b; } } return t; } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE mat_t<Rows,Cols,Type> mul(Type b, const mat_t<Rows,Cols,Type>& a) { return mul(a,b); } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE mat_t<Rows,Cols,Type> operator*(Type b, const mat_t<Rows,Cols,Type>& a) { return mul(a,b); } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE mat_t<Rows,Cols,Type> operator*( const mat_t<Rows,Cols,Type>& a, Type b) { return mul(a,b); } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE vec_t<Rows,Type> mul(const mat_t<Rows,Cols,Type>& a, const vec_t<Cols,Type>& b) { vec_t<Rows,Type> r = a.get_col(0)*b[0]; for( unsigned i=1; i < Cols; ++i ) { r += a.get_col(i)*b[i]; } return r; } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE vec_t<Cols,Type> mul(const vec_t<Rows,Type>& b, const mat_t<Rows,Cols,Type>& a) { vec_t<Cols,Type> r = a.get_row(0)*b[0]; for( unsigned i=1; i < Rows; ++i ) { r += a.get_row(i)*b[i]; } return r; } template<unsigned Rows, unsigned Cols, unsigned ColsOut, typename Type> inline CUDA_CALLABLE mat_t<Rows,ColsOut,Type> mul(const mat_t<Rows,Cols,Type>& a, const mat_t<Cols,ColsOut,Type>& b) { mat_t<Rows,ColsOut,Type> t(0); for (unsigned i=0; i < Rows; ++i) { for (unsigned j=0; j < ColsOut; ++j) { for (unsigned k=0; k < Cols; ++k) { t.data[i][j] += a.data[i][k]*b.data[k][j]; } } } return t; } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE Type ddot(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b) { // double dot product between a and b: Type r(0); for (unsigned i=0; i < Rows; ++i) { for (unsigned j=0; j < Cols; ++j) { r += a.data[i][j] * b.data[i][j]; } } return r; } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE Type tensordot(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b) { // corresponds to `np.tensordot()` with all axes being contracted return ddot(a, b); } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE mat_t<Cols,Rows,Type> transpose(const mat_t<Rows,Cols,Type>& a) { mat_t<Cols,Rows,Type> t; for (unsigned i=0; i < Cols; ++i) { for (unsigned j=0; j < Rows; ++j) { t.data[i][j] = a.data[j][i]; } } return t; } // Only implementing determinants for 2x2, 3x3 and 4x4 matrices for now... 
template<typename Type> inline CUDA_CALLABLE Type determinant(const mat_t<2,2,Type>& m) { return m.data[0][0]*m.data[1][1] - m.data[1][0]*m.data[0][1]; } template<typename Type> inline CUDA_CALLABLE Type determinant(const mat_t<3,3,Type>& m) { return dot( vec_t<3,Type>(m.data[0][0],m.data[0][1],m.data[0][2]), cross( vec_t<3,Type>(m.data[1][0],m.data[1][1],m.data[1][2]), vec_t<3,Type>(m.data[2][0],m.data[2][1],m.data[2][2]) ) ); } template<typename Type> inline CUDA_CALLABLE Type determinant(const mat_t<4,4,Type>& m) { // adapted from USD GfMatrix4f::Inverse() Type x00, x01, x02, x03; Type x10, x11, x12, x13; Type x20, x21, x22, x23; Type x30, x31, x32, x33; double y01, y02, y03, y12, y13, y23; Type z00, z10, z20, z30; // Pickle 1st two columns of matrix into registers x00 = m.data[0][0]; x01 = m.data[0][1]; x10 = m.data[1][0]; x11 = m.data[1][1]; x20 = m.data[2][0]; x21 = m.data[2][1]; x30 = m.data[3][0]; x31 = m.data[3][1]; // Compute all six 2x2 determinants of 1st two columns y01 = x00*x11 - x10*x01; y02 = x00*x21 - x20*x01; y03 = x00*x31 - x30*x01; y12 = x10*x21 - x20*x11; y13 = x10*x31 - x30*x11; y23 = x20*x31 - x30*x21; // Pickle 2nd two columns of matrix into registers x02 = m.data[0][2]; x03 = m.data[0][3]; x12 = m.data[1][2]; x13 = m.data[1][3]; x22 = m.data[2][2]; x23 = m.data[2][3]; x32 = m.data[3][2]; x33 = m.data[3][3]; // Compute all six 2x2 determinants of 2nd two columns y01 = x02*x13 - x12*x03; y02 = x02*x23 - x22*x03; y03 = x02*x33 - x32*x03; y12 = x12*x23 - x22*x13; y13 = x12*x33 - x32*x13; y23 = x22*x33 - x32*x23; // Compute all 3x3 cofactors for 1st two columns z30 = x11*y02 - x21*y01 - x01*y12; z20 = x01*y13 - x11*y03 + x31*y01; z10 = x21*y03 - x31*y02 - x01*y23; z00 = x11*y23 - x21*y13 + x31*y12; // compute 4x4 determinant & its reciprocal double det = x30*z30 + x20*z20 + x10*z10 + x00*z00; return det; } template<unsigned Rows, typename Type> inline CUDA_CALLABLE Type trace(const mat_t<Rows,Rows,Type>& m) { Type ret = m.data[0][0]; for( unsigned i=1; i < Rows; ++i ) { ret += m.data[i][i]; } return ret; } template<unsigned Rows, typename Type> inline CUDA_CALLABLE vec_t<Rows, Type> get_diag(const mat_t<Rows,Rows,Type>& m) { vec_t<Rows, Type> ret; for( unsigned i=0; i < Rows; ++i ) { ret[i] = m.data[i][i]; } return ret; } // Only implementing inverses for 2x2, 3x3 and 4x4 matrices for now... 
template<typename Type> inline CUDA_CALLABLE mat_t<2,2,Type> inverse(const mat_t<2,2,Type>& m) { Type det = determinant(m); if (det > Type(kEps) || det < -Type(kEps)) { return mat_t<2,2,Type>( m.data[1][1], -m.data[0][1], -m.data[1][0], m.data[0][0])*(Type(1.0f)/det); } else { return mat_t<2,2,Type>(); } } template<typename Type> inline CUDA_CALLABLE mat_t<3,3,Type> inverse(const mat_t<3,3,Type>& m) { Type det = determinant(m); if (det != Type(0.0f)) { mat_t<3,3,Type> b; b.data[0][0] = m.data[1][1]*m.data[2][2] - m.data[1][2]*m.data[2][1]; b.data[1][0] = m.data[1][2]*m.data[2][0] - m.data[1][0]*m.data[2][2]; b.data[2][0] = m.data[1][0]*m.data[2][1] - m.data[1][1]*m.data[2][0]; b.data[0][1] = m.data[0][2]*m.data[2][1] - m.data[0][1]*m.data[2][2]; b.data[1][1] = m.data[0][0]*m.data[2][2] - m.data[0][2]*m.data[2][0]; b.data[2][1] = m.data[0][1]*m.data[2][0] - m.data[0][0]*m.data[2][1]; b.data[0][2] = m.data[0][1]*m.data[1][2] - m.data[0][2]*m.data[1][1]; b.data[1][2] = m.data[0][2]*m.data[1][0] - m.data[0][0]*m.data[1][2]; b.data[2][2] = m.data[0][0]*m.data[1][1] - m.data[0][1]*m.data[1][0]; return b*(Type(1.0f)/det); } else { return mat_t<3,3,Type>(); } } template<typename Type> inline CUDA_CALLABLE mat_t<4,4,Type> inverse(const mat_t<4,4,Type>& m) { // adapted from USD GfMatrix4f::Inverse() Type x00, x01, x02, x03; Type x10, x11, x12, x13; Type x20, x21, x22, x23; Type x30, x31, x32, x33; double y01, y02, y03, y12, y13, y23; Type z00, z10, z20, z30; Type z01, z11, z21, z31; double z02, z03, z12, z13, z22, z23, z32, z33; // Pickle 1st two columns of matrix into registers x00 = m.data[0][0]; x01 = m.data[0][1]; x10 = m.data[1][0]; x11 = m.data[1][1]; x20 = m.data[2][0]; x21 = m.data[2][1]; x30 = m.data[3][0]; x31 = m.data[3][1]; // Compute all six 2x2 determinants of 1st two columns y01 = x00*x11 - x10*x01; y02 = x00*x21 - x20*x01; y03 = x00*x31 - x30*x01; y12 = x10*x21 - x20*x11; y13 = x10*x31 - x30*x11; y23 = x20*x31 - x30*x21; // Pickle 2nd two columns of matrix into registers x02 = m.data[0][2]; x03 = m.data[0][3]; x12 = m.data[1][2]; x13 = m.data[1][3]; x22 = m.data[2][2]; x23 = m.data[2][3]; x32 = m.data[3][2]; x33 = m.data[3][3]; // Compute all 3x3 cofactors for 2nd two columns */ z33 = x02*y12 - x12*y02 + x22*y01; z23 = x12*y03 - x32*y01 - x02*y13; z13 = x02*y23 - x22*y03 + x32*y02; z03 = x22*y13 - x32*y12 - x12*y23; z32 = x13*y02 - x23*y01 - x03*y12; z22 = x03*y13 - x13*y03 + x33*y01; z12 = x23*y03 - x33*y02 - x03*y23; z02 = x13*y23 - x23*y13 + x33*y12; // Compute all six 2x2 determinants of 2nd two columns y01 = x02*x13 - x12*x03; y02 = x02*x23 - x22*x03; y03 = x02*x33 - x32*x03; y12 = x12*x23 - x22*x13; y13 = x12*x33 - x32*x13; y23 = x22*x33 - x32*x23; // Compute all 3x3 cofactors for 1st two columns z30 = x11*y02 - x21*y01 - x01*y12; z20 = x01*y13 - x11*y03 + x31*y01; z10 = x21*y03 - x31*y02 - x01*y23; z00 = x11*y23 - x21*y13 + x31*y12; z31 = x00*y12 - x10*y02 + x20*y01; z21 = x10*y03 - x30*y01 - x00*y13; z11 = x00*y23 - x20*y03 + x30*y02; z01 = x20*y13 - x30*y12 - x10*y23; // compute 4x4 determinant & its reciprocal double det = x30*z30 + x20*z20 + x10*z10 + x00*z00; if(fabs(det) > kEps) { mat_t<4,4,Type> invm; double rcp = 1.0 / det; // Multiply all 3x3 cofactors by reciprocal & transpose invm.data[0][0] = Type(z00*rcp); invm.data[0][1] = Type(z10*rcp); invm.data[1][0] = Type(z01*rcp); invm.data[0][2] = Type(z20*rcp); invm.data[2][0] = Type(z02*rcp); invm.data[0][3] = Type(z30*rcp); invm.data[3][0] = Type(z03*rcp); invm.data[1][1] = Type(z11*rcp); invm.data[1][2] = 
Type(z21*rcp); invm.data[2][1] = Type(z12*rcp); invm.data[1][3] = Type(z31*rcp); invm.data[3][1] = Type(z13*rcp); invm.data[2][2] = Type(z22*rcp); invm.data[2][3] = Type(z32*rcp); invm.data[3][2] = Type(z23*rcp); invm.data[3][3] = Type(z33*rcp); return invm; } else { return mat_t<4,4,Type>(); } } template<unsigned Rows,typename Type> inline CUDA_CALLABLE mat_t<Rows,Rows,Type> diag(const vec_t<Rows,Type>& d) { mat_t<Rows,Rows,Type> ret(Type(0)); for (unsigned i=0; i < Rows; ++i) { ret.data[i][i] = d[i]; } return ret; } template<unsigned Rows,unsigned Cols,typename Type> inline CUDA_CALLABLE mat_t<Rows,Cols,Type> outer(const vec_t<Rows,Type>& a, const vec_t<Cols,Type>& b) { // col 0 = a * b[0] etc... mat_t<Rows,Cols,Type> ret; for (unsigned row=0; row < Rows; ++row) { for (unsigned col=0; col < Cols; ++col) // columns { ret.data[row][col] = a[row] * b[col]; } } return ret; } template<unsigned Cols,typename Type> inline CUDA_CALLABLE vec_t<Cols,Type> outer(Type a, const vec_t<Cols,Type>& b) { return mul(a, b); } template<unsigned Rows,typename Type> inline CUDA_CALLABLE vec_t<Rows,Type> outer(const vec_t<Rows,Type>& a, Type b) { return mul(a, b); } template<typename Type> inline CUDA_CALLABLE mat_t<3,3,Type> skew(const vec_t<3,Type>& a) { mat_t<3,3,Type> out( Type(0), -a[2], a[1], a[2], Type(0), -a[0], -a[1], a[0], Type(0) ); return out; } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE mat_t<Rows,Cols,Type> cw_mul(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b) { mat_t<Rows,Cols,Type> t; for (unsigned i=0; i < Rows; ++i) { for (unsigned j=0; j < Cols; ++j) { t.data[i][j] = a.data[i][j] * b.data[i][j]; } } return t; } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE mat_t<Rows,Cols,Type> cw_div(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b) { mat_t<Rows,Cols,Type> t; for (unsigned i=0; i < Rows; ++i) { for (unsigned j=0; j < Cols; ++j) { t.data[i][j] = a.data[i][j] / b.data[i][j]; } } return t; } template<typename Type> inline CUDA_CALLABLE vec_t<3,Type> transform_point(const mat_t<4,4,Type>& m, const vec_t<3,Type>& v) { vec_t<4,Type> out = mul(m, vec_t<4,Type>(v[0], v[1], v[2], Type(1))); return vec_t<3,Type>(out[0], out[1], out[2]); } template<typename Type> inline CUDA_CALLABLE vec_t<3,Type> transform_vector(const mat_t<4,4,Type>& m, const vec_t<3,Type>& v) { vec_t<4,Type> out = mul(m, vec_t<4,Type>(v[0], v[1], v[2], 0.f)); return vec_t<3,Type>(out[0], out[1], out[2]); } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE void adj_extract(const mat_t<Rows,Cols,Type>& m, int row, mat_t<Rows,Cols,Type>& adj_m, int& adj_row, const vec_t<Cols,Type>& adj_ret) { for( unsigned col=0; col < Cols; ++col ) adj_m.data[row][col] += adj_ret[col]; } template<unsigned Rows, unsigned Cols, typename Type> inline void CUDA_CALLABLE adj_extract(const mat_t<Rows,Cols,Type>& m, int row, int col, mat_t<Rows,Cols,Type>& adj_m, int& adj_row, int& adj_col, Type adj_ret) { #ifndef NDEBUG if (row < 0 || row > Rows) { printf("mat row index %d out of bounds at %s %d\n", row, __FILE__, __LINE__); assert(0); } if (col < 0 || col > Cols) { printf("mat col index %d out of bounds at %s %d\n", col, __FILE__, __LINE__); assert(0); } #endif adj_m.data[row][col] += adj_ret; } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE void adj_outer(const vec_t<Rows,Type>& a, const vec_t<Cols,Type>& b, vec_t<Rows,Type>& adj_a, vec_t<Cols,Type>& adj_b, const mat_t<Rows,Cols,Type>& adj_ret) { adj_a += 
mul(adj_ret, b); adj_b += mul(transpose(adj_ret), a); } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE void adj_add(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b, mat_t<Rows,Cols,Type>& adj_a, mat_t<Rows,Cols,Type>& adj_b, const mat_t<Rows,Cols,Type>& adj_ret) { for (unsigned i=0; i < Rows; ++i) { for (unsigned j=0; j < Cols; ++j) { adj_a.data[i][j] += adj_ret.data[i][j]; adj_b.data[i][j] += adj_ret.data[i][j]; } } } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE void adj_sub(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b, mat_t<Rows,Cols,Type>& adj_a, mat_t<Rows,Cols,Type>& adj_b, const mat_t<Rows,Cols,Type>& adj_ret) { for (unsigned i=0; i < Rows; ++i) { for (unsigned j=0; j < Cols; ++j) { adj_a.data[i][j] += adj_ret.data[i][j]; adj_b.data[i][j] -= adj_ret.data[i][j]; } } } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE void adj_div(const mat_t<Rows,Cols,Type>& a, Type s, mat_t<Rows,Cols,Type>& adj_a, Type& adj_s, const mat_t<Rows,Cols,Type>& adj_ret) { adj_s -= tensordot(a , adj_ret)/ (s * s); // - a / s^2 for (unsigned i=0; i < Rows; ++i) { for (unsigned j=0; j < Cols; ++j) { adj_a.data[i][j] += adj_ret.data[i][j] / s; } } } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE void adj_div(Type s, const mat_t<Rows,Cols,Type>& a, Type& adj_s, mat_t<Rows,Cols,Type>& adj_a, const mat_t<Rows,Cols,Type>& adj_ret) { adj_s -= tensordot(a , adj_ret)/ (s * s); // - a / s^2 for (unsigned i=0; i < Rows; ++i) { for (unsigned j=0; j < Cols; ++j) { adj_a.data[i][j] += s / adj_ret.data[i][j]; } } } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE void adj_mul(const mat_t<Rows,Cols,Type>& a, Type b, mat_t<Rows,Cols,Type>& adj_a, Type& adj_b, const mat_t<Rows,Cols,Type>& adj_ret) { for (unsigned i=0; i < Rows; ++i) { for (unsigned j=0; j < Cols; ++j) { adj_a.data[i][j] += b*adj_ret.data[i][j]; adj_b += a.data[i][j]*adj_ret.data[i][j]; } } } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE void adj_mul(Type b, const mat_t<Rows,Cols,Type>& a, Type& adj_b, mat_t<Rows,Cols,Type>& adj_a, const mat_t<Rows,Cols,Type>& adj_ret) { adj_mul(a, b, adj_a, adj_b, adj_ret); } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE void adj_ddot(mat_t<Rows,Cols,Type> a, mat_t<Rows,Cols,Type> b, mat_t<Rows,Cols,Type>& adj_a, mat_t<Rows,Cols,Type>& adj_b, const Type adj_ret) { adj_a += b*adj_ret; adj_b += a*adj_ret; } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE void adj_mul(const mat_t<Rows,Cols,Type>& a, const vec_t<Cols,Type>& b, mat_t<Rows,Cols,Type>& adj_a, vec_t<Cols,Type>& adj_b, const vec_t<Rows,Type>& adj_ret) { adj_a += outer(adj_ret, b); adj_b += mul(transpose(a), adj_ret); } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE void adj_mul(const vec_t<Rows,Type>& b, const mat_t<Rows,Cols,Type>& a, vec_t<Rows,Type>& adj_b, mat_t<Rows,Cols,Type>& adj_a, const vec_t<Cols,Type>& adj_ret) { adj_a += outer(b, adj_ret); adj_b += mul(adj_ret, transpose(a)); } template<unsigned Rows, unsigned Cols, unsigned ColsOut, typename Type> inline CUDA_CALLABLE void adj_mul(const mat_t<Rows,Cols,Type>& a, const mat_t<Cols,ColsOut,Type>& b, mat_t<Rows,Cols,Type>& adj_a, mat_t<Cols,ColsOut,Type>& adj_b, const mat_t<Rows,ColsOut,Type>& adj_ret) { adj_a += mul(adj_ret, transpose(b)); adj_b += mul(transpose(a), adj_ret); } template<unsigned Rows, unsigned Cols, typename 
Type> inline CUDA_CALLABLE void adj_transpose(const mat_t<Rows,Cols,Type>& a, mat_t<Rows,Cols,Type>& adj_a, const mat_t<Cols,Rows,Type>& adj_ret) { adj_a += transpose(adj_ret); } template<unsigned Rows, typename Type> inline CUDA_CALLABLE void adj_trace(const mat_t<Rows,Rows,Type>& m, mat_t<Rows,Rows,Type>& adj_m, Type adj_ret) { for (unsigned i=0; i < Rows; ++i) adj_m.data[i][i] += adj_ret; } template<unsigned Rows, typename Type> inline CUDA_CALLABLE void adj_diag(const vec_t<Rows,Type>& d, vec_t<Rows,Type>& adj_d, const mat_t<Rows,Rows,Type>& adj_ret) { for (unsigned i=0; i < Rows; ++i) adj_d[i] += adj_ret.data[i][i]; } template<unsigned Rows, typename Type> inline CUDA_CALLABLE void adj_get_diag(const mat_t<Rows,Rows,Type>& m, mat_t<Rows,Rows,Type>& adj_m, const vec_t<Rows,Type>& adj_ret) { for (unsigned i=0; i < Rows; ++i) adj_m.data[i][i] += adj_ret[i]; } template<typename Type> inline CUDA_CALLABLE void adj_determinant(const mat_t<2,2,Type>& m, mat_t<2,2,Type>& adj_m, Type adj_ret) { adj_m.data[0][0] += m.data[1][1]*adj_ret; adj_m.data[1][1] += m.data[0][0]*adj_ret; adj_m.data[0][1] -= m.data[1][0]*adj_ret; adj_m.data[1][0] -= m.data[0][1]*adj_ret; } template<typename Type> inline CUDA_CALLABLE void adj_determinant(const mat_t<3,3,Type>& m, mat_t<3,3,Type>& adj_m, Type adj_ret) { (vec_t<3,Type>&)adj_m.data[0] += cross(m.get_row(1), m.get_row(2))*adj_ret; (vec_t<3,Type>&)adj_m.data[1] += cross(m.get_row(2), m.get_row(0))*adj_ret; (vec_t<3,Type>&)adj_m.data[2] += cross(m.get_row(0), m.get_row(1))*adj_ret; } template<typename Type> inline CUDA_CALLABLE void adj_determinant(const mat_t<4,4,Type>& m, mat_t<4,4,Type>& adj_m, Type adj_ret) { // adapted from USD GfMatrix4f::Inverse() Type x00, x01, x02, x03; Type x10, x11, x12, x13; Type x20, x21, x22, x23; Type x30, x31, x32, x33; double y01, y02, y03, y12, y13, y23; Type z00, z10, z20, z30; Type z01, z11, z21, z31; double z02, z03, z12, z13, z22, z23, z32, z33; // Pickle 1st two columns of matrix into registers x00 = m.data[0][0]; x01 = m.data[0][1]; x10 = m.data[1][0]; x11 = m.data[1][1]; x20 = m.data[2][0]; x21 = m.data[2][1]; x30 = m.data[3][0]; x31 = m.data[3][1]; // Compute all six 2x2 determinants of 1st two columns y01 = x00*x11 - x10*x01; y02 = x00*x21 - x20*x01; y03 = x00*x31 - x30*x01; y12 = x10*x21 - x20*x11; y13 = x10*x31 - x30*x11; y23 = x20*x31 - x30*x21; // Pickle 2nd two columns of matrix into registers x02 = m.data[0][2]; x03 = m.data[0][3]; x12 = m.data[1][2]; x13 = m.data[1][3]; x22 = m.data[2][2]; x23 = m.data[2][3]; x32 = m.data[3][2]; x33 = m.data[3][3]; // Compute all 3x3 cofactors for 2nd two columns */ z33 = x02*y12 - x12*y02 + x22*y01; z23 = x12*y03 - x32*y01 - x02*y13; z13 = x02*y23 - x22*y03 + x32*y02; z03 = x22*y13 - x32*y12 - x12*y23; z32 = x13*y02 - x23*y01 - x03*y12; z22 = x03*y13 - x13*y03 + x33*y01; z12 = x23*y03 - x33*y02 - x03*y23; z02 = x13*y23 - x23*y13 + x33*y12; // Compute all six 2x2 determinants of 2nd two columns y01 = x02*x13 - x12*x03; y02 = x02*x23 - x22*x03; y03 = x02*x33 - x32*x03; y12 = x12*x23 - x22*x13; y13 = x12*x33 - x32*x13; y23 = x22*x33 - x32*x23; // Compute all 3x3 cofactors for 1st two columns z30 = x11*y02 - x21*y01 - x01*y12; z20 = x01*y13 - x11*y03 + x31*y01; z10 = x21*y03 - x31*y02 - x01*y23; z00 = x11*y23 - x21*y13 + x31*y12; z31 = x00*y12 - x10*y02 + x20*y01; z21 = x10*y03 - x30*y01 - x00*y13; z11 = x00*y23 - x20*y03 + x30*y02; z01 = x20*y13 - x30*y12 - x10*y23; // Multiply all 3x3 cofactors by adjoint & transpose adj_m.data[0][0] += Type(z00*adj_ret); adj_m.data[1][0] += 
Type(z10*adj_ret); adj_m.data[0][1] += Type(z01*adj_ret); adj_m.data[2][0] += Type(z20*adj_ret); adj_m.data[0][2] += Type(z02*adj_ret); adj_m.data[3][0] += Type(z30*adj_ret); adj_m.data[0][3] += Type(z03*adj_ret); adj_m.data[1][1] += Type(z11*adj_ret); adj_m.data[2][1] += Type(z21*adj_ret); adj_m.data[1][2] += Type(z12*adj_ret); adj_m.data[3][1] += Type(z31*adj_ret); adj_m.data[1][3] += Type(z13*adj_ret); adj_m.data[2][2] += Type(z22*adj_ret); adj_m.data[3][2] += Type(z32*adj_ret); adj_m.data[2][3] += Type(z23*adj_ret); adj_m.data[3][3] += Type(z33*adj_ret); } template<unsigned Rows, typename Type> inline CUDA_CALLABLE void adj_inverse(const mat_t<Rows,Rows,Type>& m, mat_t<Rows,Rows,Type>& ret, mat_t<Rows,Rows,Type>& adj_m, const mat_t<Rows,Rows,Type>& adj_ret) { // todo: how to cache this from the forward pass? mat_t<Rows,Rows,Type> invt = transpose(ret); // see https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf 2.2.3 adj_m -= mul(mul(invt, adj_ret), invt); } template<typename Type> inline CUDA_CALLABLE void adj_transform_point(const mat_t<4,4,Type>& m, const vec_t<3,Type>& v, mat_t<4,4,Type>& adj_m, vec_t<3,Type>& adj_v, const vec_t<3,Type>& adj_ret) { vec_t<4,Type> out = vec_t<4,Type>(v[0], v[1], v[2], 1.f); adj_m = add(adj_m, transpose(mat_t<4,4,Type>(adj_ret[0] * out, adj_ret[1] * out, adj_ret[2] * out, vec_t<4,Type>()))); adj_v[0] += dot(vec_t<3,Type>(m.data[0][0], m.data[1][0], m.data[2][0]), adj_ret); adj_v[1] += dot(vec_t<3,Type>(m.data[0][1], m.data[1][1], m.data[2][1]), adj_ret); adj_v[2] += dot(vec_t<3,Type>(m.data[0][2], m.data[1][2], m.data[2][2]), adj_ret); } template<typename Type> inline CUDA_CALLABLE void adj_transform_vector(const mat_t<4,4,Type>& m, const vec_t<3,Type>& v, mat_t<4,4,Type>& adj_m, vec_t<3,Type>& adj_v, const vec_t<3,Type>& adj_ret) { vec_t<4,Type> out = vec_t<4,Type>(v[0], v[1], v[2], 0.f); adj_m = add(adj_m, transpose(mat_t<4,4,Type>(adj_ret[0] * out, adj_ret[1] * out, adj_ret[2] * out, vec_t<4,Type>()))); adj_v[0] += dot(vec_t<3,Type>(m.data[0][0], m.data[1][0], m.data[2][0]), adj_ret); adj_v[1] += dot(vec_t<3,Type>(m.data[0][1], m.data[1][1], m.data[2][1]), adj_ret); adj_v[2] += dot(vec_t<3,Type>(m.data[0][2], m.data[1][2], m.data[2][2]), adj_ret); } template<typename Type> inline CUDA_CALLABLE void adj_skew(const vec_t<3,Type>& a, vec_t<3,Type>& adj_a, const mat_t<3,3,Type>& adj_ret) { adj_a[0] += adj_ret.data[2][1] - adj_ret.data[1][2]; adj_a[1] += adj_ret.data[0][2] - adj_ret.data[2][0]; adj_a[2] += adj_ret.data[1][0] - adj_ret.data[0][1]; } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE void adj_cw_mul(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b, mat_t<Rows,Cols,Type>& adj_a, mat_t<Rows,Cols,Type>& adj_b, const mat_t<Rows,Cols,Type>& adj_ret) { adj_a += cw_mul(b, adj_ret); adj_b += cw_mul(a, adj_ret); } template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE void adj_cw_div(const mat_t<Rows,Cols,Type>& a, const mat_t<Rows,Cols,Type>& b, mat_t<Rows,Cols,Type>& ret, mat_t<Rows,Cols,Type>& adj_a, mat_t<Rows,Cols,Type>& adj_b, const mat_t<Rows,Cols,Type>& adj_ret) { adj_a += cw_div(adj_ret, b); adj_b -= cw_mul(adj_ret, cw_div(ret, b)); } // adjoint for the constant constructor: template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE void adj_mat_t(Type s, Type& adj_s, const mat_t<Rows, Cols, Type>& adj_ret) { for (unsigned i=0; i < Rows; ++i) { for (unsigned j=0; j < Cols; ++j) { adj_s += adj_ret.data[i][j]; } } } // adjoint for the casting constructor: 
template<unsigned Rows, unsigned Cols, typename Type, typename OtherType> inline CUDA_CALLABLE void adj_mat_t(const mat_t<Rows, Cols, OtherType>& other, mat_t<Rows, Cols, OtherType>& adj_other, const mat_t<Rows, Cols, Type>& adj_ret) { for (unsigned i=0; i < Rows; ++i) { for (unsigned j=0; j < Cols; ++j) { adj_other.data[i][j] += adj_ret.data[i][j]; } } } // adjoint for the initializer_array scalar constructor: template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE void adj_mat_t(const initializer_array<Rows * Cols, Type> &cmps, const initializer_array<Rows * Cols, Type*> &adj_cmps, const mat_t<Rows, Cols, Type>& adj_ret) { for (unsigned i=0; i < Rows; ++i) { for (unsigned j=0; j < Cols; ++j) { *adj_cmps[i * Cols + j] += adj_ret.data[i][j]; } } } template<typename Type> inline CUDA_CALLABLE void adj_mat_t(Type m00, Type m01, Type m10, Type m11, Type& adj_m00, Type& adj_m01, Type& adj_m10, Type& adj_m11, const mat_t<2, 2, Type>& adj_ret) { adj_m00 += adj_ret.data[0][0]; adj_m01 += adj_ret.data[0][1]; adj_m10 += adj_ret.data[1][0]; adj_m11 += adj_ret.data[1][1]; } template<typename Type> inline CUDA_CALLABLE void adj_mat_t(Type m00, Type m01, Type m02, Type m10, Type m11, Type m12, Type m20, Type m21, Type m22, Type& a00, Type& a01, Type& a02, Type& a10, Type& a11, Type& a12, Type& a20, Type& a21, Type& a22, const mat_t<3, 3, Type>& adj_ret) { a00 += adj_ret.data[0][0]; a01 += adj_ret.data[0][1]; a02 += adj_ret.data[0][2]; a10 += adj_ret.data[1][0]; a11 += adj_ret.data[1][1]; a12 += adj_ret.data[1][2]; a20 += adj_ret.data[2][0]; a21 += adj_ret.data[2][1]; a22 += adj_ret.data[2][2]; } template<typename Type> inline CUDA_CALLABLE void adj_mat_t(Type m00, Type m01, Type m02, Type m03, Type m10, Type m11, Type m12, Type m13, Type m20, Type m21, Type m22, Type m23, Type m30, Type m31, Type m32, Type m33, Type& a00, Type& a01, Type& a02, Type& a03, Type& a10, Type& a11, Type& a12, Type& a13, Type& a20, Type& a21, Type& a22, Type& a23, Type& a30, Type& a31, Type& a32, Type& a33, const mat_t<4, 4, Type>& adj_ret) { a00 += adj_ret.data[0][0]; a01 += adj_ret.data[0][1]; a02 += adj_ret.data[0][2]; a03 += adj_ret.data[0][3]; a10 += adj_ret.data[1][0]; a11 += adj_ret.data[1][1]; a12 += adj_ret.data[1][2]; a13 += adj_ret.data[1][3]; a20 += adj_ret.data[2][0]; a21 += adj_ret.data[2][1]; a22 += adj_ret.data[2][2]; a23 += adj_ret.data[2][3]; a30 += adj_ret.data[3][0]; a31 += adj_ret.data[3][1]; a32 += adj_ret.data[3][2]; a33 += adj_ret.data[3][3]; } // adjoint for the initializer_array vector constructor: template<unsigned Rows, unsigned Cols, typename Type> inline CUDA_CALLABLE void adj_mat_t(const initializer_array<Cols, vec_t<Rows,Type> > &cmps, const initializer_array<Cols, vec_t<Rows,Type>* > &adj_cmps, const mat_t<Rows, Cols, Type>& adj_ret) { for (unsigned j=0; j < Cols; ++j) { for (unsigned i=0; i < Rows; ++i) { (*adj_cmps[j])[i] += adj_ret.data[i][j]; } } } template<typename Type> inline CUDA_CALLABLE void adj_mat_t(const vec_t<2,Type> &cmps0, const vec_t<2,Type> &cmps1, vec_t<2,Type> &adj_cmps0, vec_t<2,Type> &adj_cmps1, const mat_t<2, 2, Type>& adj_ret) { for (unsigned i=0; i < 2; ++i) { adj_cmps0[i] += adj_ret.data[i][0]; adj_cmps1[i] += adj_ret.data[i][1]; } } template<typename Type> inline CUDA_CALLABLE void adj_mat_t(const vec_t<3,Type> &cmps0, const vec_t<3,Type> &cmps1, const vec_t<3,Type> &cmps2, vec_t<3,Type> &adj_cmps0, vec_t<3,Type> &adj_cmps1, vec_t<3,Type> &adj_cmps2, const mat_t<3, 3, Type>& adj_ret) { for (unsigned i=0; i < 3; ++i) { adj_cmps0[i] += 
adj_ret.data[i][0]; adj_cmps1[i] += adj_ret.data[i][1]; adj_cmps2[i] += adj_ret.data[i][2]; } } template<typename Type> inline CUDA_CALLABLE void adj_mat_t(const vec_t<4,Type> &cmps0, const vec_t<4,Type> &cmps1, const vec_t<4,Type> &cmps2, const vec_t<4,Type> &cmps3, vec_t<4,Type> &adj_cmps0, vec_t<4,Type> &adj_cmps1, vec_t<4,Type> &adj_cmps2, vec_t<4,Type> &adj_cmps3, const mat_t<4, 4, Type>& adj_ret) { for (unsigned i=0; i < 4; ++i) { adj_cmps0[i] += adj_ret.data[i][0]; adj_cmps1[i] += adj_ret.data[i][1]; adj_cmps2[i] += adj_ret.data[i][2]; adj_cmps3[i] += adj_ret.data[i][3]; } } template<unsigned Rows, unsigned Cols, typename Type> CUDA_CALLABLE inline mat_t<Rows, Cols, Type> lerp(const mat_t<Rows, Cols, Type>& a, const mat_t<Rows, Cols, Type>& b, Type t) { return a*(Type(1)-t) + b*t; } template<unsigned Rows, unsigned Cols, typename Type> CUDA_CALLABLE inline void adj_lerp(const mat_t<Rows, Cols, Type>& a, const mat_t<Rows, Cols, Type>& b, Type t, mat_t<Rows, Cols, Type>& adj_a, mat_t<Rows, Cols, Type>& adj_b, Type& adj_t, const mat_t<Rows, Cols, Type>& adj_ret) { adj_a += adj_ret*(Type(1)-t); adj_b += adj_ret*t; adj_t += tensordot(b, adj_ret) - tensordot(a, adj_ret); } // for integral types we do not accumulate gradients template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, int8>* buf, const mat_t<Rows, Cols, int8> &value) { } template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, uint8>* buf, const mat_t<Rows, Cols, uint8> &value) { } template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, int16>* buf, const mat_t<Rows, Cols, int16> &value) { } template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, uint16>* buf, const mat_t<Rows, Cols, uint16> &value) { } template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, int32>* buf, const mat_t<Rows, Cols, int32> &value) { } template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, uint32>* buf, const mat_t<Rows, Cols, uint32> &value) { } template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, int64>* buf, const mat_t<Rows, Cols, int64> &value) { } template<unsigned Rows, unsigned Cols> CUDA_CALLABLE inline void adj_atomic_add(mat_t<Rows, Cols, uint64>* buf, const mat_t<Rows, Cols, uint64> &value) { } using mat22h = mat_t<2,2,half>; using mat33h = mat_t<3,3,half>; using mat44h = mat_t<4,4,half>; using mat22 = mat_t<2,2,float>; using mat33 = mat_t<3,3,float>; using mat44 = mat_t<4,4,float>; using mat22f = mat_t<2,2,float>; using mat33f = mat_t<3,3,float>; using mat44f = mat_t<4,4,float>; using mat22d = mat_t<2,2,double>; using mat33d = mat_t<3,3,double>; using mat44d = mat_t<4,4,double>; inline CUDA_CALLABLE void adj_mat22(vec2 c0, vec2 c1, vec2& a0, vec2& a1, const mat22& adj_ret) { a0 += adj_ret.get_col(0); a1 += adj_ret.get_col(1); } inline CUDA_CALLABLE void adj_mat22(float m00, float m01, float m10, float m11, float& adj_m00, float& adj_m01, float& adj_m10, float& adj_m11, const mat22& adj_ret) { adj_m00 += adj_ret.data[0][0]; adj_m01 += adj_ret.data[0][1]; adj_m10 += adj_ret.data[1][0]; adj_m11 += adj_ret.data[1][1]; } inline CUDA_CALLABLE void adj_mat33(vec3 c0, vec3 c1, vec3 c2, vec3& a0, vec3& a1, vec3& a2, const mat33& adj_ret) { // column constructor a0 += adj_ret.get_col(0); a1 += adj_ret.get_col(1); a2 += adj_ret.get_col(2); } inline 
CUDA_CALLABLE void adj_mat33(float m00, float m01, float m02, float m10, float m11, float m12, float m20, float m21, float m22, float& a00, float& a01, float& a02, float& a10, float& a11, float& a12, float& a20, float& a21, float& a22, const mat33& adj_ret) { a00 += adj_ret.data[0][0]; a01 += adj_ret.data[0][1]; a02 += adj_ret.data[0][2]; a10 += adj_ret.data[1][0]; a11 += adj_ret.data[1][1]; a12 += adj_ret.data[1][2]; a20 += adj_ret.data[2][0]; a21 += adj_ret.data[2][1]; a22 += adj_ret.data[2][2]; } inline CUDA_CALLABLE void adj_mat44( vec4 c0, vec4 c1, vec4 c2, vec4 c3, vec4& a0, vec4& a1, vec4& a2, vec4& a3, const mat44& adj_ret) { // column constructor a0 += adj_ret.get_col(0); a1 += adj_ret.get_col(1); a2 += adj_ret.get_col(2); a3 += adj_ret.get_col(3); } inline CUDA_CALLABLE void adj_mat44(float m00, float m01, float m02, float m03, float m10, float m11, float m12, float m13, float m20, float m21, float m22, float m23, float m30, float m31, float m32, float m33, float& a00, float& a01, float& a02, float& a03, float& a10, float& a11, float& a12, float& a13, float& a20, float& a21, float& a22, float& a23, float& a30, float& a31, float& a32, float& a33, const mat44& adj_ret) { a00 += adj_ret.data[0][0]; a01 += adj_ret.data[0][1]; a02 += adj_ret.data[0][2]; a03 += adj_ret.data[0][3]; a10 += adj_ret.data[1][0]; a11 += adj_ret.data[1][1]; a12 += adj_ret.data[1][2]; a13 += adj_ret.data[1][3]; a20 += adj_ret.data[2][0]; a21 += adj_ret.data[2][1]; a22 += adj_ret.data[2][2]; a23 += adj_ret.data[2][3]; a30 += adj_ret.data[3][0]; a31 += adj_ret.data[3][1]; a32 += adj_ret.data[3][2]; a33 += adj_ret.data[3][3]; } } // namespace wp
48,690
C++
30.494825
285
0.571822
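The adj_lerp adjoint at the end of mat.h accumulates adj_a += adj_ret*(1-t), adj_b += adj_ret*t, and adj_t += tensordot(b, adj_ret) - tensordot(a, adj_ret), which follows from d/dt lerp(a, b, t) = b - a. Below is a minimal standalone sketch of that math on a flat 2x2 matrix; it uses plain std::array instead of wp::mat_t, and tensordot is assumed here to be the element-wise sum of products (matching how adj_lerp uses it). The finite-difference check is purely illustrative.

// Standalone sketch of the mat.h lerp/adj_lerp math on a flat 2x2 matrix
// (plain std::array stand-in for wp::mat_t; names and values are illustrative only).
#include <array>
#include <cstdio>

using mat22 = std::array<double, 4>; // row-major 2x2

mat22 lerp(const mat22& a, const mat22& b, double t)
{
    mat22 r{};
    for (int i = 0; i < 4; ++i)
        r[i] = a[i] * (1.0 - t) + b[i] * t;
    return r;
}

// assumed definition: tensordot(a, b) = sum of element-wise products,
// which is how adj_lerp combines the incoming gradient with b - a
double tensordot(const mat22& a, const mat22& b)
{
    double s = 0.0;
    for (int i = 0; i < 4; ++i)
        s += a[i] * b[i];
    return s;
}

int main()
{
    const mat22 a{1.0, 2.0, 3.0, 4.0};
    const mat22 b{-2.0, 0.5, 1.5, 7.0};
    const mat22 adj_ret{0.1, -0.3, 0.2, 0.4}; // incoming gradient of the output
    const double t = 0.3;

    // reverse-mode gradient of t, mirroring adj_lerp: d/dt lerp(a, b, t) = b - a
    const double adj_t = tensordot(b, adj_ret) - tensordot(a, adj_ret);

    // finite-difference check of the same directional derivative
    const double eps = 1e-6;
    const double fd = (tensordot(lerp(a, b, t + eps), adj_ret) -
                       tensordot(lerp(a, b, t - eps), adj_ret)) / (2.0 * eps);

    std::printf("adj_t = %.6f  finite-difference = %.6f\n", adj_t, fd);
    return 0;
}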
NVIDIA/warp/warp/native/hashgrid.cpp
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #include "warp.h" #include "cuda_util.h" #include "hashgrid.h" #include "sort.h" #include "string.h" using namespace wp; #include <map> namespace { // host-side copy of mesh descriptors, maps GPU mesh address (id) to a CPU desc std::map<uint64_t, HashGrid> g_hash_grid_descriptors; } // anonymous namespace namespace wp { bool hash_grid_get_descriptor(uint64_t id, HashGrid& grid) { const auto& iter = g_hash_grid_descriptors.find(id); if (iter == g_hash_grid_descriptors.end()) return false; else grid = iter->second; return true; } void hash_grid_add_descriptor(uint64_t id, const HashGrid& grid) { g_hash_grid_descriptors[id] = grid; } void hash_grid_rem_descriptor(uint64_t id) { g_hash_grid_descriptors.erase(id); } // implemented in hashgrid.cu void hash_grid_rebuild_device(const HashGrid& grid, const wp::array_t<wp::vec3>& points); } // namespace wp // host methods uint64_t hash_grid_create_host(int dim_x, int dim_y, int dim_z) { HashGrid* grid = new HashGrid(); memset(grid, 0, sizeof(HashGrid)); grid->dim_x = dim_x; grid->dim_y = dim_y; grid->dim_z = dim_z; const int num_cells = dim_x*dim_y*dim_z; grid->cell_starts = (int*)alloc_host(num_cells*sizeof(int)); grid->cell_ends = (int*)alloc_host(num_cells*sizeof(int)); return (uint64_t)(grid); } void hash_grid_destroy_host(uint64_t id) { HashGrid* grid = (HashGrid*)(id); free_host(grid->point_ids); free_host(grid->point_cells); free_host(grid->cell_starts); free_host(grid->cell_ends); delete grid; } void hash_grid_reserve_host(uint64_t id, int num_points) { HashGrid* grid = (HashGrid*)(id); if (num_points > grid->max_points) { free_host(grid->point_cells); free_host(grid->point_ids); const int num_to_alloc = num_points*3/2; grid->point_cells = (int*)alloc_host(2*num_to_alloc*sizeof(int)); // *2 for auxiliary radix buffers grid->point_ids = (int*)alloc_host(2*num_to_alloc*sizeof(int)); // *2 for auxiliary radix buffers grid->max_points = num_to_alloc; } grid->num_points = num_points; } void hash_grid_update_host(uint64_t id, float cell_width, const wp::array_t<wp::vec3>* points) { // Python enforces this, but let's be defensive anyways if (!points || points->ndim != 1) { fprintf(stderr, "Warp error: Invalid points array passed to %s\n", __FUNCTION__); return; } if (!id) { fprintf(stderr, "Warp error: Invalid grid passed to %s\n", __FUNCTION__); return; } HashGrid* grid = (HashGrid*)(id); int num_points = points->shape[0]; hash_grid_reserve_host(id, num_points); grid->cell_width = cell_width; grid->cell_width_inv = 1.0f / cell_width; // calculate cell for each position for (int i=0; i < num_points; ++i) { const vec3& point = wp::index(*points, i); grid->point_cells[i] = hash_grid_index(*grid, point); grid->point_ids[i] = i; } // sort indices radix_sort_pairs_host(grid->point_cells, grid->point_ids, num_points); const int num_cells = grid->dim_x * grid->dim_y * grid->dim_z; memset(grid->cell_starts, 0, sizeof(int) * num_cells); memset(grid->cell_ends, 0, sizeof(int) * num_cells); // compute cell start / end for (int i=0; i < num_points; ++i) { // scan the particle-cell array to find the start and end const int c = 
grid->point_cells[i]; if (i == 0) grid->cell_starts[c] = 0; else { const int p = grid->point_cells[i-1]; if (c != p) { grid->cell_starts[c] = i; grid->cell_ends[p] = i; } } if (i == num_points - 1) { grid->cell_ends[c] = i + 1; } } } // device methods uint64_t hash_grid_create_device(void* context, int dim_x, int dim_y, int dim_z) { ContextGuard guard(context); HashGrid grid; memset(&grid, 0, sizeof(HashGrid)); grid.context = context ? context : cuda_context_get_current(); grid.dim_x = dim_x; grid.dim_y = dim_y; grid.dim_z = dim_z; const int num_cells = dim_x*dim_y*dim_z; grid.cell_starts = (int*)alloc_device(WP_CURRENT_CONTEXT, num_cells*sizeof(int)); grid.cell_ends = (int*)alloc_device(WP_CURRENT_CONTEXT, num_cells*sizeof(int)); // upload to device HashGrid* grid_device = (HashGrid*)(alloc_device(WP_CURRENT_CONTEXT, sizeof(HashGrid))); memcpy_h2d(WP_CURRENT_CONTEXT, grid_device, &grid, sizeof(HashGrid)); uint64_t grid_id = (uint64_t)(grid_device); hash_grid_add_descriptor(grid_id, grid); return grid_id; } void hash_grid_destroy_device(uint64_t id) { HashGrid grid; if (hash_grid_get_descriptor(id, grid)) { ContextGuard guard(grid.context); free_device(WP_CURRENT_CONTEXT, grid.point_ids); free_device(WP_CURRENT_CONTEXT, grid.point_cells); free_device(WP_CURRENT_CONTEXT, grid.cell_starts); free_device(WP_CURRENT_CONTEXT, grid.cell_ends); free_device(WP_CURRENT_CONTEXT, (HashGrid*)id); hash_grid_rem_descriptor(id); } } void hash_grid_reserve_device(uint64_t id, int num_points) { HashGrid grid; if (hash_grid_get_descriptor(id, grid)) { if (num_points > grid.max_points) { ContextGuard guard(grid.context); free_device(WP_CURRENT_CONTEXT, grid.point_cells); free_device(WP_CURRENT_CONTEXT, grid.point_ids); const int num_to_alloc = num_points*3/2; grid.point_cells = (int*)alloc_device(WP_CURRENT_CONTEXT, 2*num_to_alloc*sizeof(int)); // *2 for auxiliary radix buffers grid.point_ids = (int*)alloc_device(WP_CURRENT_CONTEXT, 2*num_to_alloc*sizeof(int)); // *2 for auxiliary radix buffers grid.max_points = num_to_alloc; // ensure we pre-size our sort routine to avoid // allocations during graph capture radix_sort_reserve(WP_CURRENT_CONTEXT, num_to_alloc); // update device side grid descriptor, todo: this is // slightly redundant since it is performed again // inside hash_grid_update_device(), but since // reserve can be called from Python we need to make // sure it is consistent memcpy_h2d(WP_CURRENT_CONTEXT, (HashGrid*)id, &grid, sizeof(HashGrid)); // update host side grid descriptor hash_grid_add_descriptor(id, grid); } } } void hash_grid_update_device(uint64_t id, float cell_width, const wp::array_t<wp::vec3>* points) { // Python enforces this, but let's be defensive anyways if (!points || points->ndim != 1) { fprintf(stderr, "Warp error: Invalid points array passed to %s\n", __FUNCTION__); return; } int num_points = points->shape[0]; // ensure we have enough memory reserved for update // this must be done before retrieving the descriptor // below since it may update it hash_grid_reserve_device(id, num_points); // host grid must be static so that we can // perform host->device memcpy from this variable // and have it safely recorded inside CUDA graphs static HashGrid grid; if (hash_grid_get_descriptor(id, grid)) { ContextGuard guard(grid.context); grid.num_points = num_points; grid.cell_width = cell_width; grid.cell_width_inv = 1.0f / cell_width; hash_grid_rebuild_device(grid, *points); // update device side grid descriptor memcpy_h2d(WP_CURRENT_CONTEXT, (HashGrid*)id, &grid, sizeof(HashGrid)); // 
update host side grid descriptor hash_grid_add_descriptor(id, grid); } } #if !WP_ENABLE_CUDA namespace wp { void hash_grid_rebuild_device(const HashGrid& grid, const wp::array_t<wp::vec3>& points) { } } // namespace wp #endif // !WP_ENABLE_CUDA
8,429
C++
26.913907
133
0.624748
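hash_grid_update_host builds the grid by hashing each point to a cell, radix-sorting the (cell, point) pairs by cell id, and then making a single pass over the sorted cell ids to fill cell_starts/cell_ends so that all points of cell c occupy the range [cell_starts[c], cell_ends[c]) in point_ids. A minimal sketch of that final scan on plain std::vector stand-ins follows; the cell ids and sizes are made up for illustration and this is not the library entry point itself.

// Standalone sketch of the cell start/end scan in hash_grid_update_host:
// given point->cell ids already sorted by cell, compute [start, end) per cell.
#include <cstdio>
#include <vector>

int main()
{
    const int num_cells = 8;

    // point_cells[i] is the cell id of sorted point i (sorted by cell id)
    std::vector<int> point_cells = {0, 0, 2, 2, 2, 5, 7, 7};
    const int num_points = static_cast<int>(point_cells.size());

    std::vector<int> cell_starts(num_cells, 0);
    std::vector<int> cell_ends(num_cells, 0);

    for (int i = 0; i < num_points; ++i)
    {
        const int c = point_cells[i];

        if (i == 0)
            cell_starts[c] = 0;
        else
        {
            const int p = point_cells[i - 1];
            if (c != p)
            {
                cell_starts[c] = i; // first point belonging to cell c
                cell_ends[p] = i;   // one past the last point of the previous cell
            }
        }

        if (i == num_points - 1)
            cell_ends[c] = i + 1;
    }

    // cells that never appear keep start == end == 0, i.e. an empty range
    for (int c = 0; c < num_cells; ++c)
        std::printf("cell %d: [%d, %d)\n", c, cell_starts[c], cell_ends[c]);

    return 0;
}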
NVIDIA/warp/warp/native/warp.cpp
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #include "warp.h" #include "scan.h" #include "array.h" #include "exports.h" #include "error.h" #include <stdlib.h> #include <string.h> uint16_t float_to_half_bits(float x) { // adapted from Fabien Giesen's post: https://gist.github.com/rygorous/2156668 union fp32 { uint32_t u; float f; struct { unsigned int mantissa : 23; unsigned int exponent : 8; unsigned int sign : 1; }; }; fp32 f; f.f = x; fp32 f32infty = { 255 << 23 }; fp32 f16infty = { 31 << 23 }; fp32 magic = { 15 << 23 }; uint32_t sign_mask = 0x80000000u; uint32_t round_mask = ~0xfffu; uint16_t u; uint32_t sign = f.u & sign_mask; f.u ^= sign; // NOTE all the integer compares in this function can be safely // compiled into signed compares since all operands are below // 0x80000000. Important if you want fast straight SSE2 code // (since there's no unsigned PCMPGTD). if (f.u >= f32infty.u) // Inf or NaN (all exponent bits set) u = (f.u > f32infty.u) ? 0x7e00 : 0x7c00; // NaN->qNaN and Inf->Inf else // (De)normalized number or zero { f.u &= round_mask; f.f *= magic.f; f.u -= round_mask; if (f.u > f16infty.u) f.u = f16infty.u; // Clamp to signed infinity if overflowed u = f.u >> 13; // Take the bits! } u |= sign >> 16; return u; } float half_bits_to_float(uint16_t u) { // adapted from Fabien Giesen's post: https://gist.github.com/rygorous/2156668 union fp32 { uint32_t u; float f; struct { unsigned int mantissa : 23; unsigned int exponent : 8; unsigned int sign : 1; }; }; static const fp32 magic = { 113 << 23 }; static const uint32_t shifted_exp = 0x7c00 << 13; // exponent mask after shift fp32 o; o.u = (u & 0x7fff) << 13; // exponent/mantissa bits uint32_t exp = shifted_exp & o.u; // just the exponent o.u += (127 - 15) << 23; // exponent adjust // handle exponent special cases if (exp == shifted_exp) // Inf/NaN? o.u += (128 - 16) << 23; // extra exp adjust else if (exp == 0) // Zero/Denormal? 
{ o.u += 1 << 23; // extra exp adjust o.f -= magic.f; // renormalize } o.u |= (u & 0x8000) << 16; // sign bit return o.f; } int init() { #if WP_ENABLE_CUDA int cuda_init(); // note: it's safe to proceed even if CUDA initialization failed cuda_init(); #endif return 0; } void shutdown() { } const char* get_error_string() { return wp::get_error_string(); } void set_error_output_enabled(int enable) { wp::set_error_output_enabled(bool(enable)); } int is_error_output_enabled() { return int(wp::is_error_output_enabled()); } int is_cuda_enabled() { return int(WP_ENABLE_CUDA); } int is_cuda_compatibility_enabled() { return int(WP_ENABLE_CUDA_COMPATIBILITY); } int is_cutlass_enabled() { return int(WP_ENABLE_CUTLASS); } int is_debug_enabled() { return int(WP_ENABLE_DEBUG); } void* alloc_host(size_t s) { return malloc(s); } void free_host(void* ptr) { free(ptr); } bool memcpy_h2h(void* dest, void* src, size_t n) { memcpy(dest, src, n); return true; } void memset_host(void* dest, int value, size_t n) { if ((n%4) > 0) { memset(dest, value, n); } else { const size_t num_words = n/4; for (size_t i=0; i < num_words; ++i) ((int*)dest)[i] = value; } } // fill memory buffer with a value: this is a faster memtile variant // for types bigger than one byte, but requires proper alignment of dst template <typename T> void memtile_value_host(T* dst, T value, size_t n) { while (n--) *dst++ = value; } void memtile_host(void* dst, const void* src, size_t srcsize, size_t n) { size_t dst_addr = reinterpret_cast<size_t>(dst); size_t src_addr = reinterpret_cast<size_t>(src); // try memtile_value first because it should be faster, but we need to ensure proper alignment if (srcsize == 8 && (dst_addr & 7) == 0 && (src_addr & 7) == 0) memtile_value_host(reinterpret_cast<int64_t*>(dst), *reinterpret_cast<const int64_t*>(src), n); else if (srcsize == 4 && (dst_addr & 3) == 0 && (src_addr & 3) == 0) memtile_value_host(reinterpret_cast<int32_t*>(dst), *reinterpret_cast<const int32_t*>(src), n); else if (srcsize == 2 && (dst_addr & 1) == 0 && (src_addr & 1) == 0) memtile_value_host(reinterpret_cast<int16_t*>(dst), *reinterpret_cast<const int16_t*>(src), n); else if (srcsize == 1) memset(dst, *reinterpret_cast<const int8_t*>(src), n); else { // generic version while (n--) { memcpy(dst, src, srcsize); dst = (int8_t*)dst + srcsize; } } } void array_scan_int_host(uint64_t in, uint64_t out, int len, bool inclusive) { scan_host((const int*)in, (int*)out, len, inclusive); } void array_scan_float_host(uint64_t in, uint64_t out, int len, bool inclusive) { scan_host((const float*)in, (float*)out, len, inclusive); } static void array_copy_nd(void* dst, const void* src, const int* dst_strides, const int* src_strides, const int*const* dst_indices, const int*const* src_indices, const int* shape, int ndim, int elem_size) { if (ndim == 1) { for (int i = 0; i < shape[0]; i++) { int src_idx = src_indices[0] ? src_indices[0][i] : i; int dst_idx = dst_indices[0] ? dst_indices[0][i] : i; const char* p = (const char*)src + src_idx * src_strides[0]; char* q = (char*)dst + dst_idx * dst_strides[0]; // copy element memcpy(q, p, elem_size); } } else { for (int i = 0; i < shape[0]; i++) { int src_idx = src_indices[0] ? src_indices[0][i] : i; int dst_idx = dst_indices[0] ? 
dst_indices[0][i] : i; const char* p = (const char*)src + src_idx * src_strides[0]; char* q = (char*)dst + dst_idx * dst_strides[0]; // recurse on next inner dimension array_copy_nd(q, p, dst_strides + 1, src_strides + 1, dst_indices + 1, src_indices + 1, shape + 1, ndim - 1, elem_size); } } } static void array_copy_to_fabric(wp::fabricarray_t<void>& dst, const void* src_data, int src_stride, const int* src_indices, int elem_size) { const int8_t* src_ptr = static_cast<const int8_t*>(src_data); if (src_indices) { // copy from indexed array for (size_t i = 0; i < dst.nbuckets; i++) { const wp::fabricbucket_t& bucket = dst.buckets[i]; int8_t* dst_ptr = static_cast<int8_t*>(bucket.ptr); size_t bucket_size = bucket.index_end - bucket.index_start; for (size_t j = 0; j < bucket_size; j++) { int idx = *src_indices; memcpy(dst_ptr, src_ptr + idx * elem_size, elem_size); dst_ptr += elem_size; ++src_indices; } } } else { if (src_stride == elem_size) { // copy from contiguous array for (size_t i = 0; i < dst.nbuckets; i++) { const wp::fabricbucket_t& bucket = dst.buckets[i]; size_t num_bytes = (bucket.index_end - bucket.index_start) * elem_size; memcpy(bucket.ptr, src_ptr, num_bytes); src_ptr += num_bytes; } } else { // copy from strided array for (size_t i = 0; i < dst.nbuckets; i++) { const wp::fabricbucket_t& bucket = dst.buckets[i]; int8_t* dst_ptr = static_cast<int8_t*>(bucket.ptr); size_t bucket_size = bucket.index_end - bucket.index_start; for (size_t j = 0; j < bucket_size; j++) { memcpy(dst_ptr, src_ptr, elem_size); src_ptr += src_stride; dst_ptr += elem_size; } } } } } static void array_copy_from_fabric(const wp::fabricarray_t<void>& src, void* dst_data, int dst_stride, const int* dst_indices, int elem_size) { int8_t* dst_ptr = static_cast<int8_t*>(dst_data); if (dst_indices) { // copy to indexed array for (size_t i = 0; i < src.nbuckets; i++) { const wp::fabricbucket_t& bucket = src.buckets[i]; const int8_t* src_ptr = static_cast<const int8_t*>(bucket.ptr); size_t bucket_size = bucket.index_end - bucket.index_start; for (size_t j = 0; j < bucket_size; j++) { int idx = *dst_indices; memcpy(dst_ptr + idx * elem_size, src_ptr, elem_size); src_ptr += elem_size; ++dst_indices; } } } else { if (dst_stride == elem_size) { // copy to contiguous array for (size_t i = 0; i < src.nbuckets; i++) { const wp::fabricbucket_t& bucket = src.buckets[i]; size_t num_bytes = (bucket.index_end - bucket.index_start) * elem_size; memcpy(dst_ptr, bucket.ptr, num_bytes); dst_ptr += num_bytes; } } else { // copy to strided array for (size_t i = 0; i < src.nbuckets; i++) { const wp::fabricbucket_t& bucket = src.buckets[i]; const int8_t* src_ptr = static_cast<const int8_t*>(bucket.ptr); size_t bucket_size = bucket.index_end - bucket.index_start; for (size_t j = 0; j < bucket_size; j++) { memcpy(dst_ptr, src_ptr, elem_size); dst_ptr += dst_stride; src_ptr += elem_size; } } } } } static void array_copy_fabric_to_fabric(wp::fabricarray_t<void>& dst, const wp::fabricarray_t<void>& src, int elem_size) { wp::fabricbucket_t* dst_bucket = dst.buckets; const wp::fabricbucket_t* src_bucket = src.buckets; int8_t* dst_ptr = static_cast<int8_t*>(dst_bucket->ptr); const int8_t* src_ptr = static_cast<const int8_t*>(src_bucket->ptr); size_t dst_remaining = dst_bucket->index_end - dst_bucket->index_start; size_t src_remaining = src_bucket->index_end - src_bucket->index_start; size_t total_copied = 0; while (total_copied < dst.size) { if (dst_remaining <= src_remaining) { // copy to destination bucket size_t num_elems = dst_remaining; 
size_t num_bytes = num_elems * elem_size; memcpy(dst_ptr, src_ptr, num_bytes); // advance to next destination bucket ++dst_bucket; dst_ptr = static_cast<int8_t*>(dst_bucket->ptr); dst_remaining = dst_bucket->index_end - dst_bucket->index_start; // advance source offset src_ptr += num_bytes; src_remaining -= num_elems; total_copied += num_elems; } else { // copy to destination bucket size_t num_elems = src_remaining; size_t num_bytes = num_elems * elem_size; memcpy(dst_ptr, src_ptr, num_bytes); // advance to next source bucket ++src_bucket; src_ptr = static_cast<const int8_t*>(src_bucket->ptr); src_remaining = src_bucket->index_end - src_bucket->index_start; // advance destination offset dst_ptr += num_bytes; dst_remaining -= num_elems; total_copied += num_elems; } } } static void array_copy_to_fabric_indexed(wp::indexedfabricarray_t<void>& dst, const void* src_data, int src_stride, const int* src_indices, int elem_size) { const int8_t* src_ptr = static_cast<const int8_t*>(src_data); if (src_indices) { // copy from indexed array for (size_t i = 0; i < dst.size; i++) { size_t src_idx = src_indices[i]; size_t dst_idx = dst.indices[i]; void* dst_ptr = fabricarray_element_ptr(dst.fa, dst_idx, elem_size); memcpy(dst_ptr, src_ptr + dst_idx * elem_size, elem_size); } } else { // copy from contiguous/strided array for (size_t i = 0; i < dst.size; i++) { size_t dst_idx = dst.indices[i]; void* dst_ptr = fabricarray_element_ptr(dst.fa, dst_idx, elem_size); if (dst_ptr) { memcpy(dst_ptr, src_ptr, elem_size); src_ptr += src_stride; } } } } static void array_copy_fabric_indexed_to_fabric(wp::fabricarray_t<void>& dst, const wp::indexedfabricarray_t<void>& src, int elem_size) { wp::fabricbucket_t* dst_bucket = dst.buckets; int8_t* dst_ptr = static_cast<int8_t*>(dst_bucket->ptr); int8_t* dst_end = dst_ptr + elem_size * (dst_bucket->index_end - dst_bucket->index_start); for (size_t i = 0; i < src.size; i++) { size_t src_idx = src.indices[i]; const void* src_ptr = fabricarray_element_ptr(src.fa, src_idx, elem_size); if (dst_ptr >= dst_end) { // advance to next destination bucket ++dst_bucket; dst_ptr = static_cast<int8_t*>(dst_bucket->ptr); dst_end = dst_ptr + elem_size * (dst_bucket->index_end - dst_bucket->index_start); } memcpy(dst_ptr, src_ptr, elem_size); dst_ptr += elem_size; } } static void array_copy_fabric_indexed_to_fabric_indexed(wp::indexedfabricarray_t<void>& dst, const wp::indexedfabricarray_t<void>& src, int elem_size) { for (size_t i = 0; i < src.size; i++) { size_t src_idx = src.indices[i]; size_t dst_idx = dst.indices[i]; const void* src_ptr = fabricarray_element_ptr(src.fa, src_idx, elem_size); void* dst_ptr = fabricarray_element_ptr(dst.fa, dst_idx, elem_size); memcpy(dst_ptr, src_ptr, elem_size); } } static void array_copy_fabric_to_fabric_indexed(wp::indexedfabricarray_t<void>& dst, const wp::fabricarray_t<void>& src, int elem_size) { wp::fabricbucket_t* src_bucket = src.buckets; const int8_t* src_ptr = static_cast<const int8_t*>(src_bucket->ptr); const int8_t* src_end = src_ptr + elem_size * (src_bucket->index_end - src_bucket->index_start); for (size_t i = 0; i < dst.size; i++) { size_t dst_idx = dst.indices[i]; void* dst_ptr = fabricarray_element_ptr(dst.fa, dst_idx, elem_size); if (src_ptr >= src_end) { // advance to next source bucket ++src_bucket; src_ptr = static_cast<int8_t*>(src_bucket->ptr); src_end = src_ptr + elem_size * (src_bucket->index_end - src_bucket->index_start); } memcpy(dst_ptr, src_ptr, elem_size); src_ptr += elem_size; } } static void 
array_copy_from_fabric_indexed(const wp::indexedfabricarray_t<void>& src, void* dst_data, int dst_stride, const int* dst_indices, int elem_size) { int8_t* dst_ptr = static_cast<int8_t*>(dst_data); if (dst_indices) { // copy to indexed array for (size_t i = 0; i < src.size; i++) { size_t idx = src.indices[i]; if (idx < src.fa.size) { const void* src_ptr = fabricarray_element_ptr(src.fa, idx, elem_size); int dst_idx = dst_indices[i]; memcpy(dst_ptr + dst_idx * elem_size, src_ptr, elem_size); } else { fprintf(stderr, "Warp copy error: Source index %llu is out of bounds for fabric array of size %llu", (unsigned long long)idx, (unsigned long long)src.fa.size); } } } else { // copy to contiguous/strided array for (size_t i = 0; i < src.size; i++) { size_t idx = src.indices[i]; if (idx < src.fa.size) { const void* src_ptr = fabricarray_element_ptr(src.fa, idx, elem_size); memcpy(dst_ptr, src_ptr, elem_size); dst_ptr += dst_stride; } else { fprintf(stderr, "Warp copy error: Source index %llu is out of bounds for fabric array of size %llu", (unsigned long long)idx, (unsigned long long)src.fa.size); } } } } WP_API bool array_copy_host(void* dst, void* src, int dst_type, int src_type, int elem_size) { if (!src || !dst) return false; const void* src_data = NULL; void* dst_data = NULL; int src_ndim = 0; int dst_ndim = 0; const int* src_shape = NULL; const int* dst_shape = NULL; const int* src_strides = NULL; const int* dst_strides = NULL; const int*const* src_indices = NULL; const int*const* dst_indices = NULL; const wp::fabricarray_t<void>* src_fabricarray = NULL; wp::fabricarray_t<void>* dst_fabricarray = NULL; const wp::indexedfabricarray_t<void>* src_indexedfabricarray = NULL; wp::indexedfabricarray_t<void>* dst_indexedfabricarray = NULL; const int* null_indices[wp::ARRAY_MAX_DIMS] = { NULL }; if (src_type == wp::ARRAY_TYPE_REGULAR) { const wp::array_t<void>& src_arr = *static_cast<const wp::array_t<void>*>(src); src_data = src_arr.data; src_ndim = src_arr.ndim; src_shape = src_arr.shape.dims; src_strides = src_arr.strides; src_indices = null_indices; } else if (src_type == wp::ARRAY_TYPE_INDEXED) { const wp::indexedarray_t<void>& src_arr = *static_cast<const wp::indexedarray_t<void>*>(src); src_data = src_arr.arr.data; src_ndim = src_arr.arr.ndim; src_shape = src_arr.shape.dims; src_strides = src_arr.arr.strides; src_indices = src_arr.indices; } else if (src_type == wp::ARRAY_TYPE_FABRIC) { src_fabricarray = static_cast<const wp::fabricarray_t<void>*>(src); src_ndim = 1; } else if (src_type == wp::ARRAY_TYPE_FABRIC_INDEXED) { src_indexedfabricarray = static_cast<const wp::indexedfabricarray_t<void>*>(src); src_ndim = 1; } else { fprintf(stderr, "Warp copy error: Invalid source array type (%d)\n", src_type); return false; } if (dst_type == wp::ARRAY_TYPE_REGULAR) { const wp::array_t<void>& dst_arr = *static_cast<const wp::array_t<void>*>(dst); dst_data = dst_arr.data; dst_ndim = dst_arr.ndim; dst_shape = dst_arr.shape.dims; dst_strides = dst_arr.strides; dst_indices = null_indices; } else if (dst_type == wp::ARRAY_TYPE_INDEXED) { const wp::indexedarray_t<void>& dst_arr = *static_cast<const wp::indexedarray_t<void>*>(dst); dst_data = dst_arr.arr.data; dst_ndim = dst_arr.arr.ndim; dst_shape = dst_arr.shape.dims; dst_strides = dst_arr.arr.strides; dst_indices = dst_arr.indices; } else if (dst_type == wp::ARRAY_TYPE_FABRIC) { dst_fabricarray = static_cast<wp::fabricarray_t<void>*>(dst); dst_ndim = 1; } else if (dst_type == wp::ARRAY_TYPE_FABRIC_INDEXED) { dst_indexedfabricarray = 
static_cast<wp::indexedfabricarray_t<void>*>(dst); dst_ndim = 1; } else { fprintf(stderr, "Warp copy error: Invalid destination array type (%d)\n", dst_type); return false; } if (src_ndim != dst_ndim) { fprintf(stderr, "Warp copy error: Incompatible array dimensionalities (%d and %d)\n", src_ndim, dst_ndim); return false; } // handle fabric arrays if (dst_fabricarray) { size_t n = dst_fabricarray->size; if (src_fabricarray) { // copy from fabric to fabric if (src_fabricarray->size != n) { fprintf(stderr, "Warp copy error: Incompatible array sizes\n"); return false; } array_copy_fabric_to_fabric(*dst_fabricarray, *src_fabricarray, elem_size); return true; } else if (src_indexedfabricarray) { // copy from fabric indexed to fabric if (src_indexedfabricarray->size != n) { fprintf(stderr, "Warp copy error: Incompatible array sizes\n"); return false; } array_copy_fabric_indexed_to_fabric(*dst_fabricarray, *src_indexedfabricarray, elem_size); return true; } else { // copy to fabric if (size_t(src_shape[0]) != n) { fprintf(stderr, "Warp copy error: Incompatible array sizes\n"); return false; } array_copy_to_fabric(*dst_fabricarray, src_data, src_strides[0], src_indices[0], elem_size); return true; } } else if (dst_indexedfabricarray) { size_t n = dst_indexedfabricarray->size; if (src_fabricarray) { // copy from fabric to fabric indexed if (src_fabricarray->size != n) { fprintf(stderr, "Warp copy error: Incompatible array sizes\n"); return false; } array_copy_fabric_to_fabric_indexed(*dst_indexedfabricarray, *src_fabricarray, elem_size); return true; } else if (src_indexedfabricarray) { // copy from fabric indexed to fabric indexed if (src_indexedfabricarray->size != n) { fprintf(stderr, "Warp copy error: Incompatible array sizes\n"); return false; } array_copy_fabric_indexed_to_fabric_indexed(*dst_indexedfabricarray, *src_indexedfabricarray, elem_size); return true; } else { // copy to fabric indexed if (size_t(src_shape[0]) != n) { fprintf(stderr, "Warp copy error: Incompatible array sizes\n"); return false; } array_copy_to_fabric_indexed(*dst_indexedfabricarray, src_data, src_strides[0], src_indices[0], elem_size); return true; } } else if (src_fabricarray) { // copy from fabric size_t n = src_fabricarray->size; if (size_t(dst_shape[0]) != n) { fprintf(stderr, "Warp copy error: Incompatible array sizes\n"); return false; } array_copy_from_fabric(*src_fabricarray, dst_data, dst_strides[0], dst_indices[0], elem_size); return true; } else if (src_indexedfabricarray) { // copy from fabric indexed size_t n = src_indexedfabricarray->size; if (size_t(dst_shape[0]) != n) { fprintf(stderr, "Warp copy error: Incompatible array sizes\n"); return false; } array_copy_from_fabric_indexed(*src_indexedfabricarray, dst_data, dst_strides[0], dst_indices[0], elem_size); return true; } for (int i = 0; i < src_ndim; i++) { if (src_shape[i] != dst_shape[i]) { fprintf(stderr, "Warp copy error: Incompatible array shapes\n"); return 0; } } array_copy_nd(dst_data, src_data, dst_strides, src_strides, dst_indices, src_indices, src_shape, src_ndim, elem_size); return true; } static void array_fill_strided(void* data, const int* shape, const int* strides, int ndim, const void* value, int value_size) { if (ndim == 1) { char* p = (char*)data; for (int i = 0; i < shape[0]; i++) { memcpy(p, value, value_size); p += strides[0]; } } else { for (int i = 0; i < shape[0]; i++) { char* p = (char*)data + i * strides[0]; // recurse on next inner dimension array_fill_strided(p, shape + 1, strides + 1, ndim - 1, value, value_size); } } } 
static void array_fill_indexed(void* data, const int* shape, const int* strides, const int*const* indices, int ndim, const void* value, int value_size) { if (ndim == 1) { for (int i = 0; i < shape[0]; i++) { int idx = indices[0] ? indices[0][i] : i; char* p = (char*)data + idx * strides[0]; memcpy(p, value, value_size); } } else { for (int i = 0; i < shape[0]; i++) { int idx = indices[0] ? indices[0][i] : i; char* p = (char*)data + idx * strides[0]; // recurse on next inner dimension array_fill_indexed(p, shape + 1, strides + 1, indices + 1, ndim - 1, value, value_size); } } } static void array_fill_fabric(wp::fabricarray_t<void>& fa, const void* value_ptr, int value_size) { for (size_t i = 0; i < fa.nbuckets; i++) { const wp::fabricbucket_t& bucket = fa.buckets[i]; size_t bucket_size = bucket.index_end - bucket.index_start; memtile_host(bucket.ptr, value_ptr, value_size, bucket_size); } } static void array_fill_fabric_indexed(wp::indexedfabricarray_t<void>& ifa, const void* value_ptr, int value_size) { for (size_t i = 0; i < ifa.size; i++) { size_t idx = size_t(ifa.indices[i]); if (idx < ifa.fa.size) { void* p = fabricarray_element_ptr(ifa.fa, idx, value_size); memcpy(p, value_ptr, value_size); } } } WP_API void array_fill_host(void* arr_ptr, int arr_type, const void* value_ptr, int value_size) { if (!arr_ptr || !value_ptr) return; if (arr_type == wp::ARRAY_TYPE_REGULAR) { wp::array_t<void>& arr = *static_cast<wp::array_t<void>*>(arr_ptr); array_fill_strided(arr.data, arr.shape.dims, arr.strides, arr.ndim, value_ptr, value_size); } else if (arr_type == wp::ARRAY_TYPE_INDEXED) { wp::indexedarray_t<void>& ia = *static_cast<wp::indexedarray_t<void>*>(arr_ptr); array_fill_indexed(ia.arr.data, ia.shape.dims, ia.arr.strides, ia.indices, ia.arr.ndim, value_ptr, value_size); } else if (arr_type == wp::ARRAY_TYPE_FABRIC) { wp::fabricarray_t<void>& fa = *static_cast<wp::fabricarray_t<void>*>(arr_ptr); array_fill_fabric(fa, value_ptr, value_size); } else if (arr_type == wp::ARRAY_TYPE_FABRIC_INDEXED) { wp::indexedfabricarray_t<void>& ifa = *static_cast<wp::indexedfabricarray_t<void>*>(arr_ptr); array_fill_fabric_indexed(ifa, value_ptr, value_size); } else { fprintf(stderr, "Warp fill error: Invalid array type id %d\n", arr_type); } } // impl. 
files // TODO: compile as separate translation units #include "bvh.cpp" #include "scan.cpp" // stubs for platforms where there is no CUDA #if !WP_ENABLE_CUDA void* alloc_pinned(size_t s) { // CUDA is not available, fall back on system allocator return alloc_host(s); } void free_pinned(void* ptr) { // CUDA is not available, fall back on system allocator free_host(ptr); } void* alloc_device(void* context, size_t s) { return NULL; } void* alloc_device_default(void* context, size_t s) { return NULL; } void* alloc_device_async(void* context, size_t s) { return NULL; } void free_device(void* context, void* ptr) { } void free_device_default(void* context, void* ptr) { } void free_device_async(void* context, void* ptr) { } bool memcpy_h2d(void* context, void* dest, void* src, size_t n, void* stream) { return false; } bool memcpy_d2h(void* context, void* dest, void* src, size_t n, void* stream) { return false; } bool memcpy_d2d(void* context, void* dest, void* src, size_t n, void* stream) { return false; } bool memcpy_p2p(void* dst_context, void* dst, void* src_context, void* src, size_t n, void* stream) { return false; } void memset_device(void* context, void* dest, int value, size_t n) { } void memtile_device(void* context, void* dest, const void* src, size_t srcsize, size_t n) { } bool array_copy_device(void* context, void* dst, void* src, int dst_type, int src_type, int elem_size) { return false; } void array_fill_device(void* context, void* arr, int arr_type, const void* value, int value_size) { } WP_API int cuda_driver_version() { return 0; } WP_API int cuda_toolkit_version() { return 0; } WP_API bool cuda_driver_is_initialized() { return false; } WP_API int nvrtc_supported_arch_count() { return 0; } WP_API void nvrtc_supported_archs(int* archs) {} WP_API int cuda_device_get_count() { return 0; } WP_API void* cuda_device_get_primary_context(int ordinal) { return NULL; } WP_API const char* cuda_device_get_name(int ordinal) { return NULL; } WP_API int cuda_device_get_arch(int ordinal) { return 0; } WP_API void cuda_device_get_uuid(int ordinal, char uuid[16]) {} WP_API int cuda_device_get_pci_domain_id(int ordinal) { return -1; } WP_API int cuda_device_get_pci_bus_id(int ordinal) { return -1; } WP_API int cuda_device_get_pci_device_id(int ordinal) { return -1; } WP_API int cuda_device_is_uva(int ordinal) { return 0; } WP_API int cuda_device_is_mempool_supported(int ordinal) { return 0; } WP_API int cuda_device_set_mempool_release_threshold(int ordinal, uint64_t threshold) { return 0; } WP_API uint64_t cuda_device_get_mempool_release_threshold(int ordinal) { return 0; } WP_API void cuda_device_get_memory_info(int ordinal, size_t* free_mem, size_t* total_mem) {} WP_API void* cuda_context_get_current() { return NULL; } WP_API void cuda_context_set_current(void* ctx) {} WP_API void cuda_context_push_current(void* context) {} WP_API void cuda_context_pop_current() {} WP_API void* cuda_context_create(int device_ordinal) { return NULL; } WP_API void cuda_context_destroy(void* context) {} WP_API void cuda_context_synchronize(void* context) {} WP_API uint64_t cuda_context_check(void* context) { return 0; } WP_API int cuda_context_get_device_ordinal(void* context) { return -1; } WP_API int cuda_context_is_primary(void* context) { return 0; } WP_API void* cuda_context_get_stream(void* context) { return NULL; } WP_API void cuda_context_set_stream(void* context, void* stream, int sync) {} WP_API int cuda_is_peer_access_supported(int target_ordinal, int peer_ordinal) { return 0; } WP_API int 
cuda_is_peer_access_enabled(void* target_context, void* peer_context) { return 0; } WP_API int cuda_set_peer_access_enabled(void* target_context, void* peer_context, int enable) { return 0; } WP_API int cuda_is_mempool_access_enabled(int target_ordinal, int peer_ordinal) { return 0; } WP_API int cuda_set_mempool_access_enabled(int target_ordinal, int peer_ordinal, int enable) { return 0; } WP_API void* cuda_stream_create(void* context) { return NULL; } WP_API void cuda_stream_destroy(void* context, void* stream) {} WP_API void cuda_stream_register(void* context, void* stream) {} WP_API void cuda_stream_unregister(void* context, void* stream) {} WP_API void* cuda_stream_get_current() { return NULL; } WP_API void cuda_stream_synchronize(void* stream) {} WP_API void cuda_stream_wait_event(void* stream, void* event) {} WP_API void cuda_stream_wait_stream(void* stream, void* other_stream, void* event) {} WP_API int cuda_stream_is_capturing(void* stream) { return 0; } WP_API void* cuda_event_create(void* context, unsigned flags) { return NULL; } WP_API void cuda_event_destroy(void* event) {} WP_API void cuda_event_record(void* event, void* stream) {} WP_API void cuda_event_synchronize(void* event) {} WP_API float cuda_event_elapsed_time(void* start_event, void* end_event) { return 0.0f; } WP_API bool cuda_graph_begin_capture(void* context, void* stream, int external) { return false; } WP_API bool cuda_graph_end_capture(void* context, void* stream, void** graph_ret) { return false; } WP_API bool cuda_graph_launch(void* graph, void* stream) { return false; } WP_API bool cuda_graph_destroy(void* context, void* graph) { return false; } WP_API size_t cuda_compile_program(const char* cuda_src, int arch, const char* include_dir, bool debug, bool verbose, bool verify_fp, bool fast_math, const char* output_file) { return 0; } WP_API void* cuda_load_module(void* context, const char* ptx) { return NULL; } WP_API void cuda_unload_module(void* context, void* module) {} WP_API void* cuda_get_kernel(void* context, void* module, const char* name) { return NULL; } WP_API size_t cuda_launch_kernel(void* context, void* kernel, size_t dim, int max_blocks, void** args, void* stream) { return 0; } WP_API void cuda_set_context_restore_policy(bool always_restore) {} WP_API int cuda_get_context_restore_policy() { return false; } WP_API void array_scan_int_device(uint64_t in, uint64_t out, int len, bool inclusive) {} WP_API void array_scan_float_device(uint64_t in, uint64_t out, int len, bool inclusive) {} WP_API void cuda_graphics_map(void* context, void* resource) {} WP_API void cuda_graphics_unmap(void* context, void* resource) {} WP_API void cuda_graphics_device_ptr_and_size(void* context, void* resource, uint64_t* ptr, size_t* size) {} WP_API void* cuda_graphics_register_gl_buffer(void* context, uint32_t gl_buffer, unsigned int flags) { return NULL; } WP_API void cuda_graphics_unregister_resource(void* context, void* resource) {} WP_API void cuda_timing_begin(int flags) {} WP_API int cuda_timing_get_result_count() { return 0; } WP_API void cuda_timing_end(timing_result_t* results, int size) {} #endif // !WP_ENABLE_CUDA
34,649
C++
31.750473
188
0.574389
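array_copy_nd in warp.cpp handles arbitrary dimensionality by recursing over the outermost axis with per-axis byte strides (plus optional index arrays for indexed views). The sketch below strips out the index paths and copies a 2x3 sub-block out of a larger row-major buffer into a tightly packed destination; the shapes, strides, and the copy_nd name are made up for illustration and are not the library API.

// Standalone sketch of the recursive N-D strided copy pattern used by array_copy_nd.
#include <cstdio>
#include <cstring>

static void copy_nd(void* dst, const void* src,
                    const int* dst_strides, const int* src_strides,
                    const int* shape, int ndim, int elem_size)
{
    if (ndim == 1)
    {
        // innermost axis: copy one element at a time using byte strides
        for (int i = 0; i < shape[0]; ++i)
            std::memcpy((char*)dst + i * dst_strides[0],
                        (const char*)src + i * src_strides[0], elem_size);
    }
    else
    {
        // outer axis: advance both pointers by their strides and recurse
        for (int i = 0; i < shape[0]; ++i)
            copy_nd((char*)dst + i * dst_strides[0],
                    (const char*)src + i * src_strides[0],
                    dst_strides + 1, src_strides + 1, shape + 1, ndim - 1, elem_size);
    }
}

int main()
{
    // source: 4x5 row-major float buffer; copy the top-left 2x3 sub-block
    float src[4][5];
    for (int i = 0; i < 4; ++i)
        for (int j = 0; j < 5; ++j)
            src[i][j] = float(i * 10 + j);

    float dst[2][3] = {};

    const int shape[2] = {2, 3};
    const int src_strides[2] = {5 * (int)sizeof(float), (int)sizeof(float)}; // byte strides
    const int dst_strides[2] = {3 * (int)sizeof(float), (int)sizeof(float)};

    copy_nd(dst, src, dst_strides, src_strides, shape, 2, (int)sizeof(float));

    for (int i = 0; i < 2; ++i)
        std::printf("%g %g %g\n", dst[i][0], dst[i][1], dst[i][2]);

    return 0;
}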
NVIDIA/warp/warp/native/volume.h
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #pragma once #include "array.h" #include "builtin.h" #define PNANOVDB_C #define PNANOVDB_MEMCPY_CUSTOM #define pnanovdb_memcpy memcpy #if defined(WP_NO_CRT) && !defined(__CUDACC__) // PNanoVDB will try to include <stdint.h> unless __CUDACC_RTC__ is defined #define __CUDACC_RTC__ #endif #include "nanovdb/PNanoVDB.h" #if defined(WP_NO_CRT) && !defined(__CUDACC__) #undef __CUDACC_RTC__ #endif namespace wp { namespace volume { // Need to kept in sync with constants in python-side Volume class static constexpr int CLOSEST = 0; static constexpr int LINEAR = 1; // pnanovdb helper function CUDA_CALLABLE inline pnanovdb_buf_t id_to_buffer(uint64_t id) { pnanovdb_buf_t buf; buf.data = (uint32_t *)id; return buf; } CUDA_CALLABLE inline pnanovdb_grid_handle_t get_grid(pnanovdb_buf_t buf) { return {0u}; } CUDA_CALLABLE inline pnanovdb_uint32_t get_grid_type(pnanovdb_buf_t buf) { return pnanovdb_grid_get_grid_type(buf, get_grid(buf)); } CUDA_CALLABLE inline pnanovdb_tree_handle_t get_tree(pnanovdb_buf_t buf) { return pnanovdb_grid_get_tree(buf, get_grid(buf)); } CUDA_CALLABLE inline pnanovdb_root_handle_t get_root(pnanovdb_buf_t buf) { return pnanovdb_tree_get_root(buf, get_tree(buf)); } template <typename T> struct pnano_traits { }; // to add support for more grid types, extend this // and update _volume_supported_value_types in builtins.py template <> struct pnano_traits<int32_t> { static constexpr int GRID_TYPE = PNANOVDB_GRID_TYPE_INT32; }; template <> struct pnano_traits<int64_t> { static constexpr int GRID_TYPE = PNANOVDB_GRID_TYPE_INT64; }; template <> struct pnano_traits<uint32_t> { static constexpr int GRID_TYPE = PNANOVDB_GRID_TYPE_UINT32; }; template <> struct pnano_traits<float> { static constexpr int GRID_TYPE = PNANOVDB_GRID_TYPE_FLOAT; }; template <> struct pnano_traits<double> { static constexpr int GRID_TYPE = PNANOVDB_GRID_TYPE_DOUBLE; }; template <> struct pnano_traits<vec3f> { static constexpr int GRID_TYPE = PNANOVDB_GRID_TYPE_VEC3F; }; template <> struct pnano_traits<vec3d> { static constexpr int GRID_TYPE = PNANOVDB_GRID_TYPE_VEC3D; }; template <> struct pnano_traits<vec4f> { static constexpr int GRID_TYPE = PNANOVDB_GRID_TYPE_VEC4F; }; template <> struct pnano_traits<vec4d> { static constexpr int GRID_TYPE = PNANOVDB_GRID_TYPE_VEC4D; }; // common accessors over various grid types // WARNING: implementation below only for >=32b values, but that's the case for all types above // for smaller types add a specialization template <typename T> CUDA_CALLABLE inline void pnano_read(T &result, pnanovdb_buf_t buf, pnanovdb_address_t address) { result = *reinterpret_cast<const T *>(buf.data + (address.byte_offset >> 2)); } template <typename T> CUDA_CALLABLE inline void pnano_write(const T &value, pnanovdb_buf_t buf, pnanovdb_address_t address) { *reinterpret_cast<T *>(buf.data + (address.byte_offset >> 2)) = value; } template <typename T> CUDA_CALLABLE inline void pnano_read(T &result, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk) { using traits = pnano_traits<T>; const pnanovdb_address_t address = 
pnanovdb_root_get_value_address(traits::GRID_TYPE, buf, root, ijk); pnano_read<T>(result, buf, address); } template <typename T> CUDA_CALLABLE inline void pnano_read(T &result, pnanovdb_buf_t buf, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_IN(pnanovdb_coord_t) ijk) { using traits = pnano_traits<T>; // pnanovdb_address_t address = pnanovdb_readaccessor_get_value_address(traits::GRID_TYPE, buf, acc, ijk); pnanovdb_uint32_t level; const pnanovdb_address_t address = pnanovdb_readaccessor_get_value_address_and_level(traits::GRID_TYPE, buf, acc, ijk, PNANOVDB_REF(level)); pnano_read<T>(result, buf, address); } /// regular grid accessor (values stored in leafs) struct value_accessor_base { pnanovdb_buf_t buf; pnanovdb_root_handle_t root; pnanovdb_readaccessor_t accessor; explicit inline CUDA_CALLABLE value_accessor_base(const pnanovdb_buf_t buf) : buf(buf), root(get_root(buf)) { } CUDA_CALLABLE inline void init_cache() { pnanovdb_readaccessor_init(PNANOVDB_REF(accessor), root); } }; template <typename T> struct leaf_value_accessor : value_accessor_base { using ValueType = T; explicit inline CUDA_CALLABLE leaf_value_accessor(const pnanovdb_buf_t buf) : value_accessor_base(buf) { } CUDA_CALLABLE inline bool is_valid() const { return get_grid_type(buf) == pnano_traits<T>::GRID_TYPE; } CUDA_CALLABLE inline T read_single(const pnanovdb_coord_t &ijk) const { T val; pnano_read(val, buf, root, PNANOVDB_REF(ijk)); return val; } CUDA_CALLABLE inline T read_cache(const pnanovdb_coord_t &ijk) { T val; pnano_read(val, buf, PNANOVDB_REF(accessor), PNANOVDB_REF(ijk)); return val; } CUDA_CALLABLE inline void adj_read_single(const pnanovdb_coord_t &ijk, const T &adj_ret) { // NOP } CUDA_CALLABLE inline void adj_read_cache(const pnanovdb_coord_t &ijk, const T &adj_ret) { // NOP } }; CUDA_CALLABLE inline pnanovdb_uint64_t leaf_regular_get_voxel_index(pnanovdb_buf_t buf, pnanovdb_address_t value_address, PNANOVDB_IN(pnanovdb_coord_t) ijk) { // compute leaf index from value address, assuming all leaf voxels are allocated const pnanovdb_grid_type_t grid_type = get_grid_type(buf); const pnanovdb_uint32_t n = pnanovdb_leaf_coord_to_offset(ijk); const pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, leaf_off_table) + ((PNANOVDB_GRID_TYPE_GET(grid_type, value_stride_bits) * n) >> 3u); const pnanovdb_address_t leaf_address = pnanovdb_address_offset_neg(value_address, byte_offset); const pnanovdb_uint64_t first_leaf_offset = pnanovdb_tree_get_node_offset_leaf(buf, get_tree(buf)); const pnanovdb_uint32_t leaf_size = PNANOVDB_GRID_TYPE_GET(grid_type, leaf_size); const pnanovdb_uint64_t leaf_index = (leaf_address.byte_offset - first_leaf_offset) / leaf_size; return leaf_index * PNANOVDB_LEAF_TABLE_COUNT + n + 1; } CUDA_CALLABLE inline pnanovdb_uint64_t get_grid_voxel_index(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_address_t value_address, const pnanovdb_coord_t &ijk) { switch (grid_type) { case PNANOVDB_GRID_TYPE_INDEX: return pnanovdb_leaf_index_get_value_index(buf, value_address, PNANOVDB_REF(ijk)); case PNANOVDB_GRID_TYPE_ONINDEX: return pnanovdb_leaf_onindex_get_value_index(buf, value_address, PNANOVDB_REF(ijk)); case PNANOVDB_GRID_TYPE_INDEXMASK: return pnanovdb_leaf_indexmask_get_value_index(buf, value_address, PNANOVDB_REF(ijk)); case PNANOVDB_GRID_TYPE_ONINDEXMASK: return pnanovdb_leaf_onindexmask_get_value_index(buf, value_address, PNANOVDB_REF(ijk)); default: return leaf_regular_get_voxel_index(buf, value_address, PNANOVDB_REF(ijk)); } }; /// index grid accessor template 
<typename T> struct index_value_accessor : value_accessor_base { using ValueType = T; pnanovdb_grid_type_t grid_type; array_t<T> data; const T &background; T *adj_background; explicit inline CUDA_CALLABLE index_value_accessor(const pnanovdb_buf_t buf, const array_t<T> &data, const T &background, T *adj_background = nullptr) : value_accessor_base(buf), grid_type(get_grid_type(buf)), data(data), background(background), adj_background(adj_background) { } CUDA_CALLABLE inline bool is_valid() const { // Accessor is valid for all grid types return true; } CUDA_CALLABLE inline T read_single(const pnanovdb_coord_t &ijk) const { pnanovdb_uint32_t level; const pnanovdb_address_t address = pnanovdb_root_get_value_address_and_level(grid_type, buf, root, PNANOVDB_REF(ijk), PNANOVDB_REF(level)); return read_at(level, address, ijk); } CUDA_CALLABLE inline T read_cache(const pnanovdb_coord_t &ijk) { pnanovdb_uint32_t level; const pnanovdb_address_t address = pnanovdb_readaccessor_get_value_address_and_level( grid_type, buf, PNANOVDB_REF(accessor), PNANOVDB_REF(ijk), PNANOVDB_REF(level)); return read_at(level, address, ijk); } CUDA_CALLABLE inline T read_at(pnanovdb_uint32_t level, const pnanovdb_address_t address, const pnanovdb_coord_t &ijk) const { if (level == 0) { pnanovdb_uint64_t voxel_index = get_grid_voxel_index(grid_type, buf, address, ijk); if (voxel_index > 0) { return *wp::address(data, voxel_index - 1); } } return background; } CUDA_CALLABLE inline void adj_read_single(const pnanovdb_coord_t &ijk, const T &adj_ret) { pnanovdb_uint32_t level; const pnanovdb_address_t address = pnanovdb_root_get_value_address_and_level(grid_type, buf, root, PNANOVDB_REF(ijk), PNANOVDB_REF(level)); adj_read_at(level, address, ijk, adj_ret); } CUDA_CALLABLE inline void adj_read_cache(const pnanovdb_coord_t &ijk, const T &adj_ret) { pnanovdb_uint32_t level; const pnanovdb_address_t address = pnanovdb_readaccessor_get_value_address_and_level( grid_type, buf, PNANOVDB_REF(accessor), PNANOVDB_REF(ijk), PNANOVDB_REF(level)); adj_read_at(level, address, ijk, adj_ret); } CUDA_CALLABLE inline void adj_read_at(pnanovdb_uint32_t level, const pnanovdb_address_t address, const pnanovdb_coord_t &ijk, const T &adj_ret) const { if (level == 0) { pnanovdb_uint64_t voxel_index = get_grid_voxel_index(grid_type, buf, address, ijk); if (voxel_index > 0) { adj_atomic_add(&index_grad(data, voxel_index - 1), adj_ret); return; } } *adj_background += adj_ret; } }; CUDA_CALLABLE inline pnanovdb_coord_t vec3_round_to_coord(const pnanovdb_vec3_t a) { pnanovdb_coord_t v; v.x = pnanovdb_float_to_int32(roundf(a.x)); v.y = pnanovdb_float_to_int32(roundf(a.y)); v.z = pnanovdb_float_to_int32(roundf(a.z)); return v; } template <typename T> struct val_traits { using grad_t = vec_t<3, T>; using scalar_t = T; // multiplies the gradient on the right // needs to be specialized for scalar types as gradient is stored as column rather than row vector static CUDA_CALLABLE inline T rmul(const grad_t &grad, const vec_t<3, scalar_t> &rhs) { return dot(grad, rhs); } }; template <unsigned Length, typename T> struct val_traits<vec_t<Length, T>> { using grad_t = mat_t<3, Length, T>; using scalar_t = T; static CUDA_CALLABLE inline vec_t<Length, T> rmul(const grad_t &grad, const vec_t<3, scalar_t> &rhs) { return mul(grad, rhs); } }; // Sampling the volume at the given index-space coordinates, uvw can be fractional template <typename Accessor> CUDA_CALLABLE inline typename Accessor::ValueType volume_sample(Accessor &accessor, vec3 uvw, int sampling_mode) { using T = 
typename Accessor::ValueType; using w_t = typename val_traits<T>::scalar_t; if (!accessor.is_valid()) { return 0; } const pnanovdb_buf_t buf = accessor.buf; const pnanovdb_vec3_t uvw_pnano{uvw[0], uvw[1], uvw[2]}; if (sampling_mode == CLOSEST) { const pnanovdb_coord_t ijk = vec3_round_to_coord(uvw_pnano); return accessor.read_single(ijk); } else if (sampling_mode == LINEAR) { // NB. linear sampling is not used on int volumes constexpr pnanovdb_coord_t OFFSETS[] = { {0, 0, 0}, {0, 0, 1}, {0, 1, 0}, {0, 1, 1}, {1, 0, 0}, {1, 0, 1}, {1, 1, 0}, {1, 1, 1}, }; const pnanovdb_vec3_t ijk_base{floorf(uvw_pnano.x), floorf(uvw_pnano.y), floorf(uvw_pnano.z)}; const pnanovdb_vec3_t ijk_frac{uvw_pnano.x - ijk_base.x, uvw_pnano.y - ijk_base.y, uvw_pnano.z - ijk_base.z}; const pnanovdb_coord_t ijk{(pnanovdb_int32_t)ijk_base.x, (pnanovdb_int32_t)ijk_base.y, (pnanovdb_int32_t)ijk_base.z}; accessor.init_cache(); T val = 0; const float wx[2]{1 - ijk_frac.x, ijk_frac.x}; const float wy[2]{1 - ijk_frac.y, ijk_frac.y}; const float wz[2]{1 - ijk_frac.z, ijk_frac.z}; for (int idx = 0; idx < 8; ++idx) { const pnanovdb_coord_t &offs = OFFSETS[idx]; const pnanovdb_coord_t ijk_shifted = pnanovdb_coord_add(ijk, offs); const T v = accessor.read_cache(ijk_shifted); const w_t w = wx[offs.x] * wy[offs.y] * wz[offs.z]; val = add(val, w * v); } return val; } return 0; } template <typename Accessor> CUDA_CALLABLE inline void adj_volume_sample(Accessor &accessor, vec3 uvw, int sampling_mode, vec3 &adj_uvw, const typename Accessor::ValueType &adj_ret) { // TODO: accessor data gradients using T = typename Accessor::ValueType; using w_t = typename val_traits<T>::scalar_t; using w_grad_t = vec_t<3, w_t>; if (!accessor.is_valid()) { return; } const pnanovdb_buf_t buf = accessor.buf; const pnanovdb_vec3_t uvw_pnano{uvw[0], uvw[1], uvw[2]}; if (sampling_mode != LINEAR) { const pnanovdb_coord_t ijk = vec3_round_to_coord(uvw_pnano); accessor.adj_read_single(ijk, adj_ret); return; } constexpr pnanovdb_coord_t OFFSETS[] = { {0, 0, 0}, {0, 0, 1}, {0, 1, 0}, {0, 1, 1}, {1, 0, 0}, {1, 0, 1}, {1, 1, 0}, {1, 1, 1}, }; const pnanovdb_vec3_t ijk_base{floorf(uvw_pnano.x), floorf(uvw_pnano.y), floorf(uvw_pnano.z)}; const pnanovdb_vec3_t ijk_frac{uvw_pnano.x - ijk_base.x, uvw_pnano.y - ijk_base.y, uvw_pnano.z - ijk_base.z}; const pnanovdb_coord_t ijk{(pnanovdb_int32_t)ijk_base.x, (pnanovdb_int32_t)ijk_base.y, (pnanovdb_int32_t)ijk_base.z}; accessor.init_cache(); const float wx[2]{1 - ijk_frac.x, ijk_frac.x}; const float wy[2]{1 - ijk_frac.y, ijk_frac.y}; const float wz[2]{1 - ijk_frac.z, ijk_frac.z}; for (int idx = 0; idx < 8; ++idx) { const pnanovdb_coord_t &offs = OFFSETS[idx]; const pnanovdb_coord_t ijk_shifted = pnanovdb_coord_add(ijk, offs); const T v = accessor.read_cache(ijk_shifted); const vec3 signs(offs.x * 2 - 1, offs.y * 2 - 1, offs.z * 2 - 1); const w_t w = wx[offs.x] * wy[offs.y] * wz[offs.z]; const w_grad_t grad_w(signs[0] * wy[offs.y] * wz[offs.z], signs[1] * wx[offs.x] * wz[offs.z], signs[2] * wx[offs.x] * wy[offs.y]); adj_uvw += vec3(mul(w_t(dot(v, adj_ret)), grad_w)); const T adj_v = w * adj_ret; accessor.adj_read_cache(ijk_shifted, adj_v); } } // Sampling the volume at the given index-space coordinates, uvw can be fractional template <typename Accessor> CUDA_CALLABLE inline typename Accessor::ValueType volume_sample_grad( Accessor &accessor, vec3 uvw, int sampling_mode, typename val_traits<typename Accessor::ValueType>::grad_t &grad) { using T = typename Accessor::ValueType; using grad_T = typename val_traits<T>::grad_t; using 
w_t = typename val_traits<T>::scalar_t; using w_grad_t = vec_t<3, w_t>; grad = grad_T{}; if (!accessor.is_valid()) { return 0; } const pnanovdb_buf_t buf = accessor.buf; const pnanovdb_vec3_t uvw_pnano{uvw[0], uvw[1], uvw[2]}; if (sampling_mode == CLOSEST) { const pnanovdb_coord_t ijk = vec3_round_to_coord(uvw_pnano); return accessor.read_single(ijk); } else if (sampling_mode == LINEAR) { // NB. linear sampling is not used on int volumes constexpr pnanovdb_coord_t OFFSETS[] = { {0, 0, 0}, {0, 0, 1}, {0, 1, 0}, {0, 1, 1}, {1, 0, 0}, {1, 0, 1}, {1, 1, 0}, {1, 1, 1}, }; const pnanovdb_vec3_t ijk_base{floorf(uvw_pnano.x), floorf(uvw_pnano.y), floorf(uvw_pnano.z)}; const pnanovdb_vec3_t ijk_frac{uvw_pnano.x - ijk_base.x, uvw_pnano.y - ijk_base.y, uvw_pnano.z - ijk_base.z}; const pnanovdb_coord_t ijk{(pnanovdb_int32_t)ijk_base.x, (pnanovdb_int32_t)ijk_base.y, (pnanovdb_int32_t)ijk_base.z}; accessor.init_cache(); T val = 0; const float wx[2]{1 - ijk_frac.x, ijk_frac.x}; const float wy[2]{1 - ijk_frac.y, ijk_frac.y}; const float wz[2]{1 - ijk_frac.z, ijk_frac.z}; for (int idx = 0; idx < 8; ++idx) { const pnanovdb_coord_t &offs = OFFSETS[idx]; const pnanovdb_coord_t ijk_shifted = pnanovdb_coord_add(ijk, offs); const T v = accessor.read_cache(ijk_shifted); const vec3 signs(offs.x * 2 - 1, offs.y * 2 - 1, offs.z * 2 - 1); const w_t w = wx[offs.x] * wy[offs.y] * wz[offs.z]; const w_grad_t grad_w(signs[0] * wy[offs.y] * wz[offs.z], signs[1] * wx[offs.x] * wz[offs.z], signs[2] * wx[offs.x] * wy[offs.y]); val = add(val, w * v); grad += outer(v, grad_w); } return val; } return 0; } template <typename Accessor> CUDA_CALLABLE inline void adj_volume_sample_grad(Accessor &accessor, vec3 uvw, int sampling_mode, typename val_traits<typename Accessor::ValueType>::grad_t &grad, vec3 &adj_uvw, typename val_traits<typename Accessor::ValueType>::grad_t &adj_grad, const typename Accessor::ValueType &adj_ret) { // TODO: accessor data gradients using T = typename Accessor::ValueType; using grad_T = typename val_traits<T>::grad_t; using w_t = typename val_traits<T>::scalar_t; using w_grad_t = vec_t<3, w_t>; using w_hess_t = mat_t<3, 3, w_t>; if (!accessor.is_valid()) { return; } const pnanovdb_buf_t buf = accessor.buf; const pnanovdb_vec3_t uvw_pnano{uvw[0], uvw[1], uvw[2]}; if (sampling_mode != LINEAR) { const pnanovdb_coord_t ijk = vec3_round_to_coord(uvw_pnano); accessor.adj_read_single(ijk, adj_ret); return; } constexpr pnanovdb_coord_t OFFSETS[] = { {0, 0, 0}, {0, 0, 1}, {0, 1, 0}, {0, 1, 1}, {1, 0, 0}, {1, 0, 1}, {1, 1, 0}, {1, 1, 1}, }; const pnanovdb_vec3_t ijk_base{floorf(uvw_pnano.x), floorf(uvw_pnano.y), floorf(uvw_pnano.z)}; const pnanovdb_vec3_t ijk_frac{uvw_pnano.x - ijk_base.x, uvw_pnano.y - ijk_base.y, uvw_pnano.z - ijk_base.z}; const pnanovdb_coord_t ijk{(pnanovdb_int32_t)ijk_base.x, (pnanovdb_int32_t)ijk_base.y, (pnanovdb_int32_t)ijk_base.z}; accessor.init_cache(); const float wx[2]{1 - ijk_frac.x, ijk_frac.x}; const float wy[2]{1 - ijk_frac.y, ijk_frac.y}; const float wz[2]{1 - ijk_frac.z, ijk_frac.z}; for (int idx = 0; idx < 8; ++idx) { const pnanovdb_coord_t &offs = OFFSETS[idx]; const pnanovdb_coord_t ijk_shifted = pnanovdb_coord_add(ijk, offs); const T v = accessor.read_cache(ijk_shifted); const vec3 signs(offs.x * 2 - 1, offs.y * 2 - 1, offs.z * 2 - 1); const w_t w = wx[offs.x] * wy[offs.y] * wz[offs.z]; const w_grad_t grad_w(signs[0] * wy[offs.y] * wz[offs.z], signs[1] * wx[offs.x] * wz[offs.z], signs[2] * wx[offs.x] * wy[offs.y]); adj_uvw += vec3(mul(w_t(dot(v, adj_ret)), grad_w)); const 
w_hess_t hess_w(0.0, signs[1] * signs[0] * wz[offs.z], signs[2] * signs[0] * wy[offs.y], signs[0] * signs[1] * wz[offs.z], 0.0, signs[2] * signs[1] * wx[offs.x], signs[0] * signs[2] * wy[offs.y], signs[1] * signs[2] * wx[offs.x], 0.0); adj_uvw += vec3(mul(mul(v, adj_grad), hess_w)); const T adj_v = w * adj_ret + val_traits<T>::rmul(adj_grad, grad_w); accessor.adj_read_cache(ijk_shifted, adj_v); } } } // namespace volume // namespace volume // exposed kernel builtins // volume_sample template <typename T> CUDA_CALLABLE inline T volume_sample(uint64_t id, vec3 uvw, int sampling_mode) { volume::leaf_value_accessor<T> accessor(volume::id_to_buffer(id)); return volume::volume_sample(accessor, uvw, sampling_mode); } template <typename T> CUDA_CALLABLE inline void adj_volume_sample(uint64_t id, vec3 uvw, int sampling_mode, uint64_t &adj_id, vec3 &adj_uvw, int &adj_sampling_mode, const T &adj_ret) { volume::leaf_value_accessor<T> accessor(volume::id_to_buffer(id)); volume::adj_volume_sample(accessor, uvw, sampling_mode, adj_uvw, adj_ret); } template <typename T> CUDA_CALLABLE inline T volume_sample_grad(uint64_t id, vec3 uvw, int sampling_mode, typename volume::val_traits<T>::grad_t &grad) { volume::leaf_value_accessor<T> accessor(volume::id_to_buffer(id)); return volume::volume_sample_grad(accessor, uvw, sampling_mode, grad); } template <typename T> CUDA_CALLABLE inline void adj_volume_sample_grad(uint64_t id, vec3 uvw, int sampling_mode, typename volume::val_traits<T>::grad_t &grad, uint64_t &adj_id, vec3 &adj_uvw, int &adj_sampling_mode, typename volume::val_traits<T>::grad_t &adj_grad, const T &adj_ret) { volume::leaf_value_accessor<T> accessor(volume::id_to_buffer(id)); volume::adj_volume_sample_grad(accessor, uvw, sampling_mode, grad, adj_uvw, adj_grad, adj_ret); } // Sampling a float volume at the given index-space coordinates, uvw can be fractional CUDA_CALLABLE inline float volume_sample_f(uint64_t id, vec3 uvw, int sampling_mode) { return volume_sample<float>(id, uvw, sampling_mode); } // Sampling an int volume at the given index-space coordinates, uvw can be fractional CUDA_CALLABLE inline int32_t volume_sample_i(uint64_t id, vec3 uvw) { return volume_sample<int32_t>(id, uvw, volume::CLOSEST); } // Sampling a vector volume at the given index-space coordinates, uvw can be fractional CUDA_CALLABLE inline vec3 volume_sample_v(uint64_t id, vec3 uvw, int sampling_mode) { return volume_sample<vec3>(id, uvw, sampling_mode); } CUDA_CALLABLE inline void adj_volume_sample_f(uint64_t id, vec3 uvw, int sampling_mode, uint64_t &adj_id, vec3 &adj_uvw, int &adj_sampling_mode, const float &adj_ret) { adj_volume_sample(id, uvw, sampling_mode, adj_id, adj_uvw, adj_sampling_mode, adj_ret); } CUDA_CALLABLE inline void adj_volume_sample_v(uint64_t id, vec3 uvw, int sampling_mode, uint64_t &adj_id, vec3 &adj_uvw, int &adj_sampling_mode, const vec3 &adj_ret) { adj_volume_sample(id, uvw, sampling_mode, adj_id, adj_uvw, adj_sampling_mode, adj_ret); } CUDA_CALLABLE inline void adj_volume_sample_i(uint64_t id, vec3 uvw, uint64_t &adj_id, vec3 &adj_uvw, const int32_t &adj_ret) { // NOP } // Sampling the volume at the given index-space coordinates, uvw can be fractional CUDA_CALLABLE inline float volume_sample_grad_f(uint64_t id, vec3 uvw, int sampling_mode, vec3 &grad) { return volume_sample_grad<float>(id, uvw, sampling_mode, grad); } CUDA_CALLABLE inline void adj_volume_sample_grad_f(uint64_t id, vec3 uvw, int sampling_mode, vec3 &grad, uint64_t &adj_id, vec3 &adj_uvw, int &adj_sampling_mode, vec3 &adj_grad, 
const float &adj_ret) { adj_volume_sample_grad<float>(id, uvw, sampling_mode, grad, adj_id, adj_uvw, adj_sampling_mode, adj_grad, adj_ret); } // volume_sample_index template <typename T> CUDA_CALLABLE inline T volume_sample_index(uint64_t id, vec3 uvw, int sampling_mode, const array_t<T> &voxel_data, const T &background) { volume::index_value_accessor<T> accessor(volume::id_to_buffer(id), voxel_data, background); return volume::volume_sample(accessor, uvw, sampling_mode); } template <typename T> CUDA_CALLABLE inline void adj_volume_sample_index(uint64_t id, vec3 uvw, int sampling_mode, const array_t<T> &voxel_data, const T &background, uint64_t &adj_id, vec3 &adj_uvw, int &adj_sampling_mode, array_t<T> &adj_voxel_data, T &adj_background, const T &adj_ret) { volume::index_value_accessor<T> accessor(volume::id_to_buffer(id), voxel_data, background, &adj_background); volume::adj_volume_sample(accessor, uvw, sampling_mode, adj_uvw, adj_ret); } template <typename T> CUDA_CALLABLE inline T volume_sample_grad_index(uint64_t id, vec3 uvw, int sampling_mode, const array_t<T> &voxel_data, const T &background, typename volume::val_traits<T>::grad_t &grad) { volume::index_value_accessor<T> accessor(volume::id_to_buffer(id), voxel_data, background); return volume::volume_sample_grad(accessor, uvw, sampling_mode, grad); } template <typename T> CUDA_CALLABLE inline void adj_volume_sample_grad_index( uint64_t id, vec3 uvw, int sampling_mode, const array_t<T> &voxel_data, const T &background, typename volume::val_traits<T>::grad_t &grad, uint64_t &adj_id, vec3 &adj_uvw, int &adj_sampling_mode, array_t<T> &adj_voxel_data, T &adj_background, typename volume::val_traits<T>::grad_t &adj_grad, const T &adj_ret) { volume::index_value_accessor<T> accessor(volume::id_to_buffer(id), voxel_data, background, &adj_background); volume::adj_volume_sample_grad(accessor, uvw, sampling_mode, grad, adj_uvw, adj_grad, adj_ret); } // volume_lookup template <typename T> CUDA_CALLABLE inline T volume_lookup(uint64_t id, int32_t i, int32_t j, int32_t k) { using traits = volume::pnano_traits<T>; const pnanovdb_buf_t buf = volume::id_to_buffer(id); if (volume::get_grid_type(buf) != traits::GRID_TYPE) return 0; const pnanovdb_root_handle_t root = volume::get_root(buf); const pnanovdb_coord_t ijk{i, j, k}; T val; volume::pnano_read(val, buf, root, PNANOVDB_REF(ijk)); return val; } template <typename T> CUDA_CALLABLE inline void adj_volume_lookup(uint64_t id, int32_t i, int32_t j, int32_t k, uint64_t &adj_id, int32_t &adj_i, int32_t &adj_j, int32_t &adj_k, const T &adj_ret) { // NOP -- adjoint of grid values is not available } CUDA_CALLABLE inline float volume_lookup_f(uint64_t id, int32_t i, int32_t j, int32_t k) { return volume_lookup<float>(id, i, j, k); } CUDA_CALLABLE inline int32_t volume_lookup_i(uint64_t id, int32_t i, int32_t j, int32_t k) { return volume_lookup<int32_t>(id, i, j, k); } CUDA_CALLABLE inline vec3 volume_lookup_v(uint64_t id, int32_t i, int32_t j, int32_t k) { return volume_lookup<vec3>(id, i, j, k); } CUDA_CALLABLE inline void adj_volume_lookup_f(uint64_t id, int32_t i, int32_t j, int32_t k, uint64_t &adj_id, int32_t &adj_i, int32_t &adj_j, int32_t &adj_k, const float &adj_ret) { adj_volume_lookup(id, i, j, k, adj_id, adj_i, adj_j, adj_k, adj_ret); } CUDA_CALLABLE inline void adj_volume_lookup_i(uint64_t id, int32_t i, int32_t j, int32_t k, uint64_t &adj_id, int32_t &adj_i, int32_t &adj_j, int32_t &adj_k, const int32_t &adj_ret) { adj_volume_lookup(id, i, j, k, adj_id, adj_i, adj_j, adj_k, adj_ret); } 
CUDA_CALLABLE inline void adj_volume_lookup_v(uint64_t id, int32_t i, int32_t j, int32_t k, uint64_t &adj_id, int32_t &adj_i, int32_t &adj_j, int32_t &adj_k, const vec3 &adj_ret) { adj_volume_lookup(id, i, j, k, adj_id, adj_i, adj_j, adj_k, adj_ret); } CUDA_CALLABLE inline int32_t volume_lookup_index(uint64_t id, int32_t i, int32_t j, int32_t k) { const pnanovdb_buf_t buf = volume::id_to_buffer(id); const pnanovdb_root_handle_t root = volume::get_root(buf); const pnanovdb_grid_type_t grid_type = volume::get_grid_type(buf); const pnanovdb_coord_t ijk{i, j, k}; pnanovdb_uint32_t level; const pnanovdb_address_t address = pnanovdb_root_get_value_address_and_level(grid_type, buf, root, PNANOVDB_REF(ijk), PNANOVDB_REF(level)); if (level == 0) { pnanovdb_uint64_t voxel_index = volume::get_grid_voxel_index(grid_type, buf, address, ijk); return static_cast<int32_t>(voxel_index) - 1; } return -1; } CUDA_CALLABLE inline void adj_volume_lookup_index(uint64_t id, int32_t i, int32_t j, int32_t k, uint64_t &adj_id, int32_t &adj_i, int32_t &adj_j, int32_t &adj_k, const vec3 &adj_ret) { // NOP } // volume_store template <typename T> CUDA_CALLABLE inline void volume_store(uint64_t id, int32_t i, int32_t j, int32_t k, const T &value) { using traits = volume::pnano_traits<T>; const pnanovdb_buf_t buf = volume::id_to_buffer(id); if (volume::get_grid_type(buf) != traits::GRID_TYPE) return; const pnanovdb_root_handle_t root = volume::get_root(buf); const pnanovdb_coord_t ijk{i, j, k}; pnanovdb_uint32_t level; const pnanovdb_address_t address = pnanovdb_root_get_value_address_and_level(traits::GRID_TYPE, buf, root, PNANOVDB_REF(ijk), PNANOVDB_REF(level)); if (level == 0) { // only write at at leaf level (prevent modifying background value) // TODO is this the intended semantics? or should be allow writing to background? 
volume::pnano_write(value, buf, address); } } template <typename T> CUDA_CALLABLE inline void adj_volume_store(uint64_t id, int32_t i, int32_t j, int32_t k, const T &value, uint64_t &adj_id, int32_t &adj_i, int32_t &adj_j, int32_t &adj_k, T &adj_value) { // NOP -- adjoint of grid values is not available } CUDA_CALLABLE inline void volume_store_f(uint64_t id, int32_t i, int32_t j, int32_t k, const float &value) { volume_store(id, i, j, k, value); } CUDA_CALLABLE inline void adj_volume_store_f(uint64_t id, int32_t i, int32_t j, int32_t k, const float &value, uint64_t &adj_id, int32_t &adj_i, int32_t &adj_j, int32_t &adj_k, float &adj_value) { adj_volume_store(id, i, j, k, value, adj_id, adj_i, adj_j, adj_k, adj_value); } CUDA_CALLABLE inline void volume_store_v(uint64_t id, int32_t i, int32_t j, int32_t k, const vec3 &value) { volume_store(id, i, j, k, value); } CUDA_CALLABLE inline void adj_volume_store_v(uint64_t id, int32_t i, int32_t j, int32_t k, const vec3 &value, uint64_t &adj_id, int32_t &adj_i, int32_t &adj_j, int32_t &adj_k, vec3 &adj_value) { adj_volume_store(id, i, j, k, value, adj_id, adj_i, adj_j, adj_k, adj_value); } CUDA_CALLABLE inline void volume_store_i(uint64_t id, int32_t i, int32_t j, int32_t k, const int32_t &value) { volume_store(id, i, j, k, value); } CUDA_CALLABLE inline void adj_volume_store_i(uint64_t id, int32_t i, int32_t j, int32_t k, const int32_t &value, uint64_t &adj_id, int32_t &adj_i, int32_t &adj_j, int32_t &adj_k, int32_t &adj_value) { adj_volume_store(id, i, j, k, value, adj_id, adj_i, adj_j, adj_k, adj_value); } // Transform position from index space to world space CUDA_CALLABLE inline vec3 volume_index_to_world(uint64_t id, vec3 uvw) { const pnanovdb_buf_t buf = volume::id_to_buffer(id); const pnanovdb_grid_handle_t grid = {0u}; const pnanovdb_vec3_t pos{uvw[0], uvw[1], uvw[2]}; const pnanovdb_vec3_t xyz = pnanovdb_grid_index_to_worldf(buf, grid, PNANOVDB_REF(pos)); return {xyz.x, xyz.y, xyz.z}; } // Transform position from world space to index space CUDA_CALLABLE inline vec3 volume_world_to_index(uint64_t id, vec3 xyz) { const pnanovdb_buf_t buf = volume::id_to_buffer(id); const pnanovdb_grid_handle_t grid = {0u}; const pnanovdb_vec3_t pos{xyz[0], xyz[1], xyz[2]}; const pnanovdb_vec3_t uvw = pnanovdb_grid_world_to_indexf(buf, grid, PNANOVDB_REF(pos)); return {uvw.x, uvw.y, uvw.z}; } CUDA_CALLABLE inline void adj_volume_index_to_world(uint64_t id, vec3 uvw, uint64_t &adj_id, vec3 &adj_uvw, const vec3 &adj_ret) { const pnanovdb_buf_t buf = volume::id_to_buffer(id); const pnanovdb_grid_handle_t grid = {0u}; const pnanovdb_vec3_t pos{adj_ret[0], adj_ret[1], adj_ret[2]}; const pnanovdb_vec3_t xyz = pnanovdb_grid_index_to_world_dirf(buf, grid, PNANOVDB_REF(pos)); adj_uvw = add(adj_uvw, vec3{xyz.x, xyz.y, xyz.z}); } CUDA_CALLABLE inline void adj_volume_world_to_index(uint64_t id, vec3 xyz, uint64_t &adj_id, vec3 &adj_xyz, const vec3 &adj_ret) { const pnanovdb_buf_t buf = volume::id_to_buffer(id); const pnanovdb_grid_handle_t grid = {0u}; const pnanovdb_vec3_t pos{adj_ret[0], adj_ret[1], adj_ret[2]}; const pnanovdb_vec3_t uvw = pnanovdb_grid_world_to_index_dirf(buf, grid, PNANOVDB_REF(pos)); adj_xyz = add(adj_xyz, vec3{uvw.x, uvw.y, uvw.z}); } // Transform direction from index space to world space CUDA_CALLABLE inline vec3 volume_index_to_world_dir(uint64_t id, vec3 uvw) { const pnanovdb_buf_t buf = volume::id_to_buffer(id); const pnanovdb_grid_handle_t grid = {0u}; const pnanovdb_vec3_t pos{uvw[0], uvw[1], uvw[2]}; const pnanovdb_vec3_t xyz = 
pnanovdb_grid_index_to_world_dirf(buf, grid, PNANOVDB_REF(pos)); return {xyz.x, xyz.y, xyz.z}; } // Transform direction from world space to index space CUDA_CALLABLE inline vec3 volume_world_to_index_dir(uint64_t id, vec3 xyz) { const pnanovdb_buf_t buf = volume::id_to_buffer(id); const pnanovdb_grid_handle_t grid = {0u}; const pnanovdb_vec3_t pos{xyz[0], xyz[1], xyz[2]}; const pnanovdb_vec3_t uvw = pnanovdb_grid_world_to_index_dirf(buf, grid, PNANOVDB_REF(pos)); return {uvw.x, uvw.y, uvw.z}; } CUDA_CALLABLE inline void adj_volume_index_to_world_dir(uint64_t id, vec3 uvw, uint64_t &adj_id, vec3 &adj_uvw, const vec3 &adj_ret) { adj_volume_index_to_world(id, uvw, adj_id, adj_uvw, adj_ret); } CUDA_CALLABLE inline void adj_volume_world_to_index_dir(uint64_t id, vec3 xyz, uint64_t &adj_id, vec3 &adj_xyz, const vec3 &adj_ret) { adj_volume_world_to_index(id, xyz, adj_id, adj_xyz, adj_ret); } } // namespace wp
36,349
C
36.825182
120
0.613112
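The LINEAR branch above packs the trilinear value and its index-space gradient into a single loop over the eight cell corners. As a self-contained illustration of that arithmetic (a sketch only: DenseGrid, its synthetic test field, and sample_grad are made up here and are not Warp or NanoVDB API), the following reproduces the same weight and signed-derivative construction on a dense grid:

// Standalone sketch of the trilinear weight/gradient scheme used by
// volume_sample_grad above. DenseGrid and sample_grad are illustrative
// stand-ins, not Warp or NanoVDB API.
#include <cmath>
#include <cstdio>

struct DenseGrid
{
    // synthetic linear test field: value(i, j, k) = i + 2j + 3k
    float read(int i, int j, int k) const { return float(i + 2 * j + 3 * k); }
};

// Returns the interpolated value at index-space position uvw and writes
// d(value)/d(uvw) into grad, accumulating over the 8 cell corners exactly
// like the LINEAR branch of volume_sample_grad.
float sample_grad(const DenseGrid& g, const float uvw[3], float grad[3])
{
    const float bx = std::floor(uvw[0]), by = std::floor(uvw[1]), bz = std::floor(uvw[2]);
    const int i = int(bx), j = int(by), k = int(bz);
    const float fx = uvw[0] - bx, fy = uvw[1] - by, fz = uvw[2] - bz;

    const float wx[2] = {1.0f - fx, fx}, wy[2] = {1.0f - fy, fy}, wz[2] = {1.0f - fz, fz};

    float val = 0.0f;
    grad[0] = grad[1] = grad[2] = 0.0f;

    for (int ox = 0; ox < 2; ++ox)
        for (int oy = 0; oy < 2; ++oy)
            for (int oz = 0; oz < 2; ++oz)
            {
                const float v = g.read(i + ox, j + oy, k + oz);
                // signs of d(weight)/d(frac), matching offs.x * 2 - 1 above
                const float sx = float(ox * 2 - 1), sy = float(oy * 2 - 1), sz = float(oz * 2 - 1);
                val += wx[ox] * wy[oy] * wz[oz] * v;
                grad[0] += sx * wy[oy] * wz[oz] * v;
                grad[1] += sy * wx[ox] * wz[oz] * v;
                grad[2] += sz * wx[ox] * wy[oy] * v;
            }
    return val;
}

int main()
{
    DenseGrid g;
    float grad[3];
    const float uvw[3] = {2.25f, 3.5f, 1.75f};
    const float v = sample_grad(g, uvw, grad);
    std::printf("value=%g grad=(%g, %g, %g)\n", v, grad[0], grad[1], grad[2]);
}

Because the test field is linear, the recovered gradient is exactly (1, 2, 3), which is a convenient sanity check for the sign convention on the corner offsets.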
NVIDIA/warp/warp/native/mesh.cpp
/** Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #include "mesh.h" #include "bvh.h" #include "warp.h" #include "cuda_util.h" using namespace wp; #include <map> namespace { // host-side copy of mesh descriptors, maps GPU mesh address (id) to a CPU desc std::map<uint64_t, Mesh> g_mesh_descriptors; } // anonymous namespace namespace wp { bool mesh_get_descriptor(uint64_t id, Mesh& mesh) { const auto& iter = g_mesh_descriptors.find(id); if (iter == g_mesh_descriptors.end()) return false; else mesh = iter->second; return true; } void mesh_add_descriptor(uint64_t id, const Mesh& mesh) { g_mesh_descriptors[id] = mesh; } void mesh_rem_descriptor(uint64_t id) { g_mesh_descriptors.erase(id); } } // namespace wp void bvh_refit_with_solid_angle_recursive_host(BVH& bvh, int index, Mesh& mesh) { BVHPackedNodeHalf& lower = bvh.node_lowers[index]; BVHPackedNodeHalf& upper = bvh.node_uppers[index]; if (lower.b) { // Leaf, compute properties const int leaf_index = lower.i; precompute_triangle_solid_angle_props(mesh.points[mesh.indices[leaf_index*3+0]], mesh.points[mesh.indices[leaf_index*3+1]], mesh.points[mesh.indices[leaf_index*3+2]], mesh.solid_angle_props[index]); (vec3&)lower = mesh.solid_angle_props[index].box.lower; (vec3&)upper = mesh.solid_angle_props[index].box.upper; } else { int left_index = lower.i; int right_index = upper.i; bvh_refit_with_solid_angle_recursive_host(bvh, left_index, mesh); bvh_refit_with_solid_angle_recursive_host(bvh, right_index, mesh); // combine SolidAngleProps* left_child_data = &mesh.solid_angle_props[left_index]; SolidAngleProps* right_child_data = (left_index != right_index) ? 
&mesh.solid_angle_props[right_index] : NULL; combine_precomputed_solid_angle_props(mesh.solid_angle_props[index], left_child_data, right_child_data); // compute union of children const vec3& left_lower = (vec3&)bvh.node_lowers[left_index]; const vec3& left_upper = (vec3&)bvh.node_uppers[left_index]; const vec3& right_lower = (vec3&)bvh.node_lowers[right_index]; const vec3& right_upper = (vec3&)bvh.node_uppers[right_index]; // union of child bounds vec3 new_lower = min(left_lower, right_lower); vec3 new_upper = max(left_upper, right_upper); // write new BVH nodes (vec3&)lower = new_lower; (vec3&)upper = new_upper; } } void bvh_refit_with_solid_angle_host(BVH& bvh, Mesh& mesh) { bvh_refit_with_solid_angle_recursive_host(bvh, 0, mesh); } uint64_t mesh_create_host(array_t<wp::vec3> points, array_t<wp::vec3> velocities, array_t<int> indices, int num_points, int num_tris, int support_winding_number) { Mesh* m = new Mesh(points, velocities, indices, num_points, num_tris); m->lowers = new vec3[num_tris]; m->uppers = new vec3[num_tris]; float sum = 0.0; for (int i=0; i < num_tris; ++i) { wp::vec3& p0 = points[indices[i*3+0]]; wp::vec3& p1 = points[indices[i*3+1]]; wp::vec3& p2 = points[indices[i*3+2]]; // compute triangle bounds bounds3 b; b.add_point(p0); b.add_point(p1); b.add_point(p2); m->lowers[i] = b.lower; m->uppers[i] = b.upper; // compute edge lengths sum += length(p0-p1) + length(p0-p2) + length(p2-p1); } m->average_edge_length = sum / (num_tris*3); wp::bvh_create_host(m->lowers, m->uppers, num_tris, m->bvh); if (support_winding_number) { // Let's first compute the sold int num_bvh_nodes = 2*num_tris-1; m->solid_angle_props = new SolidAngleProps[num_bvh_nodes]; bvh_refit_with_solid_angle_host(m->bvh, *m); } return (uint64_t)m; } void mesh_destroy_host(uint64_t id) { Mesh* m = (Mesh*)(id); delete[] m->lowers; delete[] m->uppers; if (m->solid_angle_props) { delete [] m->solid_angle_props; } bvh_destroy_host(m->bvh); delete m; } void mesh_refit_host(uint64_t id) { Mesh* m = (Mesh*)(id); float sum = 0.0; for (int i=0; i < m->num_tris; ++i) { wp::vec3 p0 = m->points.data[m->indices.data[i*3+0]]; wp::vec3 p1 = m->points.data[m->indices.data[i*3+1]]; wp::vec3 p2 = m->points.data[m->indices.data[i*3+2]]; // compute triangle bounds bounds3 b; b.add_point(p0); b.add_point(p1); b.add_point(p2); m->lowers[i] = b.lower; m->uppers[i] = b.upper; sum += length(p0-p1) + length(p0-p2) + length(p2-p1); } m->average_edge_length = sum / (m->num_tris*3); if (m->solid_angle_props) { // If solid angle were used, use refit solid angle bvh_refit_with_solid_angle_host(m->bvh, *m); } else { bvh_refit_host(m->bvh); } } // stubs for non-CUDA platforms #if !WP_ENABLE_CUDA WP_API uint64_t mesh_create_device(void* context, wp::array_t<wp::vec3> points, wp::array_t<wp::vec3> velocities, wp::array_t<int> tris, int num_points, int num_tris, int support_winding_number) { return 0; } WP_API void mesh_destroy_device(uint64_t id) {} WP_API void mesh_refit_device(uint64_t id) {} #endif // !WP_ENABLE_CUDA
5,802
C++
27.446078
214
0.612892
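mesh_create_host() and mesh_refit_host() both boil down to the same per-triangle pass: build each face's AABB (the BVH leaf bounds) and accumulate edge lengths for the mesh's average_edge_length. A minimal standalone version of that pass is sketched below, using plain stand-in types rather than wp::vec3 and wp::bounds3:

// Minimal sketch of the per-triangle preprocessing done in mesh_create_host():
// per-face AABBs (the BVH leaf bounds) plus the mesh's average edge length.
// V3 and the single test triangle are ad hoc, not Warp types or data.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

struct V3 { float x, y, z; };

static V3 vmin(V3 a, V3 b) { return {std::min(a.x, b.x), std::min(a.y, b.y), std::min(a.z, b.z)}; }
static V3 vmax(V3 a, V3 b) { return {std::max(a.x, b.x), std::max(a.y, b.y), std::max(a.z, b.z)}; }
static float dist(V3 a, V3 b)
{
    return std::sqrt((a.x - b.x) * (a.x - b.x) + (a.y - b.y) * (a.y - b.y) + (a.z - b.z) * (a.z - b.z));
}

int main()
{
    // a single right triangle as test data
    std::vector<V3> points = {{0, 0, 0}, {1, 0, 0}, {0, 1, 0}};
    std::vector<int> indices = {0, 1, 2};
    const int num_tris = (int)indices.size() / 3;

    std::vector<V3> lowers(num_tris), uppers(num_tris);
    float edge_sum = 0.0f;

    for (int i = 0; i < num_tris; ++i)
    {
        const V3 p0 = points[indices[i * 3 + 0]];
        const V3 p1 = points[indices[i * 3 + 1]];
        const V3 p2 = points[indices[i * 3 + 2]];

        lowers[i] = vmin(vmin(p0, p1), p2); // leaf bounds handed to the BVH builder
        uppers[i] = vmax(vmax(p0, p1), p2);

        edge_sum += dist(p0, p1) + dist(p0, p2) + dist(p2, p1);
    }

    const float average_edge_length = edge_sum / (num_tris * 3);
    std::printf("avg edge length = %f\n", average_edge_length); // ~1.138 for this triangle
}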
NVIDIA/warp/warp/native/crt.cpp
/** Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

#include "crt.h"

#include <cmath>
#include <cstdio>
#include <cassert>

extern "C" WP_API int _wp_isfinite(double x)
{
    return std::isfinite(x);
}

extern "C" WP_API int _wp_isnan(double x)
{
    return std::isnan(x);
}

extern "C" WP_API int _wp_isinf(double x)
{
    return std::isinf(x);
}

extern "C" WP_API void _wp_assert(const char* expression, const char* file, unsigned int line)
{
    fflush(stdout);
    fprintf(stderr,
        "Assertion failed: '%s'\n"
        "At '%s:%d'\n",
        expression, file, line);
    fflush(stderr);

    // Now invoke the standard assert(), which may abort the program or break
    // into the debugger as decided by the runtime environment.
    assert(false && "assert() failed");
}
1,167
C++
26.16279
94
0.692374
NVIDIA/warp/warp/native/initializer_array.h
/** Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

#pragma once

namespace wp
{

// wp::initializer_array<> is a simple substitute for std::initializer_list<>
// which doesn't depend on compiler implementation-specific support. It copies
// elements by value and only supports array-style indexing.
template<unsigned Length, typename Type>
struct initializer_array
{
    const Type storage[Length];

    CUDA_CALLABLE const Type operator[](unsigned i)
    {
        return storage[i];
    }

    CUDA_CALLABLE const Type operator[](unsigned i) const
    {
        return storage[i];
    }
};

} // namespace wp
996
C
29.21212
78
0.737952
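A brief usage sketch of the header above (host-only, with CUDA_CALLABLE stubbed out so it compiles in isolation): the struct is aggregate-initialized with a fixed number of elements and read back by index. The sum() helper is hypothetical, purely for illustration, and only the const accessor from the original struct is reproduced.

// Host-side sketch of how wp::initializer_array is meant to be used: a
// fixed-length, by-value argument pack indexed like an array.
#include <cstdio>

#define CUDA_CALLABLE // stubbed out for a standalone host build

template<unsigned Length, typename Type>
struct initializer_array
{
    const Type storage[Length];
    CUDA_CALLABLE const Type operator[](unsigned i) const { return storage[i]; }
};

// hypothetical helper: reduce a fixed-length pack of floats
template<unsigned N>
float sum(const initializer_array<N, float>& values)
{
    float s = 0.0f;
    for (unsigned i = 0; i < N; ++i)
        s += values[i];
    return s;
}

int main()
{
    initializer_array<3, float> a = {{1.0f, 2.0f, 3.0f}};
    std::printf("%f\n", sum(a)); // 6.0
}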
NVIDIA/warp/warp/native/cutlass_gemm.cpp
/** Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 */

#include "builtin.h"

// stubs for platforms where there is no CUDA
#if !WP_ENABLE_CUDA || !WP_ENABLE_CUTLASS

extern "C"
{

WP_API bool cutlass_gemm(
    void* context, int compute_capability, int m, int n, int k, const char* datatype_str,
    const void* a, const void* b, const void* c, void* d, float alpha, float beta,
    bool row_major_a, bool row_major_b, bool allow_tf32x3_arith, int batch_count)
{
    printf("CUDA is disabled and/or CUTLASS is disabled.\n");
    return false;
}

} // extern "C"

#endif // !WP_ENABLE_CUDA || !WP_ENABLE_CUTLASS
1,123
C++
31.114285
77
0.64203
NVIDIA/warp/warp/native/solid_angle.h
// This code is adapted from https://github.com/alecjacobson/WindingNumber/tree/1e6081e52905575d8e98fb8b7c0921274a18752f // The original license is below: /* MIT License Copyright (c) 2018 Side Effects Software Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ namespace wp { class SolidAngleProps{ public: vec3 average_p; vec3 normal; vec3 n_ij_diag; vec3 n_ijk_diag; float sum_permute_n_xyz; float two_n_xxy_n_yxx; float two_n_xxz_n_zxx; float two_n_yyz_n_zyy; float two_n_yyx_n_xyy; float two_n_zzx_n_xzz; float two_n_zzy_n_yzz; float n_xy; float n_yx; float n_yz; float n_zy; float n_zx; float n_xz; bounds3 box; vec3 area_P; float area; float max_p_dist_sq; }; CUDA_CALLABLE inline void compute_integrals( const vec3 &a, const vec3 &b, const vec3 &c, const vec3 &P, float *integral_ii, float *integral_ij, float *integral_ik, const int i) { // NOTE: a, b, and c must be in order of the i axis. // We're splitting the triangle at the middle i coordinate. const vec3 oab = b - a; const vec3 oac = c - a; const vec3 ocb = b - c; const float t = oab[i] / oac[i]; const int j = (i == 2) ? 0 : (i + 1); const int k = (j == 2) ? 0 : (j + 1); const float jdiff = t * oac[j] - oab[j]; const float kdiff = t * oac[k] - oab[k]; vec3 cross_a; cross_a[0] = (jdiff * oab[k] - kdiff * oab[j]); cross_a[1] = kdiff * oab[i]; cross_a[2] = jdiff * oab[i]; vec3 cross_c; cross_c[0] = (jdiff * ocb[k] - kdiff * ocb[j]); cross_c[1] = kdiff * ocb[i]; cross_c[2] = jdiff * ocb[i]; const float area_scale_a = length(cross_a); const float area_scale_c = length(cross_c); const float Pai = a[i] - P[i]; const float Pci = c[i] - P[i]; // Integral over the area of the triangle of (pi^2)dA, // by splitting the triangle into two at b, the a side // and the c side. 
const float int_ii_a = area_scale_a * (0.5f * Pai * Pai + (2.0f / 3.0f) * Pai * oab[i] + 0.25f * oab[i] * oab[i]); const float int_ii_c = area_scale_c * (0.5f * Pci * Pci + (2.0f / 3.0f) * Pci * ocb[i] + 0.25f * ocb[i] * ocb[i]); *integral_ii = int_ii_a + int_ii_c; int jk = j; float *integral = integral_ij; float diff = jdiff; while (true) // This only does 2 iterations, one for j and one for k { if (integral) { float obmidj = b[jk] + 0.5f * diff; float oabmidj = obmidj - a[jk]; float ocbmidj = obmidj - c[jk]; float Paj = a[jk] - P[jk]; float Pcj = c[jk] - P[jk]; // Integral over the area of the triangle of (pi*pj)dA const float int_ij_a = area_scale_a * (0.5f * Pai * Paj + (1.0f / 3.0f) * Pai * oabmidj + (1.0f / 3.0f) * Paj * oab[i] + 0.25f * oab[i] * oabmidj); const float int_ij_c = area_scale_c * (0.5f * Pci * Pcj + (1.0f / 3.0f) * Pci * ocbmidj + (1.0f / 3.0f) * Pcj * ocb[i] + 0.25f * ocb[i] * ocbmidj); *integral = int_ij_a + int_ij_c; } if (jk == k) break; jk = k; integral = integral_ik; diff = kdiff; } }; CUDA_CALLABLE inline void my_swap(int &a, int &b) { int c = a; a = b; b = c; } CUDA_CALLABLE inline void precompute_triangle_solid_angle_props(const vec3 &a, const vec3 &b, const vec3 &c, SolidAngleProps &my_data) { const vec3 ab = b - a; const vec3 ac = c - a; // Are weighted normal const vec3 N = 0.5f * cross(ab, ac); const float area2 = length_sq(N); const float area = sqrtf(area2); const vec3 P = (a + b + c) / 3.0f; my_data.box.add_point(a); my_data.box.add_point(b); my_data.box.add_point(c); my_data.average_p = P; my_data.area_P = P * area; my_data.normal = N; my_data.area = area; // NOTE: Due to P being at the centroid, triangles have Nij = 0 // contributions to Nij. my_data.n_ij_diag = 0.0f; my_data.n_xy = 0.0f; my_data.n_yx = 0.0f; my_data.n_yz = 0.0f; my_data.n_zy = 0.0f; my_data.n_zx = 0.0f; my_data.n_xz = 0.0f; // If it's zero-length, the results are zero, so we can skip. if (area == 0) { my_data.n_ijk_diag = 0.0f; my_data.sum_permute_n_xyz = 0.0f; my_data.two_n_xxy_n_yxx = 0.0f; my_data.two_n_xxz_n_zxx = 0.0f; my_data.two_n_yyz_n_zyy = 0.0f; my_data.two_n_yyx_n_xyy = 0.0f; my_data.two_n_zzx_n_xzz = 0.0f; my_data.two_n_zzy_n_yzz = 0.0f; return; } // We need to use the NORMALIZED normal to multiply the integrals by. vec3 n = N / area; // Figure out the order of a, b, and c in x, y, and z // for use in computing the integrals for Nijk. 
vec3 values[3] = {a, b, c}; int order_x[3] = {0, 1, 2}; if (a[0] > b[0]) my_swap(order_x[0], order_x[1]); if (values[order_x[0]][0] > c[0]) my_swap(order_x[0], order_x[2]); if (values[order_x[1]][0] > values[order_x[2]][0]) my_swap(order_x[1], order_x[2]); float dx = values[order_x[2]][0] - values[order_x[0]][0]; int order_y[3] = {0, 1, 2}; if (a[1] > b[1]) my_swap(order_y[0], order_y[1]); if (values[order_y[0]][1] > c[1]) my_swap(order_y[0], order_y[2]); if (values[order_y[1]][1] > values[order_y[2]][1]) my_swap(order_y[1], order_y[2]); float dy = values[order_y[2]][1] - values[order_y[0]][1]; int order_z[3] = {0, 1, 2}; if (a[2] > b[2]) my_swap(order_z[0], order_z[1]); if (values[order_z[0]][2] > c[2]) my_swap(order_z[0], order_z[2]); if (values[order_z[1]][2] > values[order_z[2]][2]) my_swap(order_z[1], order_z[2]); float dz = values[order_z[2]][2] - values[order_z[0]][2]; float integral_xx = 0.0f; float integral_xy = 0.0f; float integral_yy = 0.0f; float integral_yz = 0.0f; float integral_zz = 0.0f; float integral_zx = 0.0f; // Note that if the span of any axis is zero, the integral must be zero, // since there's a factor of (p_i-P_i), i.e. value minus average, // and every value must be equal to the average, giving zero. if (dx > 0) { compute_integrals( values[order_x[0]], values[order_x[1]], values[order_x[2]], P, &integral_xx, ((dx >= dy && dy > 0) ? &integral_xy : nullptr), ((dx >= dz && dz > 0) ? &integral_zx : nullptr), 0); } if (dy > 0) { compute_integrals( values[order_y[0]], values[order_y[1]], values[order_y[2]], P, &integral_yy, ((dy >= dz && dz > 0) ? &integral_yz : nullptr), ((dx < dy && dx > 0) ? &integral_xy : nullptr), 1); } if (dz > 0) { compute_integrals( values[order_z[0]], values[order_z[1]], values[order_z[2]], P, &integral_zz, ((dx < dz && dx > 0) ? &integral_zx : nullptr), ((dy < dz && dy > 0) ? 
&integral_yz : nullptr), 2); } vec3 Niii(integral_xx, integral_yy, integral_zz); Niii = cw_mul(Niii, n); my_data.n_ijk_diag = Niii; my_data.sum_permute_n_xyz = 2.0f * (n[0] * integral_yz + n[1] * integral_zx + n[2] * integral_xy); float Nxxy = n[0] * integral_xy; float Nxxz = n[0] * integral_zx; float Nyyz = n[1] * integral_yz; float Nyyx = n[1] * integral_xy; float Nzzx = n[2] * integral_zx; float Nzzy = n[2] * integral_yz; my_data.two_n_xxy_n_yxx = 2.0f * Nxxy + n[1] * integral_xx; my_data.two_n_xxz_n_zxx = 2.0f * Nxxz + n[2] * integral_xx; my_data.two_n_yyz_n_zyy = 2.0f * Nyyz + n[2] * integral_yy; my_data.two_n_yyx_n_xyy = 2.0f * Nyyx + n[0] * integral_yy; my_data.two_n_zzx_n_xzz = 2.0f * Nzzx + n[0] * integral_zz; my_data.two_n_zzy_n_yzz = 2.0f * Nzzy + n[1] * integral_zz; } CUDA_CALLABLE inline void combine_precomputed_solid_angle_props(SolidAngleProps &my_data, const SolidAngleProps *left_child_data, const SolidAngleProps *right_child_data) { vec3 N = left_child_data->normal; vec3 areaP = left_child_data->area_P; float area = left_child_data->area; if (right_child_data) { const vec3 local_N = right_child_data->normal; N += local_N; areaP += right_child_data->area_P; area += right_child_data->area; } my_data.normal = N; my_data.area_P = areaP; my_data.area = area; bounds3 box(left_child_data->box); if (right_child_data) { box = bounds_union(box, right_child_data->box); } // Normalize P vec3 averageP; if (area > 0) { averageP = areaP / area; } else { averageP = 0.5f * (box.lower + box.upper); } my_data.average_p = averageP; my_data.box = box; // We now have the current box's P, so we can adjust Nij and Nijk my_data.n_ij_diag = left_child_data->n_ij_diag; my_data.n_xy = 0.0f; my_data.n_yx = 0.0f; my_data.n_yz = 0.0f; my_data.n_zy = 0.0f; my_data.n_zx = 0.0f; my_data.n_xz = 0.0f; my_data.n_ijk_diag = left_child_data->n_ijk_diag; my_data.sum_permute_n_xyz = left_child_data->sum_permute_n_xyz; my_data.two_n_xxy_n_yxx = left_child_data->two_n_xxy_n_yxx; my_data.two_n_xxz_n_zxx = left_child_data->two_n_xxz_n_zxx; my_data.two_n_yyz_n_zyy = left_child_data->two_n_yyz_n_zyy; my_data.two_n_yyx_n_xyy = left_child_data->two_n_yyx_n_xyy; my_data.two_n_zzx_n_xzz = left_child_data->two_n_zzx_n_xzz; my_data.two_n_zzy_n_yzz = left_child_data->two_n_zzy_n_yzz; if (right_child_data) { my_data.n_ij_diag += right_child_data->n_ij_diag; my_data.n_ijk_diag += right_child_data->n_ijk_diag; my_data.sum_permute_n_xyz += right_child_data->sum_permute_n_xyz; my_data.two_n_xxy_n_yxx += right_child_data->two_n_xxy_n_yxx; my_data.two_n_xxz_n_zxx += right_child_data->two_n_xxz_n_zxx; my_data.two_n_yyz_n_zyy += right_child_data->two_n_yyz_n_zyy; my_data.two_n_yyx_n_xyy += right_child_data->two_n_yyx_n_xyy; my_data.two_n_zzx_n_xzz += right_child_data->two_n_zzx_n_xzz; my_data.two_n_zzy_n_yzz += right_child_data->two_n_zzy_n_yzz; } for (int i = 0; i < (right_child_data ? 2 : 1); ++i) { const SolidAngleProps &child_data = (i == 0) ? 
*left_child_data : *right_child_data; vec3 displacement = child_data.average_p - vec3(my_data.average_p); vec3 N = child_data.normal; // Adjust Nij for the change in centre P my_data.n_ij_diag += cw_mul(N, displacement); float Nxy = child_data.n_xy + N[0] * displacement[1]; float Nyx = child_data.n_yx + N[1] * displacement[0]; float Nyz = child_data.n_yz + N[1] * displacement[2]; float Nzy = child_data.n_zy + N[2] * displacement[1]; float Nzx = child_data.n_zx + N[2] * displacement[0]; float Nxz = child_data.n_xz + N[0] * displacement[2]; my_data.n_xy += Nxy; my_data.n_yx += Nyx; my_data.n_yz += Nyz; my_data.n_zy += Nzy; my_data.n_zx += Nzx; my_data.n_xz += Nxz; // Adjust Nijk for the change in centre P my_data.n_ijk_diag += 2.0f * cw_mul(displacement, child_data.n_ij_diag) + cw_mul(displacement, cw_mul(displacement, child_data.normal)); my_data.sum_permute_n_xyz += (displacement[0] * (Nyz + Nzy) + displacement[1] * (Nzx + Nxz) + displacement[2] * (Nxy + Nyx)); my_data.two_n_xxy_n_yxx += 2 * (displacement[1] * child_data.n_ij_diag[0] + displacement[0] * child_data.n_xy + N[0] * displacement[0] * displacement[1]) + 2 * child_data.n_yx * displacement[0] + N[1] * displacement[0] * displacement[0]; my_data.two_n_xxz_n_zxx += 2 * (displacement[2] * child_data.n_ij_diag[0] + displacement[0] * child_data.n_xz + N[0] * displacement[0] * displacement[2]) + 2 * child_data.n_zx * displacement[0] + N[2] * displacement[0] * displacement[0]; my_data.two_n_yyz_n_zyy += 2 * (displacement[2] * child_data.n_ij_diag[1] + displacement[1] * child_data.n_yz + N[1] * displacement[1] * displacement[2]) + 2 * child_data.n_zy * displacement[1] + N[2] * displacement[1] * displacement[1]; my_data.two_n_yyx_n_xyy += 2 * (displacement[0] * child_data.n_ij_diag[1] + displacement[1] * child_data.n_yx + N[1] * displacement[1] * displacement[0]) + 2 * child_data.n_xy * displacement[1] + N[0] * displacement[1] * displacement[1]; my_data.two_n_zzx_n_xzz += 2 * (displacement[0] * child_data.n_ij_diag[2] + displacement[2] * child_data.n_zx + N[2] * displacement[2] * displacement[0]) + 2 * child_data.n_xz * displacement[2] + N[0] * displacement[2] * displacement[2]; my_data.two_n_zzy_n_yzz += 2 * (displacement[1] * child_data.n_ij_diag[2] + displacement[2] * child_data.n_zy + N[2] * displacement[2] * displacement[1]) + 2 * child_data.n_yz * displacement[2] + N[1] * displacement[2] * displacement[2]; } my_data.max_p_dist_sq = length_sq(max(my_data.average_p - my_data.box.lower, my_data.box.upper - my_data.average_p)); } // Return whether need to CUDA_CALLABLE inline bool evaluate_node_solid_angle(const vec3 &query_point, SolidAngleProps *current_data, float &solid_angle, const float accuracy_scale_sq) { SolidAngleProps &data = *current_data; float max_p_sq = data.max_p_dist_sq; vec3 q = query_point - data.average_p; float qlength2 = length_sq(q); if (qlength2 <= max_p_sq * accuracy_scale_sq) { solid_angle = 0.0f; return true; } float omega_approx = 0.0f; // qlength2 must be non-zero, since it's strictly greater than something. // We still need to be careful for NaNs, though, because the 4th power might cause problems. float qlength_m2 = 1.0f / qlength2; float qlength_m1 = sqrtf(qlength_m2); // Normalize q to reduce issues with overflow/underflow, since we'd need the 7th power // if we didn't normalize, and (1e-6)^-7 = 1e42, which overflows single-precision. 
q = q * qlength_m1; omega_approx = -qlength_m2 * dot(q, data.normal); vec3 q2 = cw_mul(q, q); float qlength_m3 = qlength_m2 * qlength_m1; float omega_1 = qlength_m3 * (data.n_ij_diag[0] + data.n_ij_diag[1] + data.n_ij_diag[2] - 3.0f * (dot(q2, data.n_ij_diag) + q[0] * q[1] * (data.n_xy + data.n_yx) + q[0] * q[2] * (data.n_zx + data.n_xz) + q[1] * q[2] * (data.n_yz + data.n_zy))); omega_approx += omega_1; vec3 q3 = cw_mul(q2, q); float qlength_m4 = qlength_m2 * qlength_m2; vec3 temp0(data.two_n_yyx_n_xyy + data.two_n_zzx_n_xzz, data.two_n_zzy_n_yzz + data.two_n_xxy_n_yxx, data.two_n_xxz_n_zxx + data.two_n_yyz_n_zyy); vec3 temp1(q[1] * data.two_n_xxy_n_yxx + q[2] * data.two_n_xxz_n_zxx, q[2] * data.two_n_yyz_n_zyy + q[0] * data.two_n_yyx_n_xyy, q[0] * data.two_n_zzx_n_xzz + q[1] * data.two_n_zzy_n_yzz); float omega_2 = qlength_m4 * (1.5f * dot(q, 3.0f * data.n_ijk_diag + temp0) - 7.5f * (dot(q3, data.n_ijk_diag) + q[0] * q[1] * q[2] * data.sum_permute_n_xyz + dot(q2, temp1))); omega_approx += omega_2; // Safety check if not finite, need to descend instead if (!isfinite(omega_approx)) { omega_approx = 0.0f; solid_angle = 0.0; return true; } solid_angle = omega_approx; return false; } CUDA_CALLABLE inline float robust_solid_angle( const vec3 &a, const vec3 &b, const vec3 &c, const vec3 &p) { vec3 qa = a - p; vec3 qb = b - p; vec3 qc = c - p; const float a_length = length(qa); const float b_length = length(qb); const float c_length = length(qc); if (a_length == 0.0f || b_length == 0.0f || c_length == 0.0f) return 0.0f; qa = qa / a_length; qb = qb / b_length; qc = qc / c_length; const float numerator = dot(qa, cross(qb - qa, qc - qa)); if (numerator == 0.0f) return 0.0f; const float denominator = 1.0f + dot(qa, qb) + dot(qa, qc) + dot(qb, qc); return 2.0f * atan2(numerator, denominator); } }
15,685
C
34.408578
245
0.632898
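For context on what this header is accelerating: the winding number of a closed mesh at a point p is the sum of the per-triangle signed solid angles divided by 4π, and robust_solid_angle() above evaluates one term of that sum via the Van Oosterom–Strackee identity (written below in its unnormalized but algebraically equivalent form). The brute-force reference below, with ad hoc vector types and a tetrahedron as test data, is the exact computation the precomputed SolidAngleProps hierarchy approximates whenever evaluate_node_solid_angle() accepts a node:

// Brute-force winding number reference: sum of per-triangle solid angles / 4*pi.
// V3 and the tetrahedron test mesh are ad hoc, not Warp types or data.
#include <cmath>
#include <cstdio>
#include <vector>

struct V3 { double x, y, z; };
static V3 operator-(V3 a, V3 b) { return {a.x - b.x, a.y - b.y, a.z - b.z}; }
static double dot(V3 a, V3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }
static V3 cross(V3 a, V3 b) { return {a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x}; }
static double len(V3 a) { return std::sqrt(dot(a, a)); }

// Van Oosterom-Strackee formula (unnormalized form)
static double tri_solid_angle(V3 a, V3 b, V3 c, V3 p)
{
    const V3 qa = a - p, qb = b - p, qc = c - p;
    const double la = len(qa), lb = len(qb), lc = len(qc);
    if (la == 0.0 || lb == 0.0 || lc == 0.0) return 0.0;
    const double num = dot(qa, cross(qb, qc));
    const double den = la * lb * lc + dot(qa, qb) * lc + dot(qa, qc) * lb + dot(qb, qc) * la;
    return 2.0 * std::atan2(num, den);
}

static double winding_number(const std::vector<V3>& verts, const std::vector<int>& tris, V3 p)
{
    const double kPi = 3.14159265358979323846;
    double omega = 0.0;
    for (size_t t = 0; t < tris.size(); t += 3)
        omega += tri_solid_angle(verts[tris[t]], verts[tris[t + 1]], verts[tris[t + 2]], p);
    return omega / (4.0 * kPi);
}

int main()
{
    // outward-wound tetrahedron
    std::vector<V3> v = {{0, 0, 0}, {1, 0, 0}, {0, 1, 0}, {0, 0, 1}};
    std::vector<int> f = {0, 2, 1,  0, 1, 3,  0, 3, 2,  1, 2, 3};
    std::printf("inside : %f\n", winding_number(v, f, {0.1, 0.1, 0.1})); // ~1
    std::printf("outside: %f\n", winding_number(v, f, {2.0, 2.0, 2.0})); // ~0
}

For this outward-wound tetrahedron the interior query prints approximately 1 and the exterior query approximately 0.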
NVIDIA/warp/warp/native/scan.cpp
#include "scan.h"

#include <numeric>

template<typename T>
void scan_host(const T* values_in, T* values_out, int n, bool inclusive)
{
    static void* scan_temp_memory = NULL;
    static size_t scan_temp_max_size = 0;

    // compute temporary memory required
    if (!inclusive && n > scan_temp_max_size)
    {
        free_host(scan_temp_memory);
        scan_temp_memory = alloc_host(sizeof(T) * n);
        scan_temp_max_size = n;
    }

    T* result = inclusive ? values_out : static_cast<T*>(scan_temp_memory);

    // scan
    std::partial_sum(values_in, values_in + n, result);
    if (!inclusive)
    {
        values_out[0] = (T)0;
        memcpy_h2h(values_out + 1, result, sizeof(T) * (n - 1));
    }
}

template void scan_host(const int*, int*, int, bool);
template void scan_host(const float*, float*, int, bool);
823
C++
25.580644
75
0.612394
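scan_host() above supports both scan flavours with one std::partial_sum call: the inclusive variant writes straight into the output, while the exclusive variant scans into scratch memory and then shifts the result right by one with a leading zero. The standalone sketch below (plain std containers instead of Warp's host allocator and memcpy_h2h) produces the equivalent results:

// Inclusive vs. exclusive prefix sums with std::partial_sum.
#include <cstdio>
#include <numeric>
#include <vector>

int main()
{
    const std::vector<int> in = {3, 1, 4, 1, 5};

    // inclusive: out[i] = in[0] + ... + in[i]
    std::vector<int> inclusive(in.size());
    std::partial_sum(in.begin(), in.end(), inclusive.begin());

    // exclusive: out[0] = 0, out[i] = in[0] + ... + in[i-1]
    // (scan_host scans into scratch memory, then copies it shifted by one)
    std::vector<int> exclusive(in.size(), 0);
    std::partial_sum(in.begin(), in.end() - 1, exclusive.begin() + 1);

    for (size_t i = 0; i < in.size(); ++i)
        std::printf("%d %d\n", inclusive[i], exclusive[i]); // 3 0 / 4 3 / 8 4 / 9 8 / 14 9
}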
NVIDIA/warp/warp/native/nanovdb/HostBuffer.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /*! @file nanovdb/HostBuffer.h @date April 20, 2021 @brief HostBuffer - a buffer that contains a shared or private bump pool to either externally or internally managed host memory. @details This HostBuffer can be used in multiple ways, most of which are demonstrated in the examples below. Memory in the pool can be managed or unmanged (e.g. internal or external) and can be shared between multiple buffers or belong to a single buffer. Example that uses HostBuffer::create inside io::readGrids to create a full self-managed buffer, i.e. not shared and without padding, per grid in the file. @code auto handles = nanovdb::io::readGrids("file.nvdb"); @endcode Example that uses HostBuffer::createFull. Assuming you have a raw pointer to a NanoVDB grid of unknown type, this examples shows how to create its GridHandle which can be used to enquire about the grid type and meta data. @code void *data;// pointer to a NanoVDB grid of unknown type uint64_t size;// byte size of NanoVDB grid of unknown type auto buffer = nanovdb::HostBuffer::createFull(size, data); nanovdb::GridHandle<> gridHandle(std::move(buffer)); @endcode Example that uses HostBuffer::createPool for internally managed host memory. Suppose you want to read multiple grids in multiple files, but reuse the same fixed sized memory buffer to both avoid memory fragmentation as well as exceeding the fixed memory ceiling! @code auto pool = nanovdb::HostBuffer::createPool(1 << 30);// 1 GB memory pool std::vector<std::string>> frames;// vector of grid names for (int i=0; i<frames.size(); ++i) { auto handles = nanovdb::io::readGrids(frames[i], 0, pool);// throws if grids in file exceed 1 GB ... pool.reset();// clears all handles and resets the memory pool for reuse } @endcode Example that uses HostBuffer::createPool for externally managed host memory. Note that in this example @c handles are allowed to outlive @c pool since they internally store a shared pointer to the memory pool. However @c data MUST outlive @c handles since the pool does not own its memory in this example. @code const size_t poolSize = 1 << 30;// 1 GB void *data = std::malloc(size + NANOVDB_DATA_ALIGNMENT);// 1 GB pool with padding void *buffer = nanovdb::alignPtr(data);// 32B aligned buffer //void *buffer = std::aligned_alloc(NANOVDB_DATA_ALIGNMENT, poolSize);// in C++17 auto pool = nanovdb::HostBuffer::createPool(poolSize, buffer); auto handles1 = nanovdb::io::readGrids("file1.nvdb", 0, pool); auto handles2 = nanovdb::io::readGrids("file2.nvdb", 0, pool); .... std::free(data); //std::free(buffer); @endcode Example that uses HostBuffer::createPool for externally managed host memory. Note that in this example @c handles are allowed to outlive @c pool since they internally store a shared pointer to the memory pool. However @c array MUST outlive @c handles since the pool does not own its memory in this example. 
@code const size_t poolSize = 1 << 30;// 1 GB std::unique_ptr<char[]> array(new char[size + NANOVDB_DATA_ALIGNMENT]);// scoped pool of 1 GB with padding void *buffer = nanovdb::alignPtr(array.get());// 32B aligned buffer auto pool = nanovdb::HostBuffer::createPool(poolSize, buffer); auto handles = nanovdb::io::readGrids("file.nvdb", 0, pool); @endcode */ #ifndef NANOVDB_HOSTBUFFER_H_HAS_BEEN_INCLUDED #define NANOVDB_HOSTBUFFER_H_HAS_BEEN_INCLUDED #include <nanovdb/NanoVDB.h>// for NANOVDB_DATA_ALIGNMENT; #include <stdint.h> // for types like int32_t etc #include <cstdio> // for fprintf #include <cstdlib> // for std::malloc/std::realloc/std::free #include <memory>// for std::make_shared #include <mutex>// for std::mutex #include <unordered_set>// for std::unordered_set #include <cassert>// for assert #include <sstream>// for std::stringstream #include <cstring>// for memcpy #define checkPtr(ptr, msg) \ { \ ptrAssert((ptr), (msg), __FILE__, __LINE__); \ } namespace nanovdb { template<typename BufferT> struct BufferTraits { static constexpr bool hasDeviceDual = false; }; // ----------------------------> HostBuffer <-------------------------------------- /// @brief This is a buffer that contains a shared or private pool /// to either externally or internally managed host memory. /// /// @note Terminology: /// Pool: 0 = buffer.size() < buffer.poolSize() /// Buffer: 0 < buffer.size() < buffer.poolSize() /// Full: 0 < buffer.size() = buffer.poolSize() /// Empty: 0 = buffer.size() = buffer.poolSize() class HostBuffer { struct Pool;// forward declaration of private pool struct std::shared_ptr<Pool> mPool; uint64_t mSize; // total number of bytes for the NanoVDB grid. void* mData; // raw buffer for the NanoVDB grid. #if defined(DEBUG) || defined(_DEBUG) static inline void ptrAssert(void* ptr, const char* msg, const char* file, int line, bool abort = true) { if (ptr == nullptr) { fprintf(stderr, "NULL pointer error: %s %s %d\n", msg, file, line); if (abort) exit(1); } if (uint64_t(ptr) % NANOVDB_DATA_ALIGNMENT) { fprintf(stderr, "Alignment pointer error: %s %s %d\n", msg, file, line); if (abort) exit(1); } } #else static inline void ptrAssert(void*, const char*, const char*, int, bool = true) { } #endif public: /// @brief Return a full buffer or an empty buffer HostBuffer(uint64_t bufferSize = 0); /// @brief Move copy-constructor HostBuffer(HostBuffer&& other); /// @brief Custom descructor ~HostBuffer() { this->clear(); } /// @brief Move copy assignment operation HostBuffer& operator=(HostBuffer&& other); /// @brief Disallow copy-construction HostBuffer(const HostBuffer&) = delete; /// @brief Disallow copy assignment operation HostBuffer& operator=(const HostBuffer&) = delete; /// @brief Return a pool buffer which satisfies: buffer.size == 0, /// buffer.poolSize() == poolSize, and buffer.data() == nullptr. /// If data==nullptr, memory for the pool will be allocated. /// /// @throw If poolSize is zero. static HostBuffer createPool(uint64_t poolSize, void *data = nullptr); /// @brief Return a full buffer which satisfies: buffer.size == bufferSize, /// buffer.poolSize() == bufferSize, and buffer.data() == data. /// If data==nullptr, memory for the pool will be allocated. /// /// @throw If bufferSize is zero. static HostBuffer createFull(uint64_t bufferSize, void *data = nullptr); /// @brief Return a buffer with @c bufferSize bytes managed by /// the specified memory @c pool. If none is provided, i.e. /// @c pool == nullptr or @c pool->poolSize() == 0, one is /// created with size @c bufferSize, i.e. 
a full buffer is returned. /// /// @throw If the specified @c pool has insufficient memory for /// the requested buffer size. static HostBuffer create(uint64_t bufferSize, const HostBuffer* pool = nullptr); /// @brief Initialize as a full buffer with the specified size. If data is NULL /// the memory is internally allocated. void init(uint64_t bufferSize, void *data = nullptr); //@{ /// @brief Retuns a pointer to the raw memory buffer managed by this allocator. /// /// @warning Note that the pointer can be NULL if the allocator was not initialized! const void* data() const { return mData; } void* data() { return mData; } //@} //@{ /// @brief Returns the size in bytes associated with this buffer. uint64_t bufferSize() const { return mSize; } uint64_t size() const { return this->bufferSize(); } //@} /// @brief Returns the size in bytes of the memory pool shared with this instance. uint64_t poolSize() const; /// @brief Return true if memory is managed (using std::malloc and std:free) by the /// shared pool in this buffer. Else memory is assumed to be managed externally. bool isManaged() const; //@{ /// @brief Returns true if this buffer has no memory associated with it bool isEmpty() const { return !mPool || mSize == 0 || mData == nullptr; } bool empty() const { return this->isEmpty(); } //@} /// @brief Return true if this is a pool, i.e. an empty buffer with a nonempty /// internal pool, i.e. this->size() == 0 and this->poolSize() != 0 bool isPool() const { return mSize == 0 && this->poolSize() > 0; } /// @brief Return true if the pool exists, is nonempty but has no more available memory bool isFull() const; /// @brief Clear this buffer so it is empty. void clear(); /// @brief Clears all existing buffers that are registered against the memory pool /// and resets the pool so it can be reused to create new buffers. /// /// @throw If this instance is not empty or contains no pool. /// /// @warning This method is not thread-safe! void reset(); /// @brief Total number of bytes from the pool currently in use by buffers uint64_t poolUsage() const; /// @brief resize the pool size. It will attempt to resize the existing /// memory block, but if that fails a deep copy is performed. /// If @c data is not NULL it will be used as new externally /// managed memory for the pool. All registered buffers are /// updated so GridHandle::grid might return a new address (if /// deep copy was performed). /// /// @note This method can be use to resize the memory pool and even /// change it from internally to externally managed memory or vice versa. /// /// @throw if @c poolSize is less than this->poolUsage() the used memory /// or allocations fail. 
void resizePool(uint64_t poolSize, void *data = nullptr); }; // HostBuffer class // --------------------------> Implementation of HostBuffer::Pool <------------------------------------ // This is private struct of HostBuffer so you can safely ignore the API struct HostBuffer::Pool { using HashTableT = std::unordered_set<HostBuffer*>; std::mutex mMutex; // mutex for updating mRegister and mFree HashTableT mRegister; void *mData, *mFree; uint64_t mSize, mPadding; bool mManaged; /// @brief External memory ctor Pool(uint64_t size = 0, void* data = nullptr) : mData(data) , mFree(mData) , mSize(size) , mPadding(0) , mManaged(data == nullptr) { if (mManaged) { mData = Pool::alloc(mSize); if (mData == nullptr) throw std::runtime_error("Pool::Pool malloc failed"); } mPadding = alignmentPadding(mData); if (!mManaged && mPadding != 0) { throw std::runtime_error("Pool::Pool: external memory buffer is not aligned to " + std::to_string(NANOVDB_DATA_ALIGNMENT) + " bytes.\nHint: use nanovdb::alignPtr or std::aligned_alloc (C++17 only)"); } mFree = util::PtrAdd(mData, mPadding); } /// @brief Custom destructor ~Pool() { assert(mRegister.empty()); if (mManaged) std::free(mData); } /// @brief Disallow copy-construction Pool(const Pool&) = delete; /// @brief Disallow move-construction Pool(const Pool&&) = delete; /// @brief Disallow copy assignment operation Pool& operator=(const Pool&) = delete; /// @brief Disallow move assignment operation Pool& operator=(const Pool&&) = delete; /// @brief Return the total number of bytes used from this Pool by buffers uint64_t usage() const { return util::PtrDiff(mFree, mData) - mPadding; } /// @brief Allocate a buffer of the specified size and add it to the register void add(HostBuffer* buffer, uint64_t size) { void *alignedFree = util::PtrAdd(mFree, alignmentPadding(mFree)); if (util::PtrAdd(alignedFree, size) > util::PtrAdd(mData, mPadding + mSize)) { std::stringstream ss; ss << "HostBuffer::Pool: insufficient memory\n" << "\tA buffer requested " << size << " bytes with " << NANOVDB_DATA_ALIGNMENT << "-bytes alignment from a pool with " << mSize << " bytes of which\n\t" << (util::PtrDiff(alignedFree, mData) - mPadding) << " bytes are used by " << mRegister.size() << " other buffer(s). " << "Pool is " << (mManaged ? "internally" : "externally") << " managed.\n"; //std::cerr << ss.str(); throw std::runtime_error(ss.str()); } buffer->mSize = size; const std::lock_guard<std::mutex> lock(mMutex); mRegister.insert(buffer); buffer->mData = alignedFree; mFree = util::PtrAdd(alignedFree, size); } /// @brief Remove the specified buffer from the register void remove(HostBuffer *buffer) { const std::lock_guard<std::mutex> lock(mMutex); mRegister.erase(buffer); } /// @brief Replaces buffer1 with buffer2 in the register void replace(HostBuffer *buffer1, HostBuffer *buffer2) { const std::lock_guard<std::mutex> lock(mMutex); mRegister.erase( buffer1); mRegister.insert(buffer2); } /// @brief Reset the register and all its buffers void reset() { for (HostBuffer *buffer : mRegister) { buffer->mPool.reset(); buffer->mSize = 0; buffer->mData = nullptr; } mRegister.clear(); mFree = util::PtrAdd(mData, mPadding); } /// @brief Resize this Pool and update registered buffers as needed. If data is no NULL /// it is used as externally managed memory. 
void resize(uint64_t size, void *data = nullptr) { const uint64_t memUsage = this->usage(); const bool managed = (data == nullptr); if (!managed && alignmentPadding(data) != 0) { throw std::runtime_error("Pool::resize: external memory buffer is not aligned to " + std::to_string(NANOVDB_DATA_ALIGNMENT) + " bytes"); } if (memUsage > size) { throw std::runtime_error("Pool::resize: insufficient memory"); } uint64_t padding = 0; if (mManaged && managed && size != mSize) { // managed -> managed padding = mPadding; data = Pool::realloc(mData, memUsage, size, padding); // performs both copy and free of mData } else if (!mManaged && managed) { // un-managed -> managed data = Pool::alloc(size); padding = alignmentPadding(data); } if (data == nullptr) { throw std::runtime_error("Pool::resize: allocation failed"); } else if (data != mData) { void* paddedData = util::PtrAdd(data, padding); if (!(mManaged && managed)) { // no need to copy if managed -> managed memcpy(paddedData, util::PtrAdd(mData, mPadding), memUsage); } for (HostBuffer* buffer : mRegister) { // update registered buffers //buffer->mData = paddedData + ptrdiff_t(buffer->mData - (mData + mPadding)); buffer->mData = util::PtrAdd(paddedData, util::PtrDiff(buffer->mData, util::PtrAdd(mData, mPadding))); } mFree = util::PtrAdd(paddedData, memUsage); // update the free pointer if (mManaged && !managed) {// only free if managed -> un-managed std::free(mData); } mData = data; mPadding = padding; } mSize = size; mManaged = managed; } /// @brief Return true is all the memory in this pool is in use. bool isFull() const { assert(mFree <= util::PtrAdd(mData, mPadding + mSize)); return mSize > 0 ? mFree == util::PtrAdd(mData, mPadding + mSize) : false; } private: static void* alloc(uint64_t size) { //#if (__cplusplus >= 201703L) // return std::aligned_alloc(NANOVDB_DATA_ALIGNMENT, size);//C++17 or newer //#else // make sure we alloc enough space to align the result return std::malloc(size + NANOVDB_DATA_ALIGNMENT); //#endif } static void* realloc(void* const origData, uint64_t origSize, uint64_t desiredSize, uint64_t& padding) { // make sure we alloc enough space to align the result void* data = std::realloc(origData, desiredSize + NANOVDB_DATA_ALIGNMENT); if (data != nullptr && data != origData) { uint64_t newPadding = alignmentPadding(data); // Number of padding bytes may have changed -- move data if that's the case if (newPadding != padding) { // Realloc should not happen when shrinking down buffer, but let's be safe std::memmove(util::PtrAdd(data, newPadding), util::PtrAdd(data, padding), math::Min(origSize, desiredSize)); padding = newPadding; } } return data; } };// struct HostBuffer::Pool // --------------------------> Implementation of HostBuffer <------------------------------------ inline HostBuffer::HostBuffer(uint64_t size) : mPool(nullptr), mSize(size), mData(nullptr) { if (size>0) { mPool = std::make_shared<Pool>(size); mData = mPool->mFree; mPool->mRegister.insert(this); mPool->mFree = util::PtrAdd(mPool->mFree, size); } } inline HostBuffer::HostBuffer(HostBuffer&& other) : mPool(other.mPool), mSize(other.mSize), mData(other.mData) { if (mPool && mSize != 0) { mPool->replace(&other, this); } other.mPool.reset(); other.mSize = 0; other.mData = nullptr; } inline void HostBuffer::init(uint64_t bufferSize, void *data) { if (bufferSize == 0) { throw std::runtime_error("HostBuffer: invalid buffer size"); } if (mPool) { mPool.reset(); } if (!mPool || mPool->mSize != bufferSize) { mPool = std::make_shared<Pool>(bufferSize, data); } 
mPool->add(this, bufferSize); } inline HostBuffer& HostBuffer::operator=(HostBuffer&& other) { if (mPool) { mPool->remove(this); } mPool = other.mPool; mSize = other.mSize; mData = other.mData; if (mPool && mSize != 0) { mPool->replace(&other, this); } other.mPool.reset(); other.mSize = 0; other.mData = nullptr; return *this; } inline uint64_t HostBuffer::poolSize() const { return mPool ? mPool->mSize : 0u; } inline uint64_t HostBuffer::poolUsage() const { return mPool ? mPool->usage(): 0u; } inline bool HostBuffer::isManaged() const { return mPool ? mPool->mManaged : false; } inline bool HostBuffer::isFull() const { return mPool ? mPool->isFull() : false; } inline HostBuffer HostBuffer::createPool(uint64_t poolSize, void *data) { if (poolSize == 0) { throw std::runtime_error("HostBuffer: invalid pool size"); } HostBuffer buffer; buffer.mPool = std::make_shared<Pool>(poolSize, data); // note the buffer is NOT registered by its pool since it is not using its memory buffer.mSize = 0; buffer.mData = nullptr; return buffer; } inline HostBuffer HostBuffer::createFull(uint64_t bufferSize, void *data) { if (bufferSize == 0) { throw std::runtime_error("HostBuffer: invalid buffer size"); } HostBuffer buffer; buffer.mPool = std::make_shared<Pool>(bufferSize, data); buffer.mPool->add(&buffer, bufferSize); return buffer; } inline HostBuffer HostBuffer::create(uint64_t bufferSize, const HostBuffer* pool) { HostBuffer buffer; if (pool == nullptr || !pool->mPool) { buffer.mPool = std::make_shared<Pool>(bufferSize); } else { buffer.mPool = pool->mPool; } buffer.mPool->add(&buffer, bufferSize); return buffer; } inline void HostBuffer::clear() { if (mPool) {// remove self from the buffer register in the pool mPool->remove(this); } mPool.reset(); mSize = 0; mData = nullptr; } inline void HostBuffer::reset() { if (this->size()>0) { throw std::runtime_error("HostBuffer: only empty buffers can call reset"); } if (!mPool) { throw std::runtime_error("HostBuffer: this buffer contains no pool to reset"); } mPool->reset(); } inline void HostBuffer::resizePool(uint64_t size, void *data) { if (!mPool) { throw std::runtime_error("HostBuffer: this buffer contains no pool to resize"); } mPool->resize(size, data); } } // namespace nanovdb #endif // end of NANOVDB_HOSTBUFFER_H_HAS_BEEN_INCLUDED
21,463
C
35.318105
118
0.606951
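One detail worth isolating from Pool above: internally managed memory is allocated with an extra NANOVDB_DATA_ALIGNMENT bytes, and the usable region begins after a computed padding prefix so that grid data always starts on a 32-byte boundary. The sketch below shows just that arithmetic; padding_for() is a hypothetical stand-in for the alignmentPadding()/alignPtr() helpers referenced in the header, not the NanoVDB implementation itself.

// Alignment bookkeeping sketch: over-allocate, then skip a padding prefix so
// the usable region starts on a 32-byte boundary.
#include <cstdint>
#include <cstdio>
#include <cstdlib>

static const std::uint64_t kAlignment = 32; // NANOVDB_DATA_ALIGNMENT

// bytes to add to p so that (p + padding) is a multiple of kAlignment
static std::uint64_t padding_for(const void* p)
{
    const std::uint64_t addr = reinterpret_cast<std::uint64_t>(p);
    return (kAlignment - addr % kAlignment) % kAlignment;
}

int main()
{
    const std::uint64_t pool_size = 1024;

    // over-allocate, as the pool's internal alloc() does, so alignment can always be satisfied
    void* raw = std::malloc(pool_size + kAlignment);
    const std::uint64_t pad = padding_for(raw);
    void* aligned = static_cast<char*>(raw) + pad;

    std::printf("raw=%p pad=%llu aligned=%p (aligned %% 32 = %llu)\n",
                raw, (unsigned long long)pad, aligned,
                (unsigned long long)(reinterpret_cast<std::uint64_t>(aligned) % kAlignment));

    std::free(raw); // free the original pointer, never the aligned one
}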
NVIDIA/warp/warp/native/nanovdb/NanoVDB.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /*! \file nanovdb/NanoVDB.h \author Ken Museth \date January 8, 2020 \brief Implements a light-weight self-contained VDB data-structure in a single file! In other words, this is a significantly watered-down version of the OpenVDB implementation, with few dependencies - so a one-stop-shop for a minimalistic VDB data structure that run on most platforms! \note It is important to note that NanoVDB (by design) is a read-only sparse GPU (and CPU) friendly data structure intended for applications like rendering and collision detection. As such it obviously lacks a lot of the functionality and features of OpenVDB grids. NanoVDB is essentially a compact linearized (or serialized) representation of an OpenVDB tree with getValue methods only. For best performance use the ReadAccessor::getValue method as opposed to the Tree::getValue method. Note that since a ReadAccessor caches previous access patterns it is by design not thread-safe, so use one instantiation per thread (it is very light-weight). Also, it is not safe to copy accessors between the GPU and CPU! In fact, client code should only interface with the API of the Grid class (all other nodes of the NanoVDB data structure can safely be ignored by most client codes)! \warning NanoVDB grids can only be constructed via tools like createNanoGrid or the GridBuilder. This explains why none of the grid nodes defined below have public constructors or destructors. \details Please see the following paper for more details on the data structure: K. Museth, “VDB: High-Resolution Sparse Volumes with Dynamic Topology”, ACM Transactions on Graphics 32(3), 2013, which can be found here: http://www.museth.org/Ken/Publications_files/Museth_TOG13.pdf NanoVDB was first published there: https://dl.acm.org/doi/fullHtml/10.1145/3450623.3464653 Overview: This file implements the following fundamental class that when combined forms the backbone of the VDB tree data structure: Coord- a signed integer coordinate Vec3 - a 3D vector Vec4 - a 4D vector BBox - a bounding box Mask - a bitmask essential to the non-root tree nodes Map - an affine coordinate transformation Grid - contains a Tree and a map for world<->index transformations. Use this class as the main API with client code! Tree - contains a RootNode and getValue methods that should only be used for debugging RootNode - the top-level node of the VDB data structure InternalNode - the internal nodes of the VDB data structure LeafNode - the lowest level tree nodes that encode voxel values and state ReadAccessor - implements accelerated random access operations Semantics: A VDB data structure encodes values and (binary) states associated with signed integer coordinates. Values encoded at the leaf node level are denoted voxel values, and values associated with other tree nodes are referred to as tile values, which by design cover a larger coordinate index domain. Memory layout: It's important to emphasize that all the grid data (defined below) are explicitly 32 byte aligned, which implies that any memory buffer that contains a NanoVDB grid must also be at 32 byte aligned. That is, the memory address of the beginning of a buffer (see ascii diagram below) must be divisible by 32, i.e. uintptr_t(&buffer)%32 == 0! If this is not the case, the C++ standard says the behaviour is undefined! Normally this is not a concerns on GPUs, because they use 256 byte aligned allocations, but the same cannot be said about the CPU. 
GridData is always at the very beginning of the buffer immediately followed by TreeData! The remaining nodes and blind-data are allowed to be scattered throughout the buffer, though in practice they are arranged as: GridData: 672 bytes (e.g. magic, checksum, major, flags, index, count, size, name, map, world bbox, voxel size, class, type, offset, count) TreeData: 64 bytes (node counts and byte offsets) ... optional padding ... RootData: size depends on ValueType (index bbox, voxel count, tile count, min/max/avg/standard deviation) Array of: RootData::Tile ... optional padding ... Array of: Upper InternalNodes of size 32^3: bbox, two bit masks, 32768 tile values, and min/max/avg/standard deviation values ... optional padding ... Array of: Lower InternalNodes of size 16^3: bbox, two bit masks, 4096 tile values, and min/max/avg/standard deviation values ... optional padding ... Array of: LeafNodes of size 8^3: bbox, bit masks, 512 voxel values, and min/max/avg/standard deviation values Notation: "]---[" implies it has optional padding, and "][" implies zero padding [GridData(672B)][TreeData(64B)]---[RootData][N x Root::Tile]---[InternalData<5>]---[InternalData<4>]---[LeafData<3>]---[BLINDMETA...]---[BLIND0]---[BLIND1]---etc. ^ ^ ^ ^ ^ ^ | | | | | | +-- Start of 32B aligned buffer | | | | +-- Node0::DataType* leafData GridType::DataType* gridData | | | | | | | +-- Node1::DataType* lowerData RootType::DataType* rootData --+ | | | +-- Node2::DataType* upperData | +-- RootType::DataType::Tile* tile */ #ifndef NANOVDB_NANOVDB_H_HAS_BEEN_INCLUDED #define NANOVDB_NANOVDB_H_HAS_BEEN_INCLUDED // The following two header files are the only mandatory dependencies #include <nanovdb/util/Util.h>// for __hostdev__ and lots of other utility functions #include <nanovdb/math/Math.h>// for Coord, BBox, Vec3, Vec4 etc // Do not change this value! 32 byte alignment is fixed in NanoVDB #define NANOVDB_DATA_ALIGNMENT 32 // NANOVDB_MAGIC_NUMB is currently used for both grids and files (starting with v32.6.0) // NANOVDB_MAGIC_GRID will soon be used exclusively for grids (serialized to a single buffer) // NANOVDB_MAGIC_FILE will soon be used exclusively for files // NANOVDB_MAGIC_NODE will soon be used exclusively for NodeManager // NANOVDB_MAGIC_FRAG will soon be used exclusively for a fragmented grid, i.e. 
a grid that is not serialized // | : 0 in 30 corresponds to 0 in NanoVDB0 #define NANOVDB_MAGIC_NUMB 0x304244566f6e614eUL // "NanoVDB0" in hex - little endian (uint64_t) #define NANOVDB_MAGIC_GRID 0x314244566f6e614eUL // "NanoVDB1" in hex - little endian (uint64_t) #define NANOVDB_MAGIC_FILE 0x324244566f6e614eUL // "NanoVDB2" in hex - little endian (uint64_t) #define NANOVDB_MAGIC_NODE 0x334244566f6e614eUL // "NanoVDB3" in hex - little endian (uint64_t) #define NANOVDB_MAGIC_FRAG 0x344244566f6e614eUL // "NanoVDB4" in hex - little endian (uint64_t) #define NANOVDB_MAGIC_MASK 0x00FFFFFFFFFFFFFFUL // use this mask to remove the number //#define NANOVDB_MAGIC_NUMBER 0x304244566f6e614eUL //#define NANOVDB_USE_NEW_MAGIC_NUMBERS// used to enable use of the new magic numbers described above #define NANOVDB_MAJOR_VERSION_NUMBER 32 // reflects changes to the ABI and hence also the file format #define NANOVDB_MINOR_VERSION_NUMBER 7 // reflects changes to the API but not ABI #define NANOVDB_PATCH_VERSION_NUMBER 0 // reflects changes that does not affect the ABI or API #define TBB_SUPPRESS_DEPRECATED_MESSAGES 1 // This replaces a Coord key at the root level with a single uint64_t #define NANOVDB_USE_SINGLE_ROOT_KEY // This replaces three levels of Coord keys in the ReadAccessor with one Coord //#define NANOVDB_USE_SINGLE_ACCESSOR_KEY // Use this to switch between std::ofstream or FILE implementations //#define NANOVDB_USE_IOSTREAMS // Use this to switch between old and new accessor methods #define NANOVDB_NEW_ACCESSOR_METHODS #define NANOVDB_FPN_BRANCHLESS #if !defined(NANOVDB_ALIGN) #define NANOVDB_ALIGN(n) alignas(n) #endif // !defined(NANOVDB_ALIGN) namespace nanovdb {// ================================================================= // --------------------------> Build types <------------------------------------ /// @brief Dummy type for a voxel whose value equals an offset into an external value array class ValueIndex{}; /// @brief Dummy type for a voxel whose value equals an offset into an external value array of active values class ValueOnIndex{}; /// @brief Like @c ValueIndex but with a mutable mask class ValueIndexMask{}; /// @brief Like @c ValueOnIndex but with a mutable mask class ValueOnIndexMask{}; /// @brief Dummy type for a voxel whose value equals its binary active state class ValueMask{}; /// @brief Dummy type for a 16 bit floating point values (placeholder for IEEE 754 Half) class Half{}; /// @brief Dummy type for a 4bit quantization of float point values class Fp4{}; /// @brief Dummy type for a 8bit quantization of float point values class Fp8{}; /// @brief Dummy type for a 16bit quantization of float point values class Fp16{}; /// @brief Dummy type for a variable bit quantization of floating point values class FpN{}; /// @brief Dummy type for indexing points into voxels class Point{}; // --------------------------> GridType <------------------------------------ /// @brief return the number of characters (including null termination) required to convert enum type to a string template <class EnumT> __hostdev__ inline constexpr uint32_t strlen(){return (uint32_t)EnumT::StrLen - (uint32_t)EnumT::End;} /// @brief List of types that are currently supported by NanoVDB /// /// @note To expand on this list do: /// 1) Add the new type between Unknown and End in the enum below /// 2) Add the new type to OpenToNanoVDB::processGrid that maps OpenVDB types to GridType /// 3) Verify that the ConvertTrait in NanoToOpenVDB.h works correctly with the new type /// 4) Add the new type to toGridType 
(defined below) that maps NanoVDB types to GridType /// 5) Add the new type to toStr (defined below) enum class GridType : uint32_t { Unknown = 0, // unknown value type - should rarely be used Float = 1, // single precision floating point value Double = 2, // double precision floating point value Int16 = 3, // half precision signed integer value Int32 = 4, // single precision signed integer value Int64 = 5, // double precision signed integer value Vec3f = 6, // single precision floating 3D vector Vec3d = 7, // double precision floating 3D vector Mask = 8, // no value, just the active state Half = 9, // half precision floating point value (placeholder for IEEE 754 Half) UInt32 = 10, // single precision unsigned integer value Boolean = 11, // boolean value, encoded in bit array RGBA8 = 12, // RGBA packed into 32bit word in reverse-order, i.e. R is lowest byte. Fp4 = 13, // 4bit quantization of floating point value Fp8 = 14, // 8bit quantization of floating point value Fp16 = 15, // 16bit quantization of floating point value FpN = 16, // variable bit quantization of floating point value Vec4f = 17, // single precision floating 4D vector Vec4d = 18, // double precision floating 4D vector Index = 19, // index into an external array of active and inactive values OnIndex = 20, // index into an external array of active values IndexMask = 21, // like Index but with a mutable mask OnIndexMask = 22, // like OnIndex but with a mutable mask PointIndex = 23, // voxels encode indices to co-located points Vec3u8 = 24, // 8bit quantization of floating point 3D vector (only as blind data) Vec3u16 = 25, // 16bit quantization of floating point 3D vector (only as blind data) UInt8 = 26, // 8 bit unsigned integer values (eg 0 -> 255 gray scale) End = 27,// total number of types in this enum (excluding StrLen since it's not a type) StrLen = End + 12};// this entry is used to determine the minimum size of c-string /// @brief Maps a GridType to a c-string /// @param dst destination string of size 12 or larger /// @param gridType GridType enum to be mapped to a string /// @return Retuns a c-string used to describe a GridType __hostdev__ inline char* toStr(char *dst, GridType gridType) { switch (gridType){ case GridType::Unknown: return util::strcpy(dst, "?"); case GridType::Float: return util::strcpy(dst, "float"); case GridType::Double: return util::strcpy(dst, "double"); case GridType::Int16: return util::strcpy(dst, "int16"); case GridType::Int32: return util::strcpy(dst, "int32"); case GridType::Int64: return util::strcpy(dst, "int64"); case GridType::Vec3f: return util::strcpy(dst, "Vec3f"); case GridType::Vec3d: return util::strcpy(dst, "Vec3d"); case GridType::Mask: return util::strcpy(dst, "Mask"); case GridType::Half: return util::strcpy(dst, "Half"); case GridType::UInt32: return util::strcpy(dst, "uint32"); case GridType::Boolean: return util::strcpy(dst, "bool"); case GridType::RGBA8: return util::strcpy(dst, "RGBA8"); case GridType::Fp4: return util::strcpy(dst, "Float4"); case GridType::Fp8: return util::strcpy(dst, "Float8"); case GridType::Fp16: return util::strcpy(dst, "Float16"); case GridType::FpN: return util::strcpy(dst, "FloatN"); case GridType::Vec4f: return util::strcpy(dst, "Vec4f"); case GridType::Vec4d: return util::strcpy(dst, "Vec4d"); case GridType::Index: return util::strcpy(dst, "Index"); case GridType::OnIndex: return util::strcpy(dst, "OnIndex"); case GridType::IndexMask: return util::strcpy(dst, "IndexMask"); case GridType::OnIndexMask: return util::strcpy(dst, "OnIndexMask"); 
        case GridType::PointIndex: return util::strcpy(dst, "PointIndex");
        case GridType::Vec3u8: return util::strcpy(dst, "Vec3u8");
        case GridType::Vec3u16: return util::strcpy(dst, "Vec3u16");
        case GridType::UInt8: return util::strcpy(dst, "uint8");
        default: return util::strcpy(dst, "End");
    }
}

// --------------------------> GridClass <------------------------------------

/// @brief Classes (superset of OpenVDB) that are currently supported by NanoVDB
enum class GridClass : uint32_t { Unknown = 0,
                                  LevelSet = 1, // narrow band level set, e.g. SDF
                                  FogVolume = 2, // fog volume, e.g. density
                                  Staggered = 3, // staggered MAC grid, e.g. velocity
                                  PointIndex = 4, // point index grid
                                  PointData = 5, // point data grid
                                  Topology = 6, // grid with active states only (no values)
                                  VoxelVolume = 7, // volume of geometric cubes, e.g. colored cubes in Minecraft
                                  IndexGrid = 8, // grid whose values are offsets, e.g. into an external array
                                  TensorGrid = 9, // Index grid for indexing learnable tensor features
                                  End = 10,// total number of types in this enum (excluding StrLen since it's not a type)
                                  StrLen = End + 7};// this entry is used to determine the minimum size of c-string

/// @brief Returns a c-string used to describe a GridClass
/// @param dst destination string of size 7 or larger
/// @param gridClass GridClass enum to be converted to a string
__hostdev__ inline char* toStr(char *dst, GridClass gridClass)
{
    switch (gridClass){
        case GridClass::Unknown: return util::strcpy(dst, "?");
        case GridClass::LevelSet: return util::strcpy(dst, "SDF");
        case GridClass::FogVolume: return util::strcpy(dst, "FOG");
        case GridClass::Staggered: return util::strcpy(dst, "MAC");
        case GridClass::PointIndex: return util::strcpy(dst, "PNTIDX");
        case GridClass::PointData: return util::strcpy(dst, "PNTDAT");
        case GridClass::Topology: return util::strcpy(dst, "TOPO");
        case GridClass::VoxelVolume: return util::strcpy(dst, "VOX");
        case GridClass::IndexGrid: return util::strcpy(dst, "INDEX");
        case GridClass::TensorGrid: return util::strcpy(dst, "TENSOR");
        default: return util::strcpy(dst, "END");
    }
}

// --------------------------> GridFlags <------------------------------------

/// @brief Grid flags which indicate what extra information is present in the grid buffer.
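/// @par Example (illustrative sketch, not part of the original documentation; @c gridData is
/// assumed to point to a valid GridData struct, which is defined further below in this file):
/// @code
/// if (gridData->mFlags.isMaskOn(GridFlags::HasMinMax)) {
///     // the tree nodes of this grid carry min/max statistics of their active values
/// }
/// @endcode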
enum class GridFlags : uint32_t { HasLongGridName = 1 << 0, // grid name is longer than 256 characters HasBBox = 1 << 1, // nodes contain bounding-boxes of active values HasMinMax = 1 << 2, // nodes contain min/max of active values HasAverage = 1 << 3, // nodes contain averages of active values HasStdDeviation = 1 << 4, // nodes contain standard deviations of active values IsBreadthFirst = 1 << 5, // nodes are typically arranged breadth-first in memory End = 1 << 6, // use End - 1 as a mask for the 5 lower bit flags StrLen = End + 23,// this entry is used to determine the minimum size of c-string }; /// @brief Retuns a c-string used to describe a GridFlags /// @param dst destination string of size 23 or larger /// @param gridFlags GridFlags enum to be converted to a string __hostdev__ inline const char* toStr(char *dst, GridFlags gridFlags) { switch (gridFlags){ case GridFlags::HasLongGridName: return util::strcpy(dst, "has long grid name"); case GridFlags::HasBBox: return util::strcpy(dst, "has bbox"); case GridFlags::HasMinMax: return util::strcpy(dst, "has min/max"); case GridFlags::HasAverage: return util::strcpy(dst, "has average"); case GridFlags::HasStdDeviation: return util::strcpy(dst, "has standard deviation"); case GridFlags::IsBreadthFirst: return util::strcpy(dst, "is breadth-first"); default: return util::strcpy(dst, "end"); } } // --------------------------> MagicType <------------------------------------ /// @brief Enums used to identify magic numbers recognized by NanoVDB enum class MagicType : uint32_t { Unknown = 0,// first 64 bits are neither of the cases below OpenVDB = 1,// first 32 bits = 0x56444220UL NanoVDB = 2,// first 64 bits = NANOVDB_MAGIC_NUMB NanoGrid = 3,// first 64 bits = NANOVDB_MAGIC_GRID NanoFile = 4,// first 64 bits = NANOVDB_MAGIC_FILE NanoNode = 5,// first 64 bits = NANOVDB_MAGIC_NODE NanoFrag = 6,// first 64 bits = NANOVDB_MAGIC_FRAG End = 7, StrLen = End + 25};// this entry is used to determine the minimum size of c-string /// @brief maps 64 bits of magic number to enum __hostdev__ inline MagicType toMagic(uint64_t magic) { switch (magic){ case NANOVDB_MAGIC_NUMB: return MagicType::NanoVDB; case NANOVDB_MAGIC_GRID: return MagicType::NanoGrid; case NANOVDB_MAGIC_FILE: return MagicType::NanoFile; case NANOVDB_MAGIC_NODE: return MagicType::NanoNode; case NANOVDB_MAGIC_FRAG: return MagicType::NanoFrag; default: return (magic & ~uint32_t(0)) == 0x56444220UL ? MagicType::OpenVDB : MagicType::Unknown; } } /// @brief print 64-bit magic number to string /// @param dst destination string of size 25 or larger /// @param magic 64 bit magic number to be printed /// @return return destination string @c dst __hostdev__ inline char* toStr(char *dst, MagicType magic) { switch (magic){ case MagicType::Unknown: return util::strcpy(dst, "unknown"); case MagicType::NanoVDB: return util::strcpy(dst, "nanovdb"); case MagicType::NanoGrid: return util::strcpy(dst, "nanovdb::Grid"); case MagicType::NanoFile: return util::strcpy(dst, "nanovdb::File"); case MagicType::NanoNode: return util::strcpy(dst, "nanovdb::NodeManager"); case MagicType::NanoFrag: return util::strcpy(dst, "fragmented nanovdb::Grid"); case MagicType::OpenVDB: return util::strcpy(dst, "openvdb"); default: return util::strcpy(dst, "end"); } } // --------------------------> PointType enums <------------------------------------ // Define the type used when the points are encoded as blind data in the output grid enum class PointType : uint32_t { Disable = 0,// no point information e.g. 
when BuildT != Point PointID = 1,// linear index of type uint32_t to points World64 = 2,// Vec3d in world space World32 = 3,// Vec3f in world space Grid64 = 4,// Vec3d in grid space Grid32 = 5,// Vec3f in grid space Voxel32 = 6,// Vec3f in voxel space Voxel16 = 7,// Vec3u16 in voxel space Voxel8 = 8,// Vec3u8 in voxel space Default = 9,// output matches input, i.e. Vec3d or Vec3f in world space End =10 }; // --------------------------> GridBlindData enums <------------------------------------ /// @brief Blind-data Classes that are currently supported by NanoVDB enum class GridBlindDataClass : uint32_t { Unknown = 0, IndexArray = 1, AttributeArray = 2, GridName = 3, ChannelArray = 4, End = 5 }; /// @brief Blind-data Semantics that are currently understood by NanoVDB enum class GridBlindDataSemantic : uint32_t { Unknown = 0, PointPosition = 1, // 3D coordinates in an unknown space PointColor = 2, PointNormal = 3, PointRadius = 4, PointVelocity = 5, PointId = 6, WorldCoords = 7, // 3D coordinates in world space, e.g. (0.056, 0.8, 1,8) GridCoords = 8, // 3D coordinates in grid space, e.g. (1.2, 4.0, 5.7), aka index-space VoxelCoords = 9, // 3D coordinates in voxel space, e.g. (0.2, 0.0, 0.7) End = 10 }; // --------------------------> BuildTraits <------------------------------------ /// @brief Define static boolean tests for template build types template<typename T> struct BuildTraits { // check if T is an index type static constexpr bool is_index = util::is_same<T, ValueIndex, ValueIndexMask, ValueOnIndex, ValueOnIndexMask>::value; static constexpr bool is_onindex = util::is_same<T, ValueOnIndex, ValueOnIndexMask>::value; static constexpr bool is_offindex = util::is_same<T, ValueIndex, ValueIndexMask>::value; static constexpr bool is_indexmask = util::is_same<T, ValueIndexMask, ValueOnIndexMask>::value; // check if T is a compressed float type with fixed bit precision static constexpr bool is_FpX = util::is_same<T, Fp4, Fp8, Fp16>::value; // check if T is a compressed float type with fixed or variable bit precision static constexpr bool is_Fp = util::is_same<T, Fp4, Fp8, Fp16, FpN>::value; // check if T is a POD float type, i.e float or double static constexpr bool is_float = util::is_floating_point<T>::value; // check if T is a template specialization of LeafData<T>, i.e. has T mValues[512] static constexpr bool is_special = is_index || is_Fp || util::is_same<T, Point, bool, ValueMask>::value; }; // BuildTraits // --------------------------> BuildToValueMap <------------------------------------ /// @brief Maps one type (e.g. 
the build types above) to other (actual) types template<typename T> struct BuildToValueMap { using Type = T; using type = T; }; template<> struct BuildToValueMap<ValueIndex> { using Type = uint64_t; using type = uint64_t; }; template<> struct BuildToValueMap<ValueOnIndex> { using Type = uint64_t; using type = uint64_t; }; template<> struct BuildToValueMap<ValueIndexMask> { using Type = uint64_t; using type = uint64_t; }; template<> struct BuildToValueMap<ValueOnIndexMask> { using Type = uint64_t; using type = uint64_t; }; template<> struct BuildToValueMap<ValueMask> { using Type = bool; using type = bool; }; template<> struct BuildToValueMap<Half> { using Type = float; using type = float; }; template<> struct BuildToValueMap<Fp4> { using Type = float; using type = float; }; template<> struct BuildToValueMap<Fp8> { using Type = float; using type = float; }; template<> struct BuildToValueMap<Fp16> { using Type = float; using type = float; }; template<> struct BuildToValueMap<FpN> { using Type = float; using type = float; }; template<> struct BuildToValueMap<Point> { using Type = uint64_t; using type = uint64_t; }; // --------------------------> utility functions related to alignment <------------------------------------ /// @brief return true if the specified pointer is 32 byte aligned __hostdev__ inline static bool isAligned(const void* p){return uint64_t(p) % NANOVDB_DATA_ALIGNMENT == 0;} /// @brief return the smallest number of bytes that when added to the specified pointer results in a 32 byte aligned pointer. __hostdev__ inline static uint64_t alignmentPadding(const void* p) { NANOVDB_ASSERT(p); return (NANOVDB_DATA_ALIGNMENT - (uint64_t(p) % NANOVDB_DATA_ALIGNMENT)) % NANOVDB_DATA_ALIGNMENT; } /// @brief offset the specified pointer so it is 32 byte aligned. Works with both const and non-const pointers. template <typename T> __hostdev__ inline static T* alignPtr(T* p){return util::PtrAdd<T>(p, alignmentPadding(p));} // --------------------------> isFloatingPoint(GridType) <------------------------------------ /// @brief return true if the GridType maps to a floating point type __hostdev__ inline bool isFloatingPoint(GridType gridType) { return gridType == GridType::Float || gridType == GridType::Double || gridType == GridType::Half || gridType == GridType::Fp4 || gridType == GridType::Fp8 || gridType == GridType::Fp16 || gridType == GridType::FpN; } // --------------------------> isFloatingPointVector(GridType) <------------------------------------ /// @brief return true if the GridType maps to a floating point vec3. __hostdev__ inline bool isFloatingPointVector(GridType gridType) { return gridType == GridType::Vec3f || gridType == GridType::Vec3d || gridType == GridType::Vec4f || gridType == GridType::Vec4d; } // --------------------------> isInteger(GridType) <------------------------------------ /// @brief Return true if the GridType maps to a POD integer type. /// @details These types are used to associate a voxel with a POD integer type __hostdev__ inline bool isInteger(GridType gridType) { return gridType == GridType::Int16 || gridType == GridType::Int32 || gridType == GridType::Int64 || gridType == GridType::UInt32|| gridType == GridType::UInt8; } // --------------------------> isIndex(GridType) <------------------------------------ /// @brief Return true if the GridType maps to a special index type (not a POD integer type). /// @details These types are used to index from a voxel into an external array of values, e.g. sidecar or blind data. 
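/// @par Example (illustrative only; @c grid is assumed to be a pointer to a valid NanoVDB grid
/// exposing a gridType() accessor, and @c str is a scratch buffer sized via strlen<GridType>()):
/// @code
/// char str[nanovdb::strlen<GridType>()];
/// if (isIndex(grid->gridType()))
///     printf("voxels store offsets into a sidecar array (%s)\n", toStr(str, grid->gridType()));
/// @endcode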
__hostdev__ inline bool isIndex(GridType gridType) { return gridType == GridType::Index ||// index both active and inactive values gridType == GridType::OnIndex ||// index active values only gridType == GridType::IndexMask ||// as Index, but with an additional mask gridType == GridType::OnIndexMask;// as OnIndex, but with an additional mask } // --------------------------> isValue(GridType, GridClass) <------------------------------------ /// @brief return true if the combination of GridType and GridClass is valid. __hostdev__ inline bool isValid(GridType gridType, GridClass gridClass) { if (gridClass == GridClass::LevelSet || gridClass == GridClass::FogVolume) { return isFloatingPoint(gridType); } else if (gridClass == GridClass::Staggered) { return isFloatingPointVector(gridType); } else if (gridClass == GridClass::PointIndex || gridClass == GridClass::PointData) { return gridType == GridType::PointIndex || gridType == GridType::UInt32; } else if (gridClass == GridClass::Topology) { return gridType == GridType::Mask; } else if (gridClass == GridClass::IndexGrid) { return isIndex(gridType); } else if (gridClass == GridClass::VoxelVolume) { return gridType == GridType::RGBA8 || gridType == GridType::Float || gridType == GridType::Double || gridType == GridType::Vec3f || gridType == GridType::Vec3d || gridType == GridType::UInt32 || gridType == GridType::UInt8; } return gridClass < GridClass::End && gridType < GridType::End; // any valid combination } // --------------------------> validation of blind data meta data <------------------------------------ /// @brief return true if the combination of GridBlindDataClass, GridBlindDataSemantic and GridType is valid. __hostdev__ inline bool isValid(const GridBlindDataClass& blindClass, const GridBlindDataSemantic& blindSemantics, const GridType& blindType) { bool test = false; switch (blindClass) { case GridBlindDataClass::IndexArray: test = (blindSemantics == GridBlindDataSemantic::Unknown || blindSemantics == GridBlindDataSemantic::PointId) && isInteger(blindType); break; case GridBlindDataClass::AttributeArray: if (blindSemantics == GridBlindDataSemantic::PointPosition || blindSemantics == GridBlindDataSemantic::WorldCoords) { test = blindType == GridType::Vec3f || blindType == GridType::Vec3d; } else if (blindSemantics == GridBlindDataSemantic::GridCoords) { test = blindType == GridType::Vec3f; } else if (blindSemantics == GridBlindDataSemantic::VoxelCoords) { test = blindType == GridType::Vec3f || blindType == GridType::Vec3u8 || blindType == GridType::Vec3u16; } else { test = blindSemantics != GridBlindDataSemantic::PointId; } break; case GridBlindDataClass::GridName: test = blindSemantics == GridBlindDataSemantic::Unknown && blindType == GridType::Unknown; break; default: // captures blindClass == Unknown and ChannelArray test = blindClass < GridBlindDataClass::End && blindSemantics < GridBlindDataSemantic::End && blindType < GridType::End; // any valid combination break; } //if (!test) printf("Invalid combination: GridBlindDataClass=%u, GridBlindDataSemantic=%u, GridType=%u\n",(uint32_t)blindClass, (uint32_t)blindSemantics, (uint32_t)blindType); return test; } // ----------------------------> Version class <------------------------------------- /// @brief Bit-compacted representation of all three version numbers /// /// @details major is the top 11 bits, minor is the 11 middle bits and patch is the lower 10 bits class Version { uint32_t mData; // 11 + 11 + 10 bit packing of major + minor + patch public: static constexpr uint32_t End = 0, 
StrLen = 8;// for strlen<Version>() /// @brief Default constructor __hostdev__ Version() : mData(uint32_t(NANOVDB_MAJOR_VERSION_NUMBER) << 21 | uint32_t(NANOVDB_MINOR_VERSION_NUMBER) << 10 | uint32_t(NANOVDB_PATCH_VERSION_NUMBER)) { } /// @brief Constructor from a raw uint32_t data representation __hostdev__ Version(uint32_t data) : mData(data) {} /// @brief Constructor from major.minor.patch version numbers __hostdev__ Version(uint32_t major, uint32_t minor, uint32_t patch) : mData(major << 21 | minor << 10 | patch) { NANOVDB_ASSERT(major < (1u << 11)); // max value of major is 2047 NANOVDB_ASSERT(minor < (1u << 11)); // max value of minor is 2047 NANOVDB_ASSERT(patch < (1u << 10)); // max value of patch is 1023 } __hostdev__ bool operator==(const Version& rhs) const { return mData == rhs.mData; } __hostdev__ bool operator<( const Version& rhs) const { return mData < rhs.mData; } __hostdev__ bool operator<=(const Version& rhs) const { return mData <= rhs.mData; } __hostdev__ bool operator>( const Version& rhs) const { return mData > rhs.mData; } __hostdev__ bool operator>=(const Version& rhs) const { return mData >= rhs.mData; } __hostdev__ uint32_t id() const { return mData; } __hostdev__ uint32_t getMajor() const { return (mData >> 21) & ((1u << 11) - 1); } __hostdev__ uint32_t getMinor() const { return (mData >> 10) & ((1u << 11) - 1); } __hostdev__ uint32_t getPatch() const { return mData & ((1u << 10) - 1); } __hostdev__ bool isCompatible() const { return this->getMajor() == uint32_t(NANOVDB_MAJOR_VERSION_NUMBER); } /// @brief Returns the difference between major version of this instance and NANOVDB_MAJOR_VERSION_NUMBER /// @return return 0 if the major version equals NANOVDB_MAJOR_VERSION_NUMBER, else a negative age if this /// instance has a smaller major verion (is older), and a positive age if it is newer, i.e. larger. __hostdev__ int age() const {return int(this->getMajor()) - int(NANOVDB_MAJOR_VERSION_NUMBER);} }; // Version /// @brief print the verion number to a c-string /// @param dst destination string of size 8 or more /// @param v version to be printed /// @return returns destination string @c dst __hostdev__ inline char* toStr(char *dst, const Version &v) { return util::sprint(dst, v.getMajor(), ".",v.getMinor(), ".",v.getPatch()); } // ----------------------------> TensorTraits <-------------------------------------- template<typename T, int Rank = (util::is_specialization<T, math::Vec3>::value || util::is_specialization<T, math::Vec4>::value || util::is_same<T, math::Rgba8>::value) ? 1 : 0> struct TensorTraits; template<typename T> struct TensorTraits<T, 0> { static const int Rank = 0; // i.e. scalar static const bool IsScalar = true; static const bool IsVector = false; static const int Size = 1; using ElementType = T; static T scalar(const T& s) { return s; } }; template<typename T> struct TensorTraits<T, 1> { static const int Rank = 1; // i.e. 
vector static const bool IsScalar = false; static const bool IsVector = true; static const int Size = T::SIZE; using ElementType = typename T::ValueType; static ElementType scalar(const T& v) { return v.length(); } }; // ----------------------------> FloatTraits <-------------------------------------- template<typename T, int = sizeof(typename TensorTraits<T>::ElementType)> struct FloatTraits { using FloatType = float; }; template<typename T> struct FloatTraits<T, 8> { using FloatType = double; }; template<> struct FloatTraits<bool, 1> { using FloatType = bool; }; template<> struct FloatTraits<ValueIndex, 1> // size of empty class in C++ is 1 byte and not 0 byte { using FloatType = uint64_t; }; template<> struct FloatTraits<ValueIndexMask, 1> // size of empty class in C++ is 1 byte and not 0 byte { using FloatType = uint64_t; }; template<> struct FloatTraits<ValueOnIndex, 1> // size of empty class in C++ is 1 byte and not 0 byte { using FloatType = uint64_t; }; template<> struct FloatTraits<ValueOnIndexMask, 1> // size of empty class in C++ is 1 byte and not 0 byte { using FloatType = uint64_t; }; template<> struct FloatTraits<ValueMask, 1> // size of empty class in C++ is 1 byte and not 0 byte { using FloatType = bool; }; template<> struct FloatTraits<Point, 1> // size of empty class in C++ is 1 byte and not 0 byte { using FloatType = double; }; // ----------------------------> mapping BuildType -> GridType <-------------------------------------- /// @brief Maps from a templated build type to a GridType enum template<typename BuildT> __hostdev__ inline GridType toGridType() { if (util::is_same<BuildT, float>::value) { // resolved at compile-time return GridType::Float; } else if (util::is_same<BuildT, double>::value) { return GridType::Double; } else if (util::is_same<BuildT, int16_t>::value) { return GridType::Int16; } else if (util::is_same<BuildT, int32_t>::value) { return GridType::Int32; } else if (util::is_same<BuildT, int64_t>::value) { return GridType::Int64; } else if (util::is_same<BuildT, Vec3f>::value) { return GridType::Vec3f; } else if (util::is_same<BuildT, Vec3d>::value) { return GridType::Vec3d; } else if (util::is_same<BuildT, uint32_t>::value) { return GridType::UInt32; } else if (util::is_same<BuildT, ValueMask>::value) { return GridType::Mask; } else if (util::is_same<BuildT, Half>::value) { return GridType::Half; } else if (util::is_same<BuildT, ValueIndex>::value) { return GridType::Index; } else if (util::is_same<BuildT, ValueOnIndex>::value) { return GridType::OnIndex; } else if (util::is_same<BuildT, ValueIndexMask>::value) { return GridType::IndexMask; } else if (util::is_same<BuildT, ValueOnIndexMask>::value) { return GridType::OnIndexMask; } else if (util::is_same<BuildT, bool>::value) { return GridType::Boolean; } else if (util::is_same<BuildT, math::Rgba8>::value) { return GridType::RGBA8; } else if (util::is_same<BuildT, Fp4>::value) { return GridType::Fp4; } else if (util::is_same<BuildT, Fp8>::value) { return GridType::Fp8; } else if (util::is_same<BuildT, Fp16>::value) { return GridType::Fp16; } else if (util::is_same<BuildT, FpN>::value) { return GridType::FpN; } else if (util::is_same<BuildT, Vec4f>::value) { return GridType::Vec4f; } else if (util::is_same<BuildT, Vec4d>::value) { return GridType::Vec4d; } else if (util::is_same<BuildT, Point>::value) { return GridType::PointIndex; } else if (util::is_same<BuildT, Vec3u8>::value) { return GridType::Vec3u8; } else if (util::is_same<BuildT, Vec3u16>::value) { return GridType::Vec3u16; } else if 
(util::is_same<BuildT, uint8_t>::value) { return GridType::UInt8; } return GridType::Unknown; }// toGridType template<typename BuildT> [[deprecated("Use toGridType<T>() instead.")]] __hostdev__ inline GridType mapToGridType(){return toGridType<BuildT>();} // ----------------------------> mapping BuildType -> GridClass <-------------------------------------- /// @brief Maps from a templated build type to a GridClass enum template<typename BuildT> __hostdev__ inline GridClass toGridClass(GridClass defaultClass = GridClass::Unknown) { if (util::is_same<BuildT, ValueMask>::value) { return GridClass::Topology; } else if (BuildTraits<BuildT>::is_index) { return GridClass::IndexGrid; } else if (util::is_same<BuildT, math::Rgba8>::value) { return GridClass::VoxelVolume; } else if (util::is_same<BuildT, Point>::value) { return GridClass::PointIndex; } return defaultClass; } template<typename BuildT> [[deprecated("Use toGridClass<T>() instead.")]] __hostdev__ inline GridClass mapToGridClass(GridClass defaultClass = GridClass::Unknown) { return toGridClass<BuildT>(); } // ----------------------------> BitFlags <-------------------------------------- template<int N> struct BitArray; template<> struct BitArray<8> { uint8_t mFlags{0}; }; template<> struct BitArray<16> { uint16_t mFlags{0}; }; template<> struct BitArray<32> { uint32_t mFlags{0}; }; template<> struct BitArray<64> { uint64_t mFlags{0}; }; template<int N> class BitFlags : public BitArray<N> { protected: using BitArray<N>::mFlags; public: using Type = decltype(mFlags); BitFlags() {} BitFlags(Type mask) : BitArray<N>{mask} {} BitFlags(std::initializer_list<uint8_t> list) { for (auto bit : list) mFlags |= static_cast<Type>(1 << bit); } template<typename MaskT> BitFlags(std::initializer_list<MaskT> list) { for (auto mask : list) mFlags |= static_cast<Type>(mask); } __hostdev__ Type data() const { return mFlags; } __hostdev__ Type& data() { return mFlags; } __hostdev__ void initBit(std::initializer_list<uint8_t> list) { mFlags = 0u; for (auto bit : list) mFlags |= static_cast<Type>(1 << bit); } template<typename MaskT> __hostdev__ void initMask(std::initializer_list<MaskT> list) { mFlags = 0u; for (auto mask : list) mFlags |= static_cast<Type>(mask); } //__hostdev__ Type& data() { return mFlags; } //__hostdev__ Type data() const { return mFlags; } __hostdev__ Type getFlags() const { return mFlags & (static_cast<Type>(GridFlags::End) - 1u); } // mask out everything except relevant bits __hostdev__ void setOn() { mFlags = ~Type(0u); } __hostdev__ void setOff() { mFlags = Type(0u); } __hostdev__ void setBitOn(uint8_t bit) { mFlags |= static_cast<Type>(1 << bit); } __hostdev__ void setBitOff(uint8_t bit) { mFlags &= ~static_cast<Type>(1 << bit); } __hostdev__ void setBitOn(std::initializer_list<uint8_t> list) { for (auto bit : list) mFlags |= static_cast<Type>(1 << bit); } __hostdev__ void setBitOff(std::initializer_list<uint8_t> list) { for (auto bit : list) mFlags &= ~static_cast<Type>(1 << bit); } template<typename MaskT> __hostdev__ void setMaskOn(MaskT mask) { mFlags |= static_cast<Type>(mask); } template<typename MaskT> __hostdev__ void setMaskOff(MaskT mask) { mFlags &= ~static_cast<Type>(mask); } template<typename MaskT> __hostdev__ void setMaskOn(std::initializer_list<MaskT> list) { for (auto mask : list) mFlags |= static_cast<Type>(mask); } template<typename MaskT> __hostdev__ void setMaskOff(std::initializer_list<MaskT> list) { for (auto mask : list) mFlags &= ~static_cast<Type>(mask); } __hostdev__ void setBit(uint8_t bit, bool on) { on 
? this->setBitOn(bit) : this->setBitOff(bit); } template<typename MaskT> __hostdev__ void setMask(MaskT mask, bool on) { on ? this->setMaskOn(mask) : this->setMaskOff(mask); } __hostdev__ bool isOn() const { return mFlags == ~Type(0u); } __hostdev__ bool isOff() const { return mFlags == Type(0u); } __hostdev__ bool isBitOn(uint8_t bit) const { return 0 != (mFlags & static_cast<Type>(1 << bit)); } __hostdev__ bool isBitOff(uint8_t bit) const { return 0 == (mFlags & static_cast<Type>(1 << bit)); } template<typename MaskT> __hostdev__ bool isMaskOn(MaskT mask) const { return 0 != (mFlags & static_cast<Type>(mask)); } template<typename MaskT> __hostdev__ bool isMaskOff(MaskT mask) const { return 0 == (mFlags & static_cast<Type>(mask)); } /// @brief return true if any of the masks in the list are on template<typename MaskT> __hostdev__ bool isMaskOn(std::initializer_list<MaskT> list) const { for (auto mask : list) { if (0 != (mFlags & static_cast<Type>(mask))) return true; } return false; } /// @brief return true if any of the masks in the list are off template<typename MaskT> __hostdev__ bool isMaskOff(std::initializer_list<MaskT> list) const { for (auto mask : list) { if (0 == (mFlags & static_cast<Type>(mask))) return true; } return false; } /// @brief required for backwards compatibility __hostdev__ BitFlags& operator=(Type n) { mFlags = n; return *this; } }; // BitFlags<N> // ----------------------------> Mask <-------------------------------------- /// @brief Bit-mask to encode active states and facilitate sequential iterators /// and a fast codec for I/O compression. template<uint32_t LOG2DIM> class Mask { public: static constexpr uint32_t SIZE = 1U << (3 * LOG2DIM); // Number of bits in mask static constexpr uint32_t WORD_COUNT = SIZE >> 6; // Number of 64 bit words /// @brief Return the memory footprint in bytes of this Mask __hostdev__ static size_t memUsage() { return sizeof(Mask); } /// @brief Return the number of bits available in this Mask __hostdev__ static uint32_t bitCount() { return SIZE; } /// @brief Return the number of machine words used by this Mask __hostdev__ static uint32_t wordCount() { return WORD_COUNT; } /// @brief Return the total number of set bits in this Mask __hostdev__ uint32_t countOn() const { uint32_t sum = 0; for (const uint64_t *w = mWords, *q = w + WORD_COUNT; w != q; ++w) sum += util::countOn(*w); return sum; } /// @brief Return the number of lower set bits in mask up to but excluding the i'th bit inline __hostdev__ uint32_t countOn(uint32_t i) const { uint32_t n = i >> 6, sum = util::countOn(mWords[n] & ((uint64_t(1) << (i & 63u)) - 1u)); for (const uint64_t* w = mWords; n--; ++w) sum += util::countOn(*w); return sum; } template<bool On> class Iterator { public: __hostdev__ Iterator() : mPos(Mask::SIZE) , mParent(nullptr) { } __hostdev__ Iterator(uint32_t pos, const Mask* parent) : mPos(pos) , mParent(parent) { } Iterator& operator=(const Iterator&) = default; __hostdev__ uint32_t operator*() const { return mPos; } __hostdev__ uint32_t pos() const { return mPos; } __hostdev__ operator bool() const { return mPos != Mask::SIZE; } __hostdev__ Iterator& operator++() { mPos = mParent->findNext<On>(mPos + 1); return *this; } __hostdev__ Iterator operator++(int) { auto tmp = *this; ++(*this); return tmp; } private: uint32_t mPos; const Mask* mParent; }; // Member class Iterator class DenseIterator { public: __hostdev__ DenseIterator(uint32_t pos = Mask::SIZE) : mPos(pos) { } DenseIterator& operator=(const DenseIterator&) = default; __hostdev__ uint32_t 
operator*() const { return mPos; } __hostdev__ uint32_t pos() const { return mPos; } __hostdev__ operator bool() const { return mPos != Mask::SIZE; } __hostdev__ DenseIterator& operator++() { ++mPos; return *this; } __hostdev__ DenseIterator operator++(int) { auto tmp = *this; ++mPos; return tmp; } private: uint32_t mPos; }; // Member class DenseIterator using OnIterator = Iterator<true>; using OffIterator = Iterator<false>; __hostdev__ OnIterator beginOn() const { return OnIterator(this->findFirst<true>(), this); } __hostdev__ OffIterator beginOff() const { return OffIterator(this->findFirst<false>(), this); } __hostdev__ DenseIterator beginAll() const { return DenseIterator(0); } /// @brief Initialize all bits to zero. __hostdev__ Mask() { for (uint32_t i = 0; i < WORD_COUNT; ++i) mWords[i] = 0; } __hostdev__ Mask(bool on) { const uint64_t v = on ? ~uint64_t(0) : uint64_t(0); for (uint32_t i = 0; i < WORD_COUNT; ++i) mWords[i] = v; } /// @brief Copy constructor __hostdev__ Mask(const Mask& other) { for (uint32_t i = 0; i < WORD_COUNT; ++i) mWords[i] = other.mWords[i]; } /// @brief Return a pointer to the list of words of the bit mask __hostdev__ uint64_t* words() { return mWords; } __hostdev__ const uint64_t* words() const { return mWords; } /// @brief Assignment operator that works with openvdb::util::NodeMask template<typename MaskT = Mask> __hostdev__ typename util::enable_if<!util::is_same<MaskT, Mask>::value, Mask&>::type operator=(const MaskT& other) { static_assert(sizeof(Mask) == sizeof(MaskT), "Mismatching sizeof"); static_assert(WORD_COUNT == MaskT::WORD_COUNT, "Mismatching word count"); static_assert(LOG2DIM == MaskT::LOG2DIM, "Mismatching LOG2DIM"); auto* src = reinterpret_cast<const uint64_t*>(&other); for (uint64_t *dst = mWords, *end = dst + WORD_COUNT; dst != end; ++dst) *dst = *src++; return *this; } //__hostdev__ Mask& operator=(const Mask& other){return *util::memcpy(this, &other);} Mask& operator=(const Mask&) = default; __hostdev__ bool operator==(const Mask& other) const { for (uint32_t i = 0; i < WORD_COUNT; ++i) { if (mWords[i] != other.mWords[i]) return false; } return true; } __hostdev__ bool operator!=(const Mask& other) const { return !((*this) == other); } /// @brief Return true if the given bit is set. __hostdev__ bool isOn(uint32_t n) const { return 0 != (mWords[n >> 6] & (uint64_t(1) << (n & 63))); } /// @brief Return true if the given bit is NOT set. __hostdev__ bool isOff(uint32_t n) const { return 0 == (mWords[n >> 6] & (uint64_t(1) << (n & 63))); } /// @brief Return true if all the bits are set in this Mask. __hostdev__ bool isOn() const { for (uint32_t i = 0; i < WORD_COUNT; ++i) if (mWords[i] != ~uint64_t(0)) return false; return true; } /// @brief Return true if none of the bits are set in this Mask. __hostdev__ bool isOff() const { for (uint32_t i = 0; i < WORD_COUNT; ++i) if (mWords[i] != uint64_t(0)) return false; return true; } /// @brief Set the specified bit on. __hostdev__ void setOn(uint32_t n) { mWords[n >> 6] |= uint64_t(1) << (n & 63); } /// @brief Set the specified bit off. __hostdev__ void setOff(uint32_t n) { mWords[n >> 6] &= ~(uint64_t(1) << (n & 63)); } #if defined(__CUDACC__) // the following functions only run on the GPU! 
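// A minimal device-side sketch (illustrative only, not part of the original header) of how the
// atomic setters below might be used to populate a Mask<3> (512 bits) from a CUDA kernel;
// "mask" and "indices" are hypothetical device pointers and every index is assumed to be < 512:
//
//   __global__ void activateVoxels(nanovdb::Mask<3>* mask, const uint32_t* indices, int count)
//   {
//       const int i = blockIdx.x * blockDim.x + threadIdx.x;
//       if (i < count) mask->setOnAtomic(indices[i]); // race-free even when indices collide
//   }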
__device__ inline void setOnAtomic(uint32_t n) { atomicOr(reinterpret_cast<unsigned long long int*>(this) + (n >> 6), 1ull << (n & 63)); } __device__ inline void setOffAtomic(uint32_t n) { atomicAnd(reinterpret_cast<unsigned long long int*>(this) + (n >> 6), ~(1ull << (n & 63))); } __device__ inline void setAtomic(uint32_t n, bool on) { on ? this->setOnAtomic(n) : this->setOffAtomic(n); } #endif /// @brief Set the specified bit on or off. __hostdev__ void set(uint32_t n, bool on) { #if 1 // switch between branchless auto& word = mWords[n >> 6]; n &= 63; word &= ~(uint64_t(1) << n); word |= uint64_t(on) << n; #else on ? this->setOn(n) : this->setOff(n); #endif } /// @brief Set all bits on __hostdev__ void setOn() { for (uint32_t i = 0; i < WORD_COUNT; ++i)mWords[i] = ~uint64_t(0); } /// @brief Set all bits off __hostdev__ void setOff() { for (uint32_t i = 0; i < WORD_COUNT; ++i) mWords[i] = uint64_t(0); } /// @brief Set all bits off __hostdev__ void set(bool on) { const uint64_t v = on ? ~uint64_t(0) : uint64_t(0); for (uint32_t i = 0; i < WORD_COUNT; ++i) mWords[i] = v; } /// brief Toggle the state of all bits in the mask __hostdev__ void toggle() { uint32_t n = WORD_COUNT; for (auto* w = mWords; n--; ++w) *w = ~*w; } __hostdev__ void toggle(uint32_t n) { mWords[n >> 6] ^= uint64_t(1) << (n & 63); } /// @brief Bitwise intersection __hostdev__ Mask& operator&=(const Mask& other) { uint64_t* w1 = mWords; const uint64_t* w2 = other.mWords; for (uint32_t n = WORD_COUNT; n--; ++w1, ++w2) *w1 &= *w2; return *this; } /// @brief Bitwise union __hostdev__ Mask& operator|=(const Mask& other) { uint64_t* w1 = mWords; const uint64_t* w2 = other.mWords; for (uint32_t n = WORD_COUNT; n--; ++w1, ++w2) *w1 |= *w2; return *this; } /// @brief Bitwise difference __hostdev__ Mask& operator-=(const Mask& other) { uint64_t* w1 = mWords; const uint64_t* w2 = other.mWords; for (uint32_t n = WORD_COUNT; n--; ++w1, ++w2) *w1 &= ~*w2; return *this; } /// @brief Bitwise XOR __hostdev__ Mask& operator^=(const Mask& other) { uint64_t* w1 = mWords; const uint64_t* w2 = other.mWords; for (uint32_t n = WORD_COUNT; n--; ++w1, ++w2) *w1 ^= *w2; return *this; } NANOVDB_HOSTDEV_DISABLE_WARNING template<bool ON> __hostdev__ uint32_t findFirst() const { uint32_t n = 0u; const uint64_t* w = mWords; for (; n < WORD_COUNT && !(ON ? *w : ~*w); ++w, ++n); return n < WORD_COUNT ? (n << 6) + util::findLowestOn(ON ? *w : ~*w) : SIZE; } NANOVDB_HOSTDEV_DISABLE_WARNING template<bool ON> __hostdev__ uint32_t findNext(uint32_t start) const { uint32_t n = start >> 6; // initiate if (n >= WORD_COUNT) return SIZE; // check for out of bounds uint32_t m = start & 63u; uint64_t b = ON ? mWords[n] : ~mWords[n]; if (b & (uint64_t(1u) << m)) return start; // simple case: start is on/off b &= ~uint64_t(0u) << m; // mask out lower bits while (!b && ++n < WORD_COUNT) b = ON ? mWords[n] : ~mWords[n]; // find next non-zero word return b ? (n << 6) + util::findLowestOn(b) : SIZE; // catch last word=0 } NANOVDB_HOSTDEV_DISABLE_WARNING template<bool ON> __hostdev__ uint32_t findPrev(uint32_t start) const { uint32_t n = start >> 6; // initiate if (n >= WORD_COUNT) return SIZE; // check for out of bounds uint32_t m = start & 63u; uint64_t b = ON ? mWords[n] : ~mWords[n]; if (b & (uint64_t(1u) << m)) return start; // simple case: start is on/off b &= (uint64_t(1u) << m) - 1u; // mask out higher bits while (!b && n) b = ON ? mWords[--n] : ~mWords[--n]; // find previous non-zero word return b ? 
(n << 6) + util::findHighestOn(b) : SIZE; // catch first word=0 } private: uint64_t mWords[WORD_COUNT]; }; // Mask class // ----------------------------> Map <-------------------------------------- /// @brief Defines an affine transform and its inverse represented as a 3x3 matrix and a vec3 translation struct Map { // 264B (not 32B aligned!) float mMatF[9]; // 9*4B <- 3x3 matrix float mInvMatF[9]; // 9*4B <- 3x3 matrix float mVecF[3]; // 3*4B <- translation float mTaperF; // 4B, placeholder for taper value double mMatD[9]; // 9*8B <- 3x3 matrix double mInvMatD[9]; // 9*8B <- 3x3 matrix double mVecD[3]; // 3*8B <- translation double mTaperD; // 8B, placeholder for taper value /// @brief Default constructor for the identity map __hostdev__ Map() : mMatF{ 1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f} , mInvMatF{1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f} , mVecF{0.0f, 0.0f, 0.0f} , mTaperF{1.0f} , mMatD{ 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0} , mInvMatD{1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0} , mVecD{0.0, 0.0, 0.0} , mTaperD{1.0} { } __hostdev__ Map(double s, const Vec3d& t = Vec3d(0.0, 0.0, 0.0)) : mMatF{float(s), 0.0f, 0.0f, 0.0f, float(s), 0.0f, 0.0f, 0.0f, float(s)} , mInvMatF{1.0f / float(s), 0.0f, 0.0f, 0.0f, 1.0f / float(s), 0.0f, 0.0f, 0.0f, 1.0f / float(s)} , mVecF{float(t[0]), float(t[1]), float(t[2])} , mTaperF{1.0f} , mMatD{s, 0.0, 0.0, 0.0, s, 0.0, 0.0, 0.0, s} , mInvMatD{1.0 / s, 0.0, 0.0, 0.0, 1.0 / s, 0.0, 0.0, 0.0, 1.0 / s} , mVecD{t[0], t[1], t[2]} , mTaperD{1.0} { } /// @brief Initialize the member data from 3x3 or 4x4 matrices /// @note This is not _hostdev__ since then MatT=openvdb::Mat4d will produce warnings template<typename MatT, typename Vec3T> void set(const MatT& mat, const MatT& invMat, const Vec3T& translate, double taper = 1.0); /// @brief Initialize the member data from 4x4 matrices /// @note The last (4th) row of invMat is actually ignored. /// This is not _hostdev__ since then Mat4T=openvdb::Mat4d will produce warnings template<typename Mat4T> void set(const Mat4T& mat, const Mat4T& invMat, double taper = 1.0) { this->set(mat, invMat, mat[3], taper); } template<typename Vec3T> void set(double scale, const Vec3T& translation, double taper = 1.0); /// @brief Apply the forward affine transformation to a vector using 64bit floating point arithmetics. /// @note Typically this operation is used for the scale, rotation and translation of index -> world mapping /// @tparam Vec3T Template type of the 3D vector to be mapped /// @param ijk 3D vector to be mapped - typically floating point index coordinates /// @return Forward mapping for affine transformation, i.e. (mat x ijk) + translation template<typename Vec3T> __hostdev__ Vec3T applyMap(const Vec3T& ijk) const { return math::matMult(mMatD, mVecD, ijk); } /// @brief Apply the forward affine transformation to a vector using 32bit floating point arithmetics. /// @note Typically this operation is used for the scale, rotation and translation of index -> world mapping /// @tparam Vec3T Template type of the 3D vector to be mapped /// @param ijk 3D vector to be mapped - typically floating point index coordinates /// @return Forward mapping for affine transformation, i.e. (mat x ijk) + translation template<typename Vec3T> __hostdev__ Vec3T applyMapF(const Vec3T& ijk) const { return math::matMult(mMatF, mVecF, ijk); } /// @brief Apply the linear forward 3x3 transformation to an input 3d vector using 64bit floating point arithmetics, /// e.g. scale and rotation WITHOUT translation. 
/// @note Typically this operation is used for scale and rotation from index -> world mapping /// @tparam Vec3T Template type of the 3D vector to be mapped /// @param ijk 3D vector to be mapped - typically floating point index coordinates /// @return linear forward 3x3 mapping of the input vector template<typename Vec3T> __hostdev__ Vec3T applyJacobian(const Vec3T& ijk) const { return math::matMult(mMatD, ijk); } /// @brief Apply the linear forward 3x3 transformation to an input 3d vector using 32bit floating point arithmetics, /// e.g. scale and rotation WITHOUT translation. /// @note Typically this operation is used for scale and rotation from index -> world mapping /// @tparam Vec3T Template type of the 3D vector to be mapped /// @param ijk 3D vector to be mapped - typically floating point index coordinates /// @return linear forward 3x3 mapping of the input vector template<typename Vec3T> __hostdev__ Vec3T applyJacobianF(const Vec3T& ijk) const { return math::matMult(mMatF, ijk); } /// @brief Apply the inverse affine mapping to a vector using 64bit floating point arithmetics. /// @note Typically this operation is used for the world -> index mapping /// @tparam Vec3T Template type of the 3D vector to be mapped /// @param xyz 3D vector to be mapped - typically floating point world coordinates /// @return Inverse affine mapping of the input @c xyz i.e. (xyz - translation) x mat^-1 template<typename Vec3T> __hostdev__ Vec3T applyInverseMap(const Vec3T& xyz) const { return math::matMult(mInvMatD, Vec3T(xyz[0] - mVecD[0], xyz[1] - mVecD[1], xyz[2] - mVecD[2])); } /// @brief Apply the inverse affine mapping to a vector using 32bit floating point arithmetics. /// @note Typically this operation is used for the world -> index mapping /// @tparam Vec3T Template type of the 3D vector to be mapped /// @param xyz 3D vector to be mapped - typically floating point world coordinates /// @return Inverse affine mapping of the input @c xyz i.e. (xyz - translation) x mat^-1 template<typename Vec3T> __hostdev__ Vec3T applyInverseMapF(const Vec3T& xyz) const { return math::matMult(mInvMatF, Vec3T(xyz[0] - mVecF[0], xyz[1] - mVecF[1], xyz[2] - mVecF[2])); } /// @brief Apply the linear inverse 3x3 transformation to an input 3d vector using 64bit floating point arithmetics, /// e.g. inverse scale and inverse rotation WITHOUT translation. /// @note Typically this operation is used for scale and rotation from world -> index mapping /// @tparam Vec3T Template type of the 3D vector to be mapped /// @param ijk 3D vector to be mapped - typically floating point index coordinates /// @return linear inverse 3x3 mapping of the input vector i.e. xyz x mat^-1 template<typename Vec3T> __hostdev__ Vec3T applyInverseJacobian(const Vec3T& xyz) const { return math::matMult(mInvMatD, xyz); } /// @brief Apply the linear inverse 3x3 transformation to an input 3d vector using 32bit floating point arithmetics, /// e.g. inverse scale and inverse rotation WITHOUT translation. /// @note Typically this operation is used for scale and rotation from world -> index mapping /// @tparam Vec3T Template type of the 3D vector to be mapped /// @param ijk 3D vector to be mapped - typically floating point index coordinates /// @return linear inverse 3x3 mapping of the input vector i.e. 
xyz x mat^-1 template<typename Vec3T> __hostdev__ Vec3T applyInverseJacobianF(const Vec3T& xyz) const { return math::matMult(mInvMatF, xyz); } /// @brief Apply the transposed inverse 3x3 transformation to an input 3d vector using 64bit floating point arithmetics, /// e.g. inverse scale and inverse rotation WITHOUT translation. /// @note Typically this operation is used for scale and rotation from world -> index mapping /// @tparam Vec3T Template type of the 3D vector to be mapped /// @param ijk 3D vector to be mapped - typically floating point index coordinates /// @return linear inverse 3x3 mapping of the input vector i.e. xyz x mat^-1 template<typename Vec3T> __hostdev__ Vec3T applyIJT(const Vec3T& xyz) const { return math::matMultT(mInvMatD, xyz); } template<typename Vec3T> __hostdev__ Vec3T applyIJTF(const Vec3T& xyz) const { return math::matMultT(mInvMatF, xyz); } /// @brief Return a voxels size in each coordinate direction, measured at the origin __hostdev__ Vec3d getVoxelSize() const { return this->applyMap(Vec3d(1)) - this->applyMap(Vec3d(0)); } }; // Map template<typename MatT, typename Vec3T> inline void Map::set(const MatT& mat, const MatT& invMat, const Vec3T& translate, double taper) { float * mf = mMatF, *vf = mVecF, *mif = mInvMatF; double *md = mMatD, *vd = mVecD, *mid = mInvMatD; mTaperF = static_cast<float>(taper); mTaperD = taper; for (int i = 0; i < 3; ++i) { *vd++ = translate[i]; //translation *vf++ = static_cast<float>(translate[i]); //translation for (int j = 0; j < 3; ++j) { *md++ = mat[j][i]; //transposed *mid++ = invMat[j][i]; *mf++ = static_cast<float>(mat[j][i]); //transposed *mif++ = static_cast<float>(invMat[j][i]); } } } template<typename Vec3T> inline void Map::set(double dx, const Vec3T& trans, double taper) { NANOVDB_ASSERT(dx > 0.0); const double mat[3][3] = { {dx, 0.0, 0.0}, // row 0 {0.0, dx, 0.0}, // row 1 {0.0, 0.0, dx} }; // row 2 const double idx = 1.0 / dx; const double invMat[3][3] = { {idx, 0.0, 0.0}, // row 0 {0.0, idx, 0.0}, // row 1 {0.0, 0.0, idx} }; // row 2 this->set(mat, invMat, trans, taper); } // ----------------------------> GridBlindMetaData <-------------------------------------- struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) GridBlindMetaData { // 288 bytes static const int MaxNameSize = 256; // due to NULL termination the maximum length is one less! int64_t mDataOffset; // byte offset to the blind data, relative to this GridBlindMetaData. uint64_t mValueCount; // number of blind values, e.g. point count uint32_t mValueSize;// byte size of each value, e.g. 4 if mDataType=Float and 1 if mDataType=Unknown since that amounts to char GridBlindDataSemantic mSemantic; // semantic meaning of the data. GridBlindDataClass mDataClass; // 4 bytes GridType mDataType; // 4 bytes char mName[MaxNameSize]; // note this includes the NULL termination // no padding required for 32 byte alignment // disallow copy-construction since methods like blindData and getBlindData uses the this pointer! GridBlindMetaData(const GridBlindMetaData&) = delete; // disallow copy-assignment since methods like blindData and getBlindData uses the this pointer! const GridBlindMetaData& operator=(const GridBlindMetaData&) = delete; __hostdev__ void setBlindData(void* blindData) { mDataOffset = util::PtrDiff(blindData, this); } // unsafe __hostdev__ const void* blindData() const {return util::PtrAdd(this, mDataOffset);} /// @brief Get a const pointer to the blind data represented by this meta data /// @tparam BlindDataT Expected value type of the blind data. 
/// @return Returns NULL if mGridType!=toGridType<BlindDataT>(), else a const point of type BlindDataT. /// @note Use mDataType=Unknown if BlindDataT is a custom data type unknown to NanoVDB. template<typename BlindDataT> __hostdev__ const BlindDataT* getBlindData() const { //if (mDataType != toGridType<BlindDataT>()) printf("getBlindData mismatch\n"); return mDataType == toGridType<BlindDataT>() ? util::PtrAdd<BlindDataT>(this, mDataOffset) : nullptr; } /// @brief return true if this meta data has a valid combination of semantic, class and value tags __hostdev__ bool isValid() const { auto check = [&]()->bool{ switch (mDataType){ case GridType::Unknown: return mValueSize==1u;// i.e. we encode data as mValueCount chars case GridType::Float: return mValueSize==4u; case GridType::Double: return mValueSize==8u; case GridType::Int16: return mValueSize==2u; case GridType::Int32: return mValueSize==4u; case GridType::Int64: return mValueSize==8u; case GridType::Vec3f: return mValueSize==12u; case GridType::Vec3d: return mValueSize==24u; case GridType::Half: return mValueSize==2u; case GridType::RGBA8: return mValueSize==4u; case GridType::Fp8: return mValueSize==1u; case GridType::Fp16: return mValueSize==2u; case GridType::Vec4f: return mValueSize==16u; case GridType::Vec4d: return mValueSize==32u; case GridType::Vec3u8: return mValueSize==3u; case GridType::Vec3u16: return mValueSize==6u; default: return true;}// all other combinations are valid }; return nanovdb::isValid(mDataClass, mSemantic, mDataType) && check(); } /// @brief return size in bytes of the blind data represented by this blind meta data /// @note This size includes possible padding for 32 byte alignment. The actual amount /// of bind data is mValueCount * mValueSize __hostdev__ uint64_t blindDataSize() const { return math::AlignUp<NANOVDB_DATA_ALIGNMENT>(mValueCount * mValueSize); } }; // GridBlindMetaData // ----------------------------> NodeTrait <-------------------------------------- /// @brief Struct to derive node type from its level in a given /// grid, tree or root while preserving constness template<typename GridOrTreeOrRootT, int LEVEL> struct NodeTrait; // Partial template specialization of above Node struct template<typename GridOrTreeOrRootT> struct NodeTrait<GridOrTreeOrRootT, 0> { static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3, "Tree depth is not supported"); using Type = typename GridOrTreeOrRootT::LeafNodeType; using type = typename GridOrTreeOrRootT::LeafNodeType; }; template<typename GridOrTreeOrRootT> struct NodeTrait<const GridOrTreeOrRootT, 0> { static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3, "Tree depth is not supported"); using Type = const typename GridOrTreeOrRootT::LeafNodeType; using type = const typename GridOrTreeOrRootT::LeafNodeType; }; template<typename GridOrTreeOrRootT> struct NodeTrait<GridOrTreeOrRootT, 1> { static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3, "Tree depth is not supported"); using Type = typename GridOrTreeOrRootT::RootNodeType::ChildNodeType::ChildNodeType; using type = typename GridOrTreeOrRootT::RootNodeType::ChildNodeType::ChildNodeType; }; template<typename GridOrTreeOrRootT> struct NodeTrait<const GridOrTreeOrRootT, 1> { static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3, "Tree depth is not supported"); using Type = const typename GridOrTreeOrRootT::RootNodeType::ChildNodeType::ChildNodeType; using type = const typename GridOrTreeOrRootT::RootNodeType::ChildNodeType::ChildNodeType; }; template<typename GridOrTreeOrRootT> struct 
NodeTrait<GridOrTreeOrRootT, 2>
{
    static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3, "Tree depth is not supported");
    using Type = typename GridOrTreeOrRootT::RootNodeType::ChildNodeType;
    using type = typename GridOrTreeOrRootT::RootNodeType::ChildNodeType;
};
template<typename GridOrTreeOrRootT>
struct NodeTrait<const GridOrTreeOrRootT, 2>
{
    static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3, "Tree depth is not supported");
    using Type = const typename GridOrTreeOrRootT::RootNodeType::ChildNodeType;
    using type = const typename GridOrTreeOrRootT::RootNodeType::ChildNodeType;
};
template<typename GridOrTreeOrRootT>
struct NodeTrait<GridOrTreeOrRootT, 3>
{
    static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3, "Tree depth is not supported");
    using Type = typename GridOrTreeOrRootT::RootNodeType;
    using type = typename GridOrTreeOrRootT::RootNodeType;
};
template<typename GridOrTreeOrRootT>
struct NodeTrait<const GridOrTreeOrRootT, 3>
{
    static_assert(GridOrTreeOrRootT::RootNodeType::LEVEL == 3, "Tree depth is not supported");
    using Type = const typename GridOrTreeOrRootT::RootNodeType;
    using type = const typename GridOrTreeOrRootT::RootNodeType;
};

// ----------------------------> Forward declarations of random access methods <--------------------------------------

template<typename BuildT> struct GetValue;
template<typename BuildT> struct SetValue;
template<typename BuildT> struct SetVoxel;
template<typename BuildT> struct GetState;
template<typename BuildT> struct GetDim;
template<typename BuildT> struct GetLeaf;
template<typename BuildT> struct ProbeValue;
template<typename BuildT> struct GetNodeInfo;

// ----------------------------> CheckMode <----------------------------------

/// @brief List of different modes for computing a checksum
enum class CheckMode : uint32_t { Disable = 0, // no computation
                                  Empty = 0,
                                  Half = 1,
                                  Partial = 1, // fast but approximate
                                  Default = 1, // defaults to Partial
                                  Full = 2, // slow but accurate
                                  End = 3, // marks the end of the enum list
                                  StrLen = 9 + End};

/// @brief Prints CheckMode enum to a c-string
/// @param dst Destination c-string
/// @param mode CheckMode enum to be converted to string
/// @return destination string @c dst
__hostdev__ inline char* toStr(char *dst, CheckMode mode)
{
    switch (mode){
        case CheckMode::Half: return util::strcpy(dst, "half");
        case CheckMode::Full: return util::strcpy(dst, "full");
        default: return util::strcpy(dst, "disabled");
    }
}

// ----------------------------> Checksum <----------------------------------

/// @brief Class that encapsulates two CRC32 checksums, one for the Grid, Tree and Root node meta data
///        and one for the remaining grid nodes.
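/// @par Example (illustrative only; the two CRC32 codes are made-up values):
/// @code
/// Checksum cs(0x12345678u, 0x9abcdef0u); // head and tail CRC32 codes
/// char str[strlen<CheckMode>()];
/// printf("checksum mode = %s\n", toStr(str, cs.mode())); // prints "full"
/// @endcode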
class Checksum { /// Three types of checksums: /// 1) Empty: all 64 bits are on (used to signify a disabled or undefined checksum) /// 2) Half: Upper 32 bits are on and not all of lower 32 bits are on (lower 32 bits checksum head of grid) /// 3) Full: Not all of the 64 bits are one (lower 32 bits checksum head of grid and upper 32 bits checksum tail of grid) union { uint32_t mCRC32[2]; uint64_t mCRC64; };// mCRC32[0] is checksum of Grid, Tree and Root, and mCRC32[1] is checksum of nodes public: static constexpr uint32_t EMPTY32 = ~uint32_t{0}; static constexpr uint64_t EMPTY64 = ~uint64_t(0); /// @brief default constructor initializes the checksum to EMPTY __hostdev__ Checksum() : mCRC64{EMPTY64} {} /// @brief Constructor that allows the two 32bit checksums to be initialized explicitly /// @param head Initial 32bit CRC checksum of grid, tree and root data /// @param tail Initial 32bit CRC checksum of all the nodes and blind data __hostdev__ Checksum(uint32_t head, uint32_t tail) : mCRC32{head, tail} {} /// @brief Constructor that initializes this instance from a full 64 bit checksum and a mode /// @param checksum Initial 64 bit checksum /// @param mode Mode of the checksum, i.e. Disable, Half/Partial, or Full __hostdev__ Checksum(uint64_t checksum, CheckMode mode = CheckMode::Full) : mCRC64{mode == CheckMode::Disable ? EMPTY64 : checksum} { if (mode == CheckMode::Partial) mCRC32[1] = EMPTY32; } /// @brief return the 64 bit checksum of this instance [[deprecated("Use Checksum::data instead.")]] __hostdev__ uint64_t checksum() const { return mCRC64; } [[deprecated("Use Checksum::head and Checksum::tail instead.")]] __hostdev__ uint32_t& checksum(int i) {NANOVDB_ASSERT(i==0 || i==1); return mCRC32[i]; } [[deprecated("Use Checksum::head and Checksum::tail instead.")]] __hostdev__ uint32_t checksum(int i) const {NANOVDB_ASSERT(i==0 || i==1); return mCRC32[i]; } __hostdev__ uint64_t full() const { return mCRC64; } __hostdev__ uint64_t& full() { return mCRC64; } __hostdev__ uint32_t head() const { return mCRC32[0]; } __hostdev__ uint32_t& head() { return mCRC32[0]; } __hostdev__ uint32_t tail() const { return mCRC32[1]; } __hostdev__ uint32_t& tail() { return mCRC32[1]; } /// @brief return true if the 64 bit checksum is partial, i.e. of head only [[deprecated("Use Checksum::isHalf instead.")]] __hostdev__ bool isPartial() const { return mCRC32[0] != EMPTY32 && mCRC32[1] == EMPTY32; } __hostdev__ bool isHalf() const { return mCRC32[0] != EMPTY32 && mCRC32[1] == EMPTY32; } /// @brief return true if the 64 bit checksum is full, i.e. of both head and nodes __hostdev__ bool isFull() const { return mCRC64 != EMPTY64 && mCRC32[1] != EMPTY32; } /// @brief return true if the 64 bit checksum is disabled (unset) __hostdev__ bool isEmpty() const { return mCRC64 == EMPTY64; } __hostdev__ void disable() { mCRC64 = EMPTY64; } /// @brief return the mode of the 64 bit checksum __hostdev__ CheckMode mode() const { return mCRC64 == EMPTY64 ? CheckMode::Disable : mCRC32[1] == EMPTY32 ?
CheckMode::Partial : CheckMode::Full; } /// @brief return true if the checksums are identical /// @param rhs other Checksum __hostdev__ bool operator==(const Checksum &rhs) const {return mCRC64 == rhs.mCRC64;} /// @brief return true if the checksums are not identical /// @param rhs other Checksum __hostdev__ bool operator!=(const Checksum &rhs) const {return mCRC64 != rhs.mCRC64;} };// Checksum /// @brief Maps 64 bit checksum to CheckMode enum /// @param checksum 64 bit checksum with two CRC32 codes /// @return CheckMode enum __hostdev__ inline CheckMode toCheckMode(const Checksum &checksum){return checksum.mode();} // ----------------------------> Grid <-------------------------------------- /* The following class and comment is for internal use only Memory layout: Grid -> 39 x double (world bbox and affine transformation) Tree -> Root 3 x ValueType + int32_t + N x Tiles (background,min,max,tileCount + tileCount x Tiles) N2 upper InternalNodes each with 2 bit masks, N2 tiles, and min/max values N1 lower InternalNodes each with 2 bit masks, N1 tiles, and min/max values N0 LeafNodes each with a bit mask, N0 ValueTypes and min/max Example layout: ("---" implies it has a custom offset, "..." implies zero or more) [GridData][TreeData]---[RootData][ROOT TILES...]---[InternalData<5>]---[InternalData<4>]---[LeafData<3>]---[BLINDMETA...]---[BLIND0]---[BLIND1]---etc. */ /// @brief Struct with all the member data of the Grid (useful during serialization of an openvdb grid) /// /// @note The transform is assumed to be affine (so linear) and have uniform scale! So frustum transforms /// and non-uniform scaling are not supported (primarily because they complicate ray-tracing in index space) /// /// @note No client code should (or can) interface with this struct so it can safely be ignored! struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) GridData { // sizeof(GridData) = 672B static const int MaxNameSize = 256; // due to NULL termination the maximum length is one less uint64_t mMagic; // 8B (0) magic to validate it is valid grid data. Checksum mChecksum; // 8B (8). Checksum of grid buffer. Version mVersion; // 4B (16) major, minor, and patch version numbers BitFlags<32> mFlags; // 4B (20). flags for grid. uint32_t mGridIndex; // 4B (24). Index of this grid in the buffer uint32_t mGridCount; // 4B (28). Total number of grids in the buffer uint64_t mGridSize; // 8B (32). byte count of this entire grid occupied in the buffer. char mGridName[MaxNameSize]; // 256B (40) Map mMap; // 264B (296). affine transformation between index and world space in both single and double precision Vec3dBBox mWorldBBox; // 48B (560). floating-point AABB of active values in WORLD SPACE (2 x 3 doubles) Vec3d mVoxelSize; // 24B (608). size of a voxel in world units GridClass mGridClass; // 4B (632). GridType mGridType; // 4B (636). int64_t mBlindMetadataOffset; // 8B (640). offset to beginning of GridBlindMetaData structures that follow this grid. uint32_t mBlindMetadataCount; // 4B (648). count of GridBlindMetaData structures that follow this grid. 
uint32_t mData0; // 4B (652) unused uint64_t mData1; // 8B (656) is use for the total number of values indexed by an IndexGrid uint64_t mData2; // 8B (664) padding to 32 B alignment /// @brief Use this method to initiate most member data GridData& operator=(const GridData&) = default; //__hostdev__ GridData& operator=(const GridData& other){return *util::memcpy(this, &other);} __hostdev__ void init(std::initializer_list<GridFlags> list = {GridFlags::IsBreadthFirst}, uint64_t gridSize = 0u, const Map& map = Map(), GridType gridType = GridType::Unknown, GridClass gridClass = GridClass::Unknown) { #ifdef NANOVDB_USE_NEW_MAGIC_NUMBERS mMagic = NANOVDB_MAGIC_GRID; #else mMagic = NANOVDB_MAGIC_NUMB; #endif mChecksum.disable();// all 64 bits ON means checksum is disabled mVersion = Version(); mFlags.initMask(list); mGridIndex = 0u; mGridCount = 1u; mGridSize = gridSize; mGridName[0] = '\0'; mMap = map; mWorldBBox = Vec3dBBox();// invalid bbox mVoxelSize = map.getVoxelSize(); mGridClass = gridClass; mGridType = gridType; mBlindMetadataOffset = mGridSize; // i.e. no blind data mBlindMetadataCount = 0u; // i.e. no blind data mData0 = 0u; // zero padding mData1 = 0u; // only used for index and point grids mData2 = NANOVDB_MAGIC_GRID; // since version 32.6.0 (will change in the future) } /// @brief return true if the magic number and the version are both valid __hostdev__ bool isValid() const { // Before v32.6.0: toMagic(mMagic) = MagicType::NanoVDB and mData2 was undefined // For v32.6.0: toMagic(mMagic) = MagicType::NanoVDB and toMagic(mData2) = MagicType::NanoGrid // After v32.7.X: toMagic(mMagic) = MagicType::NanoGrid and mData2 will again be undefined const MagicType magic = toMagic(mMagic); if (magic == MagicType::NanoGrid || toMagic(mData2) == MagicType::NanoGrid) return true; bool test = magic == MagicType::NanoVDB;// could be GridData or io::FileHeader if (test) test = mVersion.isCompatible(); if (test) test = mGridCount > 0u && mGridIndex < mGridCount; if (test) test = mGridClass < GridClass::End && mGridType < GridType::End; return test; } // Set and unset various bit flags __hostdev__ void setMinMaxOn(bool on = true) { mFlags.setMask(GridFlags::HasMinMax, on); } __hostdev__ void setBBoxOn(bool on = true) { mFlags.setMask(GridFlags::HasBBox, on); } __hostdev__ void setLongGridNameOn(bool on = true) { mFlags.setMask(GridFlags::HasLongGridName, on); } __hostdev__ void setAverageOn(bool on = true) { mFlags.setMask(GridFlags::HasAverage, on); } __hostdev__ void setStdDeviationOn(bool on = true) { mFlags.setMask(GridFlags::HasStdDeviation, on); } __hostdev__ bool setGridName(const char* src) { const bool success = (util::strncpy(mGridName, src, MaxNameSize)[MaxNameSize-1] == '\0'); if (!success) mGridName[MaxNameSize-1] = '\0'; return success; // returns true if input grid name is NOT longer than MaxNameSize characters } // Affine transformations based on double precision template<typename Vec3T> __hostdev__ Vec3T applyMap(const Vec3T& xyz) const { return mMap.applyMap(xyz); } // Pos: index -> world template<typename Vec3T> __hostdev__ Vec3T applyInverseMap(const Vec3T& xyz) const { return mMap.applyInverseMap(xyz); } // Pos: world -> index template<typename Vec3T> __hostdev__ Vec3T applyJacobian(const Vec3T& xyz) const { return mMap.applyJacobian(xyz); } // Dir: index -> world template<typename Vec3T> __hostdev__ Vec3T applyInverseJacobian(const Vec3T& xyz) const { return mMap.applyInverseJacobian(xyz); } // Dir: world -> index template<typename Vec3T> __hostdev__ Vec3T applyIJT(const Vec3T& 
xyz) const { return mMap.applyIJT(xyz); } // Affine transformations based on single precision template<typename Vec3T> __hostdev__ Vec3T applyMapF(const Vec3T& xyz) const { return mMap.applyMapF(xyz); } // Pos: index -> world template<typename Vec3T> __hostdev__ Vec3T applyInverseMapF(const Vec3T& xyz) const { return mMap.applyInverseMapF(xyz); } // Pos: world -> index template<typename Vec3T> __hostdev__ Vec3T applyJacobianF(const Vec3T& xyz) const { return mMap.applyJacobianF(xyz); } // Dir: index -> world template<typename Vec3T> __hostdev__ Vec3T applyInverseJacobianF(const Vec3T& xyz) const { return mMap.applyInverseJacobianF(xyz); } // Dir: world -> index template<typename Vec3T> __hostdev__ Vec3T applyIJTF(const Vec3T& xyz) const { return mMap.applyIJTF(xyz); } // @brief Return a non-const void pointer to the tree __hostdev__ void* treePtr() { return this + 1; }// TreeData is always right after GridData // @brief Return a const void pointer to the tree __hostdev__ const void* treePtr() const { return this + 1; }// TreeData is always right after GridData /// @brief Return a const void pointer to the first node at @c LEVEL /// @tparam LEVEL Level of the node. LEVEL 0 means leaf node and LEVEL 3 means root node /// @warning If no nodes exist at @c LEVEL, NULL is returned template <uint32_t LEVEL> __hostdev__ const void* nodePtr() const { static_assert(LEVEL >= 0 && LEVEL <= 3, "invalid LEVEL template parameter"); const void *treeData = this + 1;// TreeData is always right after GridData const uint64_t nodeOffset = *util::PtrAdd<uint64_t>(treeData, 8*LEVEL);// skip LEVEL uint64_t return nodeOffset ? util::PtrAdd(treeData, nodeOffset) : nullptr; } /// @brief Return a non-const void pointer to the first node at @c LEVEL /// @tparam LEVEL Level of the node. LEVEL 0 means leaf node and LEVEL 3 means root node /// @warning If no nodes exist at @c LEVEL, NULL is returned template <uint32_t LEVEL> __hostdev__ void* nodePtr() { static_assert(LEVEL >= 0 && LEVEL <= 3, "invalid LEVEL template parameter"); void *treeData = this + 1;// TreeData is always right after GridData const uint64_t nodeOffset = *util::PtrAdd<uint64_t>(treeData, 8*LEVEL);// skip LEVEL uint64_t return nodeOffset ? util::PtrAdd(treeData, nodeOffset) : nullptr; } /// @brief Return number of nodes at @c LEVEL /// @tparam LEVEL Level of the node. LEVEL 0 means leaf node and LEVEL 2 means upper node template <uint32_t LEVEL> __hostdev__ uint32_t nodeCount() const { static_assert(LEVEL >= 0 && LEVEL < 3, "invalid LEVEL template parameter"); return *util::PtrAdd<uint32_t>(this + 1, 4*(8 + LEVEL));// TreeData is always right after GridData } /// @brief Returns a const pointer to the blindMetaData at the specified linear offset. /// /// @warning The linear offset is assumed to be in the valid range __hostdev__ const GridBlindMetaData* blindMetaData(uint32_t n) const { NANOVDB_ASSERT(n < mBlindMetadataCount); return util::PtrAdd<GridBlindMetaData>(this, mBlindMetadataOffset) + n; } __hostdev__ const char* gridName() const { if (mFlags.isMaskOn(GridFlags::HasLongGridName)) {// search for first blind meta data that contains a name NANOVDB_ASSERT(mBlindMetadataCount > 0); for (uint32_t i = 0; i < mBlindMetadataCount; ++i) { const auto* metaData = this->blindMetaData(i);// EXTREMELY important to be a pointer if (metaData->mDataClass == GridBlindDataClass::GridName) { NANOVDB_ASSERT(metaData->mDataType == GridType::Unknown); return metaData->template getBlindData<const char>(); } } NANOVDB_ASSERT(false); // should never hit this!
} return mGridName; } /// @brief Return memory usage in bytes for this class only. __hostdev__ static uint64_t memUsage() { return sizeof(GridData); } /// @brief return AABB of active values in world space __hostdev__ const Vec3dBBox& worldBBox() const { return mWorldBBox; } /// @brief return AABB of active values in index space __hostdev__ const CoordBBox& indexBBox() const {return *(const CoordBBox*)(this->nodePtr<3>());} /// @brief return the size of the root table __hostdev__ uint32_t rootTableSize() const { const void *root = this->nodePtr<3>(); return root ? *util::PtrAdd<uint32_t>(root, sizeof(CoordBBox)) : 0u; } /// @brief test if the grid is empty, i.e. the root table has size 0 /// @return true if this grid contains no data whatsoever __hostdev__ bool isEmpty() const {return this->rootTableSize() == 0u;} /// @brief return true if RootData follows TreeData in memory without any extra padding /// @details TreeData always follows right after GridData, but the same might not be true for RootData __hostdev__ bool isRootConnected() const { return *(const uint64_t*)((const char*)(this + 1) + 24) == 64u;} }; // GridData // Forward declaration of accelerated random access class template<typename BuildT, int LEVEL0 = -1, int LEVEL1 = -1, int LEVEL2 = -1> class ReadAccessor; template<typename BuildT> using DefaultReadAccessor = ReadAccessor<BuildT, 0, 1, 2>; /// @brief Highest level of the data structure. Contains a tree and a world->index /// transform (that currently only supports uniform scaling and translation). /// /// @note This is the API of this class used to interface with client code template<typename TreeT> class Grid : public GridData { public: using TreeType = TreeT; using RootType = typename TreeT::RootType; using RootNodeType = RootType; using UpperNodeType = typename RootNodeType::ChildNodeType; using LowerNodeType = typename UpperNodeType::ChildNodeType; using LeafNodeType = typename RootType::LeafNodeType; using DataType = GridData; using ValueType = typename TreeT::ValueType; using BuildType = typename TreeT::BuildType; // in rare cases BuildType != ValueType, e.g. then BuildType = ValueMask and ValueType = bool using CoordType = typename TreeT::CoordType; using AccessorType = DefaultReadAccessor<BuildType>; /// @brief Disallow construction, copy and assignment /// /// @note Only a Serializer, defined elsewhere, can instantiate this class Grid(const Grid&) = delete; Grid& operator=(const Grid&) = delete; ~Grid() = delete; __hostdev__ Version version() const { return DataType::mVersion; } __hostdev__ DataType* data() { return reinterpret_cast<DataType*>(this); } __hostdev__ const DataType* data() const { return reinterpret_cast<const DataType*>(this); } /// @brief Return memory usage in bytes for this class only. //__hostdev__ static uint64_t memUsage() { return sizeof(GridData); } /// @brief Return the memory footprint of the entire grid, i.e.
including all nodes and blind data __hostdev__ uint64_t gridSize() const { return DataType::mGridSize; } /// @brief Return index of this grid in the buffer __hostdev__ uint32_t gridIndex() const { return DataType::mGridIndex; } /// @brief Return total number of grids in the buffer __hostdev__ uint32_t gridCount() const { return DataType::mGridCount; } /// @brief Return the total number of values indexed by this IndexGrid /// /// @note This method is only defined for IndexGrid = NanoGrid<ValueIndex || ValueOnIndex || ValueIndexMask || ValueOnIndexMask> template<typename T = BuildType> __hostdev__ typename util::enable_if<BuildTraits<T>::is_index, const uint64_t&>::type valueCount() const { return DataType::mData1; } /// @brief Return the total number of points indexed by this PointGrid /// /// @note This method is only defined for PointGrid = NanoGrid<Point> template<typename T = BuildType> __hostdev__ typename util::enable_if<util::is_same<T, Point>::value, const uint64_t&>::type pointCount() const { return DataType::mData1; } /// @brief Return a const reference to the tree __hostdev__ const TreeT& tree() const { return *reinterpret_cast<const TreeT*>(this->treePtr()); } /// @brief Return a non-const reference to the tree __hostdev__ TreeT& tree() { return *reinterpret_cast<TreeT*>(this->treePtr()); } /// @brief Return a new instance of a ReadAccessor used to access values in this grid __hostdev__ AccessorType getAccessor() const { return AccessorType(this->tree().root()); } /// @brief Return a const reference to the size of a voxel in world units __hostdev__ const Vec3d& voxelSize() const { return DataType::mVoxelSize; } /// @brief Return a const reference to the Map for this grid __hostdev__ const Map& map() const { return DataType::mMap; } /// @brief world to index space transformation template<typename Vec3T> __hostdev__ Vec3T worldToIndex(const Vec3T& xyz) const { return this->applyInverseMap(xyz); } /// @brief index to world space transformation template<typename Vec3T> __hostdev__ Vec3T indexToWorld(const Vec3T& xyz) const { return this->applyMap(xyz); } /// @brief transformation from index space direction to world space direction /// @warning assumes dir to be normalized template<typename Vec3T> __hostdev__ Vec3T indexToWorldDir(const Vec3T& dir) const { return this->applyJacobian(dir); } /// @brief transformation from world space direction to index space direction /// @warning assumes dir to be normalized template<typename Vec3T> __hostdev__ Vec3T worldToIndexDir(const Vec3T& dir) const { return this->applyInverseJacobian(dir); } /// @brief transform the gradient from index space to world space. /// @details Applies the inverse jacobian transform map.
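/// Example (illustrative sketch; assumes @c grid is a const NanoGrid pointer and @c gradI is a gradient computed in index space):
/// @code
/// nanovdb::Vec3f gradW = grid->indexToWorldGrad(gradI);// index space gradient -> world space gradient
/// @endcode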
template<typename Vec3T> __hostdev__ Vec3T indexToWorldGrad(const Vec3T& grad) const { return this->applyIJT(grad); } /// @brief world to index space transformation template<typename Vec3T> __hostdev__ Vec3T worldToIndexF(const Vec3T& xyz) const { return this->applyInverseMapF(xyz); } /// @brief index to world space transformation template<typename Vec3T> __hostdev__ Vec3T indexToWorldF(const Vec3T& xyz) const { return this->applyMapF(xyz); } /// @brief transformation from index space direction to world space direction /// @warning assumes dir to be normalized template<typename Vec3T> __hostdev__ Vec3T indexToWorldDirF(const Vec3T& dir) const { return this->applyJacobianF(dir); } /// @brief transformation from world space direction to index space direction /// @warning assumes dir to be normalized template<typename Vec3T> __hostdev__ Vec3T worldToIndexDirF(const Vec3T& dir) const { return this->applyInverseJacobianF(dir); } /// @brief Transforms the gradient from index space to world space. /// @details Applies the inverse jacobian transform map. template<typename Vec3T> __hostdev__ Vec3T indexToWorldGradF(const Vec3T& grad) const { return DataType::applyIJTF(grad); } /// @brief Computes a AABB of active values in world space //__hostdev__ const Vec3dBBox& worldBBox() const { return DataType::mWorldBBox; } /// @brief Computes a AABB of active values in index space /// /// @note This method is returning a floating point bounding box and not a CoordBBox. This makes /// it more useful for clipping rays. //__hostdev__ const BBox<CoordType>& indexBBox() const { return this->tree().bbox(); } /// @brief Return the total number of active voxels in this tree. __hostdev__ uint64_t activeVoxelCount() const { return this->tree().activeVoxelCount(); } /// @brief Methods related to the classification of this grid __hostdev__ bool isValid() const { return DataType::isValid(); } __hostdev__ const GridType& gridType() const { return DataType::mGridType; } __hostdev__ const GridClass& gridClass() const { return DataType::mGridClass; } __hostdev__ bool isLevelSet() const { return DataType::mGridClass == GridClass::LevelSet; } __hostdev__ bool isFogVolume() const { return DataType::mGridClass == GridClass::FogVolume; } __hostdev__ bool isStaggered() const { return DataType::mGridClass == GridClass::Staggered; } __hostdev__ bool isPointIndex() const { return DataType::mGridClass == GridClass::PointIndex; } __hostdev__ bool isGridIndex() const { return DataType::mGridClass == GridClass::IndexGrid; } __hostdev__ bool isPointData() const { return DataType::mGridClass == GridClass::PointData; } __hostdev__ bool isMask() const { return DataType::mGridClass == GridClass::Topology; } __hostdev__ bool isUnknown() const { return DataType::mGridClass == GridClass::Unknown; } __hostdev__ bool hasMinMax() const { return DataType::mFlags.isMaskOn(GridFlags::HasMinMax); } __hostdev__ bool hasBBox() const { return DataType::mFlags.isMaskOn(GridFlags::HasBBox); } __hostdev__ bool hasLongGridName() const { return DataType::mFlags.isMaskOn(GridFlags::HasLongGridName); } __hostdev__ bool hasAverage() const { return DataType::mFlags.isMaskOn(GridFlags::HasAverage); } __hostdev__ bool hasStdDeviation() const { return DataType::mFlags.isMaskOn(GridFlags::HasStdDeviation); } __hostdev__ bool isBreadthFirst() const { return DataType::mFlags.isMaskOn(GridFlags::IsBreadthFirst); } /// @brief return true if the specified node type is layed out breadth-first in memory and has a fixed size. 
/// This allows for sequential access to the nodes. template<typename NodeT> __hostdev__ bool isSequential() const { return NodeT::FIXED_SIZE && this->isBreadthFirst(); } /// @brief return true if the specified node level is layed out breadth-first in memory and has a fixed size. /// This allows for sequential access to the nodes. template<int LEVEL> __hostdev__ bool isSequential() const { return NodeTrait<TreeT, LEVEL>::type::FIXED_SIZE && this->isBreadthFirst(); } /// @brief return true if nodes at all levels can safely be accessed with simple linear offsets __hostdev__ bool isSequential() const { return UpperNodeType::FIXED_SIZE && LowerNodeType::FIXED_SIZE && LeafNodeType::FIXED_SIZE && this->isBreadthFirst(); } /// @brief Return a c-string with the name of this grid __hostdev__ const char* gridName() const { return DataType::gridName(); } /// @brief Return a c-string with the name of this grid, truncated to 255 characters __hostdev__ const char* shortGridName() const { return DataType::mGridName; } /// @brief Return checksum of the grid buffer. __hostdev__ const Checksum& checksum() const { return DataType::mChecksum; } /// @brief Return true if this grid is empty, i.e. contains no values or nodes. //__hostdev__ bool isEmpty() const { return this->tree().isEmpty(); } /// @brief Return the count of blind-data encoded in this grid __hostdev__ uint32_t blindDataCount() const { return DataType::mBlindMetadataCount; } /// @brief Return the index of the first blind data with specified name if found, otherwise -1. __hostdev__ int findBlindData(const char* name) const; /// @brief Return the index of the first blind data with specified semantic if found, otherwise -1. __hostdev__ int findBlindDataForSemantic(GridBlindDataSemantic semantic) const; /// @brief Returns a const pointer to the blindData at the specified linear offset. /// /// @warning Pointer might be NULL and the linear offset is assumed to be in the valid range // this method is deprecated !!!! [[deprecated("Use Grid::getBlindData<T>() instead.")]] __hostdev__ const void* blindData(uint32_t n) const { printf("\nnanovdb::Grid::blindData is unsafe and hence deprecated! 
Please use nanovdb::Grid::getBlindData instead.\n\n"); NANOVDB_ASSERT(n < DataType::mBlindMetadataCount); return this->blindMetaData(n).blindData(); } template <typename BlindDataT> __hostdev__ const BlindDataT* getBlindData(uint32_t n) const { if (n >= DataType::mBlindMetadataCount) return nullptr;// index is out of bounds return this->blindMetaData(n).template getBlindData<BlindDataT>();// NULL if mismatching BlindDataT } template <typename BlindDataT> __hostdev__ BlindDataT* getBlindData(uint32_t n) { if (n >= DataType::mBlindMetadataCount) return nullptr;// index is out of bounds return const_cast<BlindDataT*>(this->blindMetaData(n).template getBlindData<BlindDataT>());// NULL if mismatching BlindDataT } __hostdev__ const GridBlindMetaData& blindMetaData(uint32_t n) const { return *DataType::blindMetaData(n); } private: static_assert(sizeof(GridData) % NANOVDB_DATA_ALIGNMENT == 0, "sizeof(GridData) is misaligned"); }; // Class Grid template<typename TreeT> __hostdev__ int Grid<TreeT>::findBlindDataForSemantic(GridBlindDataSemantic semantic) const { for (uint32_t i = 0, n = this->blindDataCount(); i < n; ++i) { if (this->blindMetaData(i).mSemantic == semantic) return int(i); } return -1; } template<typename TreeT> __hostdev__ int Grid<TreeT>::findBlindData(const char* name) const { auto test = [&](int n) { const char* str = this->blindMetaData(n).mName; for (int i = 0; i < GridBlindMetaData::MaxNameSize; ++i) { if (name[i] != str[i]) return false; if (name[i] == '\0' && str[i] == '\0') return true; } return true; // all len characters matched }; for (int i = 0, n = this->blindDataCount(); i < n; ++i) if (test(i)) return i; return -1; } // ----------------------------> Tree <-------------------------------------- struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) TreeData { // sizeof(TreeData) == 64B int64_t mNodeOffset[4];// 32B, byte offset from this tree to first leaf, lower, upper and root node. If mNodeCount[N]=0 => mNodeOffset[N]==mNodeOffset[N+1] uint32_t mNodeCount[3]; // 12B, total number of nodes of type: leaf, lower internal, upper internal uint32_t mTileCount[3]; // 12B, total number of active tile values at the lower internal, upper internal and root node levels uint64_t mVoxelCount; // 8B, total number of active voxels in the root and all its child nodes. // No padding since it's always 32B aligned //__hostdev__ TreeData& operator=(const TreeData& other){return *util::memcpy(this, &other);} TreeData& operator=(const TreeData&) = default; __hostdev__ void setRoot(const void* root) { NANOVDB_ASSERT(root); mNodeOffset[3] = util::PtrDiff(root, this); } /// @brief Get a non-const void pointer to the root node (never NULL) __hostdev__ void* getRoot() { return util::PtrAdd(this, mNodeOffset[3]); } /// @brief Get a const void pointer to the root node (never NULL) __hostdev__ const void* getRoot() const { return util::PtrAdd(this, mNodeOffset[3]); } template<typename NodeT> __hostdev__ void setFirstNode(const NodeT* node) {mNodeOffset[NodeT::LEVEL] = (node ? util::PtrDiff(node, this) : 0);} /// @brief Return true if the root is empty, i.e. has not child nodes or constant tiles __hostdev__ bool isEmpty() const {return mNodeOffset[3] ? *util::PtrAdd<uint32_t>(this, mNodeOffset[3] + sizeof(CoordBBox)) == 0 : true;} /// @brief Return the index bounding box of all the active values in this tree, i.e. in all nodes of the tree __hostdev__ CoordBBox bbox() const {return mNodeOffset[3] ? 
*util::PtrAdd<CoordBBox>(this, mNodeOffset[3]) : CoordBBox();} /// @brief return true if RootData is layout out immediately after TreeData in memory __hostdev__ bool isRootNext() const {return mNodeOffset[3] ? mNodeOffset[3] == sizeof(TreeData) : false; } };// TreeData // ----------------------------> GridTree <-------------------------------------- /// @brief defines a tree type from a grid type while preserving constness template<typename GridT> struct GridTree { using Type = typename GridT::TreeType; using type = typename GridT::TreeType; }; template<typename GridT> struct GridTree<const GridT> { using Type = const typename GridT::TreeType; using type = const typename GridT::TreeType; }; // ----------------------------> Tree <-------------------------------------- /// @brief VDB Tree, which is a thin wrapper around a RootNode. template<typename RootT> class Tree : public TreeData { static_assert(RootT::LEVEL == 3, "Tree depth is not supported"); static_assert(RootT::ChildNodeType::LOG2DIM == 5, "Tree configuration is not supported"); static_assert(RootT::ChildNodeType::ChildNodeType::LOG2DIM == 4, "Tree configuration is not supported"); static_assert(RootT::LeafNodeType::LOG2DIM == 3, "Tree configuration is not supported"); public: using DataType = TreeData; using RootType = RootT; using RootNodeType = RootT; using UpperNodeType = typename RootNodeType::ChildNodeType; using LowerNodeType = typename UpperNodeType::ChildNodeType; using LeafNodeType = typename RootType::LeafNodeType; using ValueType = typename RootT::ValueType; using BuildType = typename RootT::BuildType; // in rare cases BuildType != ValueType, e.g. then BuildType = ValueMask and ValueType = bool using CoordType = typename RootT::CoordType; using AccessorType = DefaultReadAccessor<BuildType>; using Node3 = RootT; using Node2 = typename RootT::ChildNodeType; using Node1 = typename Node2::ChildNodeType; using Node0 = LeafNodeType; /// @brief This class cannot be constructed or deleted Tree() = delete; Tree(const Tree&) = delete; Tree& operator=(const Tree&) = delete; ~Tree() = delete; __hostdev__ DataType* data() { return reinterpret_cast<DataType*>(this); } __hostdev__ const DataType* data() const { return reinterpret_cast<const DataType*>(this); } /// @brief return memory usage in bytes for the class __hostdev__ static uint64_t memUsage() { return sizeof(DataType); } __hostdev__ RootT& root() {return *reinterpret_cast<RootT*>(DataType::getRoot());} __hostdev__ const RootT& root() const {return *reinterpret_cast<const RootT*>(DataType::getRoot());} __hostdev__ AccessorType getAccessor() const { return AccessorType(this->root()); } /// @brief Return the value of the given voxel (regardless of state or location in the tree.) __hostdev__ ValueType getValue(const CoordType& ijk) const { return this->root().getValue(ijk); } __hostdev__ ValueType getValue(int i, int j, int k) const { return this->root().getValue(CoordType(i, j, k)); } /// @brief Return the active state of the given voxel (regardless of state or location in the tree.) __hostdev__ bool isActive(const CoordType& ijk) const { return this->root().isActive(ijk); } /// @brief Return true if this tree is empty, i.e. contains no values or nodes //__hostdev__ bool isEmpty() const { return this->root().isEmpty(); } /// @brief Combines the previous two methods in a single call __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const { return this->root().probeValue(ijk, v); } /// @brief Return a const reference to the background value. 
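/// Example (illustrative sketch; assumes @c tree is a NanoTree<float> reference and @c ijk is a nanovdb::Coord):
/// @code
/// float v = tree.background();// value of any unset voxel
/// const bool active = tree.probeValue(ijk, v);// v receives the value at ijk and active its state
/// @endcode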
__hostdev__ const ValueType& background() const { return this->root().background(); } /// @brief Sets the extrema values of all the active values in this tree, i.e. in all nodes of the tree __hostdev__ void extrema(ValueType& min, ValueType& max) const; /// @brief Return a const reference to the index bounding box of all the active values in this tree, i.e. in all nodes of the tree //__hostdev__ const BBox<CoordType>& bbox() const { return this->root().bbox(); } /// @brief Return the total number of active voxels in this tree. __hostdev__ uint64_t activeVoxelCount() const { return DataType::mVoxelCount; } /// @brief Return the total number of active tiles at the specified level of the tree. /// /// @details level = 1,2,3 corresponds to active tile count in lower internal nodes, upper /// internal nodes, and the root level. Note active values at the leaf level are /// referred to as active voxels (see activeVoxelCount defined above). __hostdev__ const uint32_t& activeTileCount(uint32_t level) const { NANOVDB_ASSERT(level > 0 && level <= 3); // 1, 2, or 3 return DataType::mTileCount[level - 1]; } template<typename NodeT> __hostdev__ uint32_t nodeCount() const { static_assert(NodeT::LEVEL < 3, "Invalid NodeT"); return DataType::mNodeCount[NodeT::LEVEL]; } __hostdev__ uint32_t nodeCount(int level) const { NANOVDB_ASSERT(level < 3); return DataType::mNodeCount[level]; } __hostdev__ uint32_t totalNodeCount() const { return DataType::mNodeCount[0] + DataType::mNodeCount[1] + DataType::mNodeCount[2]; } /// @brief return a pointer to the first node of the specified type /// /// @warning Note it may return NULL if no nodes exist template<typename NodeT> __hostdev__ NodeT* getFirstNode() { const int64_t nodeOffset = DataType::mNodeOffset[NodeT::LEVEL]; return nodeOffset ? util::PtrAdd<NodeT>(this, nodeOffset) : nullptr; } /// @brief return a const pointer to the first node of the specified type /// /// @warning Note it may return NULL if no nodes exist template<typename NodeT> __hostdev__ const NodeT* getFirstNode() const { const int64_t nodeOffset = DataType::mNodeOffset[NodeT::LEVEL]; return nodeOffset ? util::PtrAdd<NodeT>(this, nodeOffset) : nullptr; } /// @brief return a pointer to the first node at the specified level /// /// @warning Note it may return NULL if no nodes exist template<int LEVEL> __hostdev__ typename NodeTrait<RootT, LEVEL>::type* getFirstNode() { return this->template getFirstNode<typename NodeTrait<RootT, LEVEL>::type>(); } /// @brief return a const pointer to the first node of the specified level /// /// @warning Note it may return NULL if no nodes exist template<int LEVEL> __hostdev__ const typename NodeTrait<RootT, LEVEL>::type* getFirstNode() const { return this->template getFirstNode<typename NodeTrait<RootT, LEVEL>::type>(); } /// @brief Template specializations of getFirstNode __hostdev__ LeafNodeType* getFirstLeaf() { return this->getFirstNode<LeafNodeType>(); } __hostdev__ const LeafNodeType* getFirstLeaf() const { return this->getFirstNode<LeafNodeType>(); } __hostdev__ typename NodeTrait<RootT, 1>::type* getFirstLower() { return this->getFirstNode<1>(); } __hostdev__ const typename NodeTrait<RootT, 1>::type* getFirstLower() const { return this->getFirstNode<1>(); } __hostdev__ typename NodeTrait<RootT, 2>::type* getFirstUpper() { return this->getFirstNode<2>(); } __hostdev__ const typename NodeTrait<RootT, 2>::type* getFirstUpper() const { return this->getFirstNode<2>(); } template<typename OpT, typename... 
ArgsT> __hostdev__ auto get(const CoordType& ijk, ArgsT&&... args) const { return this->root().template get<OpT>(ijk, args...); } template<typename OpT, typename... ArgsT> __hostdev__ auto set(const CoordType& ijk, ArgsT&&... args) { return this->root().template set<OpT>(ijk, args...); } private: static_assert(sizeof(DataType) % NANOVDB_DATA_ALIGNMENT == 0, "sizeof(TreeData) is misaligned"); }; // Tree class template<typename RootT> __hostdev__ void Tree<RootT>::extrema(ValueType& min, ValueType& max) const { min = this->root().minimum(); max = this->root().maximum(); } // --------------------------> RootData <------------------------------------ /// @brief Struct with all the member data of the RootNode (useful during serialization of an openvdb RootNode) /// /// @note No client code should (or can) interface with this struct so it can safely be ignored! template<typename ChildT> struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) RootData { using ValueT = typename ChildT::ValueType; using BuildT = typename ChildT::BuildType; // in rare cases BuildType != ValueType, e.g. then BuildType = ValueMask and ValueType = bool using CoordT = typename ChildT::CoordType; using StatsT = typename ChildT::FloatType; static constexpr bool FIXED_SIZE = false; /// @brief Return a key based on the coordinates of a voxel #ifdef NANOVDB_USE_SINGLE_ROOT_KEY using KeyT = uint64_t; template<typename CoordType> __hostdev__ static KeyT CoordToKey(const CoordType& ijk) { static_assert(sizeof(CoordT) == sizeof(CoordType), "Mismatching sizeof"); static_assert(32 - ChildT::TOTAL <= 21, "Cannot use 64 bit root keys"); return (KeyT(uint32_t(ijk[2]) >> ChildT::TOTAL)) | // z is the lower 21 bits (KeyT(uint32_t(ijk[1]) >> ChildT::TOTAL) << 21) | // y is the middle 21 bits (KeyT(uint32_t(ijk[0]) >> ChildT::TOTAL) << 42); // x is the upper 21 bits } __hostdev__ static CoordT KeyToCoord(const KeyT& key) { static constexpr uint64_t MASK = (1u << 21) - 1; // used to mask out 21 lower bits return CoordT(((key >> 42) & MASK) << ChildT::TOTAL, // x are the upper 21 bits ((key >> 21) & MASK) << ChildT::TOTAL, // y are the middle 21 bits (key & MASK) << ChildT::TOTAL); // z are the lower 21 bits } #else using KeyT = CoordT; __hostdev__ static KeyT CoordToKey(const CoordT& ijk) { return ijk & ~ChildT::MASK; } __hostdev__ static CoordT KeyToCoord(const KeyT& key) { return key; } #endif math::BBox<CoordT> mBBox; // 24B. AABB of active values in index space. uint32_t mTableSize; // 4B. number of tiles and child pointers in the root node ValueT mBackground; // background value, i.e. value of any unset voxel ValueT mMinimum; // typically 4B, minimum of all the active values ValueT mMaximum; // typically 4B, maximum of all the active values StatsT mAverage; // typically 4B, average of all the active values in this node and its child nodes StatsT mStdDevi; // typically 4B, standard deviation of all the active values in this node and its child nodes /// @brief Return padding of this class in bytes, due to aliasing and 32B alignment /// /// @note The extra bytes are not necessarily at the end, but can come from aliasing of individual data members. 
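/// @details Worked example (illustrative only): for a float grid ValueT = StatsT = float, so the member payload is
/// 24B (mBBox) + 4B (mTableSize) + 3 * 4B (background/min/max) + 2 * 4B (average/stdDev) = 48B, and padding()
/// returns sizeof(RootData) - 48, i.e. the bytes added to satisfy the 32 byte alignment of this struct.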
__hostdev__ static constexpr uint32_t padding() { return sizeof(RootData) - (24 + 4 + 3 * sizeof(ValueT) + 2 * sizeof(StatsT)); } struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) Tile { template<typename CoordType> __hostdev__ void setChild(const CoordType& k, const void* ptr, const RootData* data) { key = CoordToKey(k); state = false; child = util::PtrDiff(ptr, data); } template<typename CoordType, typename ValueType> __hostdev__ void setValue(const CoordType& k, bool s, const ValueType& v) { key = CoordToKey(k); state = s; value = v; child = 0; } __hostdev__ bool isChild() const { return child != 0; } __hostdev__ bool isValue() const { return child == 0; } __hostdev__ bool isActive() const { return child == 0 && state; } __hostdev__ CoordT origin() const { return KeyToCoord(key); } KeyT key; // NANOVDB_USE_SINGLE_ROOT_KEY ? 8B : 12B int64_t child; // 8B. signed byte offset from this node to the child node. 0 means it is a constant tile, so use value. uint32_t state; // 4B. state of tile value ValueT value; // value of tile (i.e. no child node) }; // Tile /// @brief Returns a non-const reference to the tile at the specified linear offset. /// /// @warning The linear offset is assumed to be in the valid range __hostdev__ const Tile* tile(uint32_t n) const { NANOVDB_ASSERT(n < mTableSize); return reinterpret_cast<const Tile*>(this + 1) + n; } __hostdev__ Tile* tile(uint32_t n) { NANOVDB_ASSERT(n < mTableSize); return reinterpret_cast<Tile*>(this + 1) + n; } __hostdev__ Tile* probeTile(const CoordT& ijk) { #if 1 // switch between linear and binary seach const auto key = CoordToKey(ijk); for (Tile *p = reinterpret_cast<Tile*>(this + 1), *q = p + mTableSize; p < q; ++p) if (p->key == key) return p; return nullptr; #else // do not enable binary search if tiles are not guaranteed to be sorted!!!!!! int32_t low = 0, high = mTableSize; // low is inclusive and high is exclusive while (low != high) { int mid = low + ((high - low) >> 1); const Tile* tile = &tiles[mid]; if (tile->key == key) { return tile; } else if (tile->key < key) { low = mid + 1; } else { high = mid; } } return nullptr; #endif } __hostdev__ inline const Tile* probeTile(const CoordT& ijk) const { return const_cast<RootData*>(this)->probeTile(ijk); } /// @brief Returns a const reference to the child node in the specified tile. /// /// @warning A child node is assumed to exist in the specified tile __hostdev__ ChildT* getChild(const Tile* tile) { NANOVDB_ASSERT(tile->child); return util::PtrAdd<ChildT>(this, tile->child); } __hostdev__ const ChildT* getChild(const Tile* tile) const { NANOVDB_ASSERT(tile->child); return util::PtrAdd<ChildT>(this, tile->child); } __hostdev__ const ValueT& getMin() const { return mMinimum; } __hostdev__ const ValueT& getMax() const { return mMaximum; } __hostdev__ const StatsT& average() const { return mAverage; } __hostdev__ const StatsT& stdDeviation() const { return mStdDevi; } __hostdev__ void setMin(const ValueT& v) { mMinimum = v; } __hostdev__ void setMax(const ValueT& v) { mMaximum = v; } __hostdev__ void setAvg(const StatsT& v) { mAverage = v; } __hostdev__ void setDev(const StatsT& v) { mStdDevi = v; } /// @brief This class cannot be constructed or deleted RootData() = delete; RootData(const RootData&) = delete; RootData& operator=(const RootData&) = delete; ~RootData() = delete; }; // RootData // --------------------------> RootNode <------------------------------------ /// @brief Top-most node of the VDB tree structure. 
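/// @details Illustrative sketch of visiting the child nodes of the root (assumes @c root is a NanoRoot<float> reference):
/// @code
/// for (auto it = root.cbeginChild(); it; ++it) {
///     const auto& upper = *it;// const reference to an upper internal node of the tree
/// }
/// @endcode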
template<typename ChildT> class RootNode : public RootData<ChildT> { public: using DataType = RootData<ChildT>; using ChildNodeType = ChildT; using RootType = RootNode<ChildT>; // this allows RootNode to behave like a Tree using RootNodeType = RootType; using UpperNodeType = ChildT; using LowerNodeType = typename UpperNodeType::ChildNodeType; using LeafNodeType = typename ChildT::LeafNodeType; using ValueType = typename DataType::ValueT; using FloatType = typename DataType::StatsT; using BuildType = typename DataType::BuildT; // in rare cases BuildType != ValueType, e.g. then BuildType = ValueMask and ValueType = bool using CoordType = typename ChildT::CoordType; using BBoxType = math::BBox<CoordType>; using AccessorType = DefaultReadAccessor<BuildType>; using Tile = typename DataType::Tile; static constexpr bool FIXED_SIZE = DataType::FIXED_SIZE; static constexpr uint32_t LEVEL = 1 + ChildT::LEVEL; // level 0 = leaf template<typename RootT> class BaseIter { protected: using DataT = typename util::match_const<DataType, RootT>::type; using TileT = typename util::match_const<Tile, RootT>::type; DataT* mData; uint32_t mPos, mSize; __hostdev__ BaseIter(DataT* data = nullptr, uint32_t n = 0) : mData(data) , mPos(0) , mSize(n) { } public: __hostdev__ operator bool() const { return mPos < mSize; } __hostdev__ uint32_t pos() const { return mPos; } __hostdev__ void next() { ++mPos; } __hostdev__ TileT* tile() const { return mData->tile(mPos); } __hostdev__ CoordType getOrigin() const { NANOVDB_ASSERT(*this); return this->tile()->origin(); } __hostdev__ CoordType getCoord() const { NANOVDB_ASSERT(*this); return this->tile()->origin(); } }; // Member class BaseIter template<typename RootT> class ChildIter : public BaseIter<RootT> { static_assert(util::is_same<typename util::remove_const<RootT>::type, RootNode>::value, "Invalid RootT"); using BaseT = BaseIter<RootT>; using NodeT = typename util::match_const<ChildT, RootT>::type; public: __hostdev__ ChildIter() : BaseT() { } __hostdev__ ChildIter(RootT* parent) : BaseT(parent->data(), parent->tileCount()) { NANOVDB_ASSERT(BaseT::mData); while (*this && !this->tile()->isChild()) this->next(); } __hostdev__ NodeT& operator*() const { NANOVDB_ASSERT(*this); return *BaseT::mData->getChild(this->tile()); } __hostdev__ NodeT* operator->() const { NANOVDB_ASSERT(*this); return BaseT::mData->getChild(this->tile()); } __hostdev__ ChildIter& operator++() { NANOVDB_ASSERT(BaseT::mData); this->next(); while (*this && this->tile()->isValue()) this->next(); return *this; } __hostdev__ ChildIter operator++(int) { auto tmp = *this; ++(*this); return tmp; } }; // Member class ChildIter using ChildIterator = ChildIter<RootNode>; using ConstChildIterator = ChildIter<const RootNode>; __hostdev__ ChildIterator beginChild() { return ChildIterator(this); } __hostdev__ ConstChildIterator cbeginChild() const { return ConstChildIterator(this); } template<typename RootT> class ValueIter : public BaseIter<RootT> { using BaseT = BaseIter<RootT>; public: __hostdev__ ValueIter() : BaseT() { } __hostdev__ ValueIter(RootT* parent) : BaseT(parent->data(), parent->tileCount()) { NANOVDB_ASSERT(BaseT::mData); while (*this && this->tile()->isChild()) this->next(); } __hostdev__ ValueType operator*() const { NANOVDB_ASSERT(*this); return this->tile()->value; } __hostdev__ bool isActive() const { NANOVDB_ASSERT(*this); return this->tile()->state; } __hostdev__ ValueIter& operator++() { NANOVDB_ASSERT(BaseT::mData); this->next(); while (*this && this->tile()->isChild()) this->next(); return 
*this; } __hostdev__ ValueIter operator++(int) { auto tmp = *this; ++(*this); return tmp; } }; // Member class ValueIter using ValueIterator = ValueIter<RootNode>; using ConstValueIterator = ValueIter<const RootNode>; __hostdev__ ValueIterator beginValue() { return ValueIterator(this); } __hostdev__ ConstValueIterator cbeginValueAll() const { return ConstValueIterator(this); } template<typename RootT> class ValueOnIter : public BaseIter<RootT> { using BaseT = BaseIter<RootT>; public: __hostdev__ ValueOnIter() : BaseT() { } __hostdev__ ValueOnIter(RootT* parent) : BaseT(parent->data(), parent->tileCount()) { NANOVDB_ASSERT(BaseT::mData); while (*this && !this->tile()->isActive()) ++BaseT::mPos; } __hostdev__ ValueType operator*() const { NANOVDB_ASSERT(*this); return this->tile()->value; } __hostdev__ ValueOnIter& operator++() { NANOVDB_ASSERT(BaseT::mData); this->next(); while (*this && !this->tile()->isActive()) this->next(); return *this; } __hostdev__ ValueOnIter operator++(int) { auto tmp = *this; ++(*this); return tmp; } }; // Member class ValueOnIter using ValueOnIterator = ValueOnIter<RootNode>; using ConstValueOnIterator = ValueOnIter<const RootNode>; __hostdev__ ValueOnIterator beginValueOn() { return ValueOnIterator(this); } __hostdev__ ConstValueOnIterator cbeginValueOn() const { return ConstValueOnIterator(this); } template<typename RootT> class DenseIter : public BaseIter<RootT> { using BaseT = BaseIter<RootT>; using NodeT = typename util::match_const<ChildT, RootT>::type; public: __hostdev__ DenseIter() : BaseT() { } __hostdev__ DenseIter(RootT* parent) : BaseT(parent->data(), parent->tileCount()) { NANOVDB_ASSERT(BaseT::mData); } __hostdev__ NodeT* probeChild(ValueType& value) const { NANOVDB_ASSERT(*this); NodeT* child = nullptr; auto* t = this->tile(); if (t->isChild()) { child = BaseT::mData->getChild(t); } else { value = t->value; } return child; } __hostdev__ bool isValueOn() const { NANOVDB_ASSERT(*this); return this->tile()->state; } __hostdev__ DenseIter& operator++() { NANOVDB_ASSERT(BaseT::mData); this->next(); return *this; } __hostdev__ DenseIter operator++(int) { auto tmp = *this; ++(*this); return tmp; } }; // Member class DenseIter using DenseIterator = DenseIter<RootNode>; using ConstDenseIterator = DenseIter<const RootNode>; __hostdev__ DenseIterator beginDense() { return DenseIterator(this); } __hostdev__ ConstDenseIterator cbeginDense() const { return ConstDenseIterator(this); } __hostdev__ ConstDenseIterator cbeginChildAll() const { return ConstDenseIterator(this); } /// @brief This class cannot be constructed or deleted RootNode() = delete; RootNode(const RootNode&) = delete; RootNode& operator=(const RootNode&) = delete; ~RootNode() = delete; __hostdev__ AccessorType getAccessor() const { return AccessorType(*this); } __hostdev__ DataType* data() { return reinterpret_cast<DataType*>(this); } __hostdev__ const DataType* data() const { return reinterpret_cast<const DataType*>(this); } /// @brief Return a const reference to the index bounding box of all the active values in this tree, i.e. in all nodes of the tree __hostdev__ const BBoxType& bbox() const { return DataType::mBBox; } /// @brief Return the total number of active voxels in the root and all its child nodes. /// @brief Return a const reference to the background value, i.e. the value associated with /// any coordinate location that has not been set explicitly. 
__hostdev__ const ValueType& background() const { return DataType::mBackground; } /// @brief Return the number of tiles encoded in this root node __hostdev__ const uint32_t& tileCount() const { return DataType::mTableSize; } __hostdev__ const uint32_t& getTableSize() const { return DataType::mTableSize; } /// @brief Return a const reference to the minimum active value encoded in this root node and any of its child nodes __hostdev__ const ValueType& minimum() const { return DataType::mMinimum; } /// @brief Return a const reference to the maximum active value encoded in this root node and any of its child nodes __hostdev__ const ValueType& maximum() const { return DataType::mMaximum; } /// @brief Return a const reference to the average of all the active values encoded in this root node and any of its child nodes __hostdev__ const FloatType& average() const { return DataType::mAverage; } /// @brief Return the variance of all the active values encoded in this root node and any of its child nodes __hostdev__ FloatType variance() const { return math::Pow2(DataType::mStdDevi); } /// @brief Return a const reference to the standard deviation of all the active values encoded in this root node and any of its child nodes __hostdev__ const FloatType& stdDeviation() const { return DataType::mStdDevi; } /// @brief Return the expected memory footprint in bytes with the specified number of tiles __hostdev__ static uint64_t memUsage(uint32_t tableSize) { return sizeof(RootNode) + tableSize * sizeof(Tile); } /// @brief Return the actual memory footprint of this root node __hostdev__ uint64_t memUsage() const { return sizeof(RootNode) + DataType::mTableSize * sizeof(Tile); } /// @brief Return true if this RootNode is empty, i.e. contains no values or nodes __hostdev__ bool isEmpty() const { return DataType::mTableSize == uint32_t(0); } #ifdef NANOVDB_NEW_ACCESSOR_METHODS /// @brief Return the value of the given voxel __hostdev__ ValueType getValue(const CoordType& ijk) const { return this->template get<GetValue<BuildType>>(ijk); } __hostdev__ ValueType getValue(int i, int j, int k) const { return this->template get<GetValue<BuildType>>(CoordType(i, j, k)); } __hostdev__ bool isActive(const CoordType& ijk) const { return this->template get<GetState<BuildType>>(ijk); } /// @brief return the state and updates the value of the specified voxel __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const { return this->template get<ProbeValue<BuildType>>(ijk, v); } __hostdev__ const LeafNodeType* probeLeaf(const CoordType& ijk) const { return this->template get<GetLeaf<BuildType>>(ijk); } #else // NANOVDB_NEW_ACCESSOR_METHODS /// @brief Return the value of the given voxel __hostdev__ ValueType getValue(const CoordType& ijk) const { if (const Tile* tile = DataType::probeTile(ijk)) { return tile->isChild() ? this->getChild(tile)->getValue(ijk) : tile->value; } return DataType::mBackground; } __hostdev__ ValueType getValue(int i, int j, int k) const { return this->getValue(CoordType(i, j, k)); } __hostdev__ bool isActive(const CoordType& ijk) const { if (const Tile* tile = DataType::probeTile(ijk)) { return tile->isChild() ? 
this->getChild(tile)->isActive(ijk) : tile->state; } return false; } __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const { if (const Tile* tile = DataType::probeTile(ijk)) { if (tile->isChild()) { const auto* child = this->getChild(tile); return child->probeValue(ijk, v); } v = tile->value; return tile->state; } v = DataType::mBackground; return false; } __hostdev__ const LeafNodeType* probeLeaf(const CoordType& ijk) const { const Tile* tile = DataType::probeTile(ijk); if (tile && tile->isChild()) { const auto* child = this->getChild(tile); return child->probeLeaf(ijk); } return nullptr; } #endif // NANOVDB_NEW_ACCESSOR_METHODS __hostdev__ const ChildNodeType* probeChild(const CoordType& ijk) const { const Tile* tile = DataType::probeTile(ijk); return tile && tile->isChild() ? this->getChild(tile) : nullptr; } __hostdev__ ChildNodeType* probeChild(const CoordType& ijk) { const Tile* tile = DataType::probeTile(ijk); return tile && tile->isChild() ? this->getChild(tile) : nullptr; } template<typename OpT, typename... ArgsT> __hostdev__ auto get(const CoordType& ijk, ArgsT&&... args) const { if (const Tile* tile = this->probeTile(ijk)) { if (tile->isChild()) return this->getChild(tile)->template get<OpT>(ijk, args...); return OpT::get(*tile, args...); } return OpT::get(*this, args...); } template<typename OpT, typename... ArgsT> // __hostdev__ auto // occasionally fails with NVCC __hostdev__ decltype(OpT::set(util::declval<Tile&>(), util::declval<ArgsT>()...)) set(const CoordType& ijk, ArgsT&&... args) { if (Tile* tile = DataType::probeTile(ijk)) { if (tile->isChild()) return this->getChild(tile)->template set<OpT>(ijk, args...); return OpT::set(*tile, args...); } return OpT::set(*this, args...); } private: static_assert(sizeof(DataType) % NANOVDB_DATA_ALIGNMENT == 0, "sizeof(RootData) is misaligned"); static_assert(sizeof(typename DataType::Tile) % NANOVDB_DATA_ALIGNMENT == 0, "sizeof(RootData::Tile) is misaligned"); template<typename, int, int, int> friend class ReadAccessor; template<typename> friend class Tree; #ifndef NANOVDB_NEW_ACCESSOR_METHODS /// @brief Private method to return node information and update a ReadAccessor template<typename AccT> __hostdev__ typename AccT::NodeInfo getNodeInfoAndCache(const CoordType& ijk, const AccT& acc) const { using NodeInfoT = typename AccT::NodeInfo; if (const Tile* tile = this->probeTile(ijk)) { if (tile->isChild()) { const auto* child = this->getChild(tile); acc.insert(ijk, child); return child->getNodeInfoAndCache(ijk, acc); } return NodeInfoT{LEVEL, ChildT::dim(), tile->value, tile->value, tile->value, 0, tile->origin(), tile->origin() + CoordType(ChildT::DIM)}; } return NodeInfoT{LEVEL, ChildT::dim(), this->minimum(), this->maximum(), this->average(), this->stdDeviation(), this->bbox()[0], this->bbox()[1]}; } /// @brief Private method to return a voxel value and update a ReadAccessor template<typename AccT> __hostdev__ ValueType getValueAndCache(const CoordType& ijk, const AccT& acc) const { if (const Tile* tile = this->probeTile(ijk)) { if (tile->isChild()) { const auto* child = this->getChild(tile); acc.insert(ijk, child); return child->getValueAndCache(ijk, acc); } return tile->value; } return DataType::mBackground; } template<typename AccT> __hostdev__ bool isActiveAndCache(const CoordType& ijk, const AccT& acc) const { const Tile* tile = this->probeTile(ijk); if (tile && tile->isChild()) { const auto* child = this->getChild(tile); acc.insert(ijk, child); return child->isActiveAndCache(ijk, acc); } return false; } 
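/// @brief Private method to return the active state and value of a voxel and update a ReadAccessor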
template<typename AccT> __hostdev__ bool probeValueAndCache(const CoordType& ijk, ValueType& v, const AccT& acc) const { if (const Tile* tile = this->probeTile(ijk)) { if (tile->isChild()) { const auto* child = this->getChild(tile); acc.insert(ijk, child); return child->probeValueAndCache(ijk, v, acc); } v = tile->value; return tile->state; } v = DataType::mBackground; return false; } template<typename AccT> __hostdev__ const LeafNodeType* probeLeafAndCache(const CoordType& ijk, const AccT& acc) const { const Tile* tile = this->probeTile(ijk); if (tile && tile->isChild()) { const auto* child = this->getChild(tile); acc.insert(ijk, child); return child->probeLeafAndCache(ijk, acc); } return nullptr; } #endif // NANOVDB_NEW_ACCESSOR_METHODS template<typename RayT, typename AccT> __hostdev__ uint32_t getDimAndCache(const CoordType& ijk, const RayT& ray, const AccT& acc) const { if (const Tile* tile = this->probeTile(ijk)) { if (tile->isChild()) { const auto* child = this->getChild(tile); acc.insert(ijk, child); return child->getDimAndCache(ijk, ray, acc); } return 1 << ChildT::TOTAL; //tile value } return ChildNodeType::dim(); // background } template<typename OpT, typename AccT, typename... ArgsT> //__hostdev__ decltype(OpT::get(util::declval<const Tile&>(), util::declval<ArgsT>()...)) __hostdev__ auto getAndCache(const CoordType& ijk, const AccT& acc, ArgsT&&... args) const { if (const Tile* tile = this->probeTile(ijk)) { if (tile->isChild()) { const ChildT* child = this->getChild(tile); acc.insert(ijk, child); return child->template getAndCache<OpT>(ijk, acc, args...); } return OpT::get(*tile, args...); } return OpT::get(*this, args...); } template<typename OpT, typename AccT, typename... ArgsT> // __hostdev__ auto // occasionally fails with NVCC __hostdev__ decltype(OpT::set(util::declval<Tile&>(), util::declval<ArgsT>()...)) setAndCache(const CoordType& ijk, const AccT& acc, ArgsT&&... args) { if (Tile* tile = DataType::probeTile(ijk)) { if (tile->isChild()) { ChildT* child = this->getChild(tile); acc.insert(ijk, child); return child->template setAndCache<OpT>(ijk, acc, args...); } return OpT::set(*tile, args...); } return OpT::set(*this, args...); } }; // RootNode class // After the RootNode the memory layout is assumed to be the sorted Tiles // --------------------------> InternalNode <------------------------------------ /// @brief Struct with all the member data of the InternalNode (useful during serialization of an openvdb InternalNode) /// /// @note No client code should (or can) interface with this struct so it can safely be ignored! template<typename ChildT, uint32_t LOG2DIM> struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) InternalData { using ValueT = typename ChildT::ValueType; using BuildT = typename ChildT::BuildType; // in rare cases BuildType != ValueType, e.g. then BuildType = ValueMask and ValueType = bool using StatsT = typename ChildT::FloatType; using CoordT = typename ChildT::CoordType; using MaskT = typename ChildT::template MaskType<LOG2DIM>; static constexpr bool FIXED_SIZE = true; union Tile { ValueT value; int64_t child; //signed 64 bit byte offset relative to this InternalData, i.e. child-pointer = Tile::child + this /// @brief This class cannot be constructed or deleted Tile() = delete; Tile(const Tile&) = delete; Tile& operator=(const Tile&) = delete; ~Tile() = delete; }; math::BBox<CoordT> mBBox; // 24B. node bounding box. | uint64_t mFlags; // 8B. node flags. 
| 32B aligned MaskT mValueMask; // LOG2DIM(5): 4096B, LOG2DIM(4): 512B | 32B aligned MaskT mChildMask; // LOG2DIM(5): 4096B, LOG2DIM(4): 512B | 32B aligned ValueT mMinimum; // typically 4B ValueT mMaximum; // typically 4B StatsT mAverage; // typically 4B, average of all the active values in this node and its child nodes StatsT mStdDevi; // typically 4B, standard deviation of all the active values in this node and its child nodes // possible padding, e.g. 28 byte padding when ValueType = bool /// @brief Return padding of this class in bytes, due to aliasing and 32B alignment /// /// @note The extra bytes are not necessarily at the end, but can come from aliasing of individual data members. __hostdev__ static constexpr uint32_t padding() { return sizeof(InternalData) - (24u + 8u + 2 * (sizeof(MaskT) + sizeof(ValueT) + sizeof(StatsT)) + (1u << (3 * LOG2DIM)) * (sizeof(ValueT) > 8u ? sizeof(ValueT) : 8u)); } alignas(32) Tile mTable[1u << (3 * LOG2DIM)]; // sizeof(ValueT) x (16*16*16 or 32*32*32) __hostdev__ static uint64_t memUsage() { return sizeof(InternalData); } __hostdev__ void setChild(uint32_t n, const void* ptr) { NANOVDB_ASSERT(mChildMask.isOn(n)); mTable[n].child = util::PtrDiff(ptr, this); } template<typename ValueT> __hostdev__ void setValue(uint32_t n, const ValueT& v) { NANOVDB_ASSERT(!mChildMask.isOn(n)); mTable[n].value = v; } /// @brief Returns a pointer to the child node at the specifed linear offset. __hostdev__ ChildT* getChild(uint32_t n) { NANOVDB_ASSERT(mChildMask.isOn(n)); return util::PtrAdd<ChildT>(this, mTable[n].child); } __hostdev__ const ChildT* getChild(uint32_t n) const { NANOVDB_ASSERT(mChildMask.isOn(n)); return util::PtrAdd<ChildT>(this, mTable[n].child); } __hostdev__ ValueT getValue(uint32_t n) const { NANOVDB_ASSERT(mChildMask.isOff(n)); return mTable[n].value; } __hostdev__ bool isActive(uint32_t n) const { NANOVDB_ASSERT(mChildMask.isOff(n)); return mValueMask.isOn(n); } __hostdev__ bool isChild(uint32_t n) const { return mChildMask.isOn(n); } template<typename T> __hostdev__ void setOrigin(const T& ijk) { mBBox[0] = ijk; } __hostdev__ const ValueT& getMin() const { return mMinimum; } __hostdev__ const ValueT& getMax() const { return mMaximum; } __hostdev__ const StatsT& average() const { return mAverage; } __hostdev__ const StatsT& stdDeviation() const { return mStdDevi; } #if defined(__GNUC__) && !defined(__APPLE__) && !defined(__llvm__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wstringop-overflow" #endif __hostdev__ void setMin(const ValueT& v) { mMinimum = v; } __hostdev__ void setMax(const ValueT& v) { mMaximum = v; } __hostdev__ void setAvg(const StatsT& v) { mAverage = v; } __hostdev__ void setDev(const StatsT& v) { mStdDevi = v; } #if defined(__GNUC__) && !defined(__APPLE__) && !defined(__llvm__) #pragma GCC diagnostic pop #endif /// @brief This class cannot be constructed or deleted InternalData() = delete; InternalData(const InternalData&) = delete; InternalData& operator=(const InternalData&) = delete; ~InternalData() = delete; }; // InternalData /// @brief Internal nodes of a VDB tree template<typename ChildT, uint32_t Log2Dim = ChildT::LOG2DIM + 1> class InternalNode : public InternalData<ChildT, Log2Dim> { public: using DataType = InternalData<ChildT, Log2Dim>; using ValueType = typename DataType::ValueT; using FloatType = typename DataType::StatsT; using BuildType = typename DataType::BuildT; // in rare cases BuildType != ValueType, e.g. 
then BuildType = ValueMask and ValueType = bool using LeafNodeType = typename ChildT::LeafNodeType; using ChildNodeType = ChildT; using CoordType = typename ChildT::CoordType; static constexpr bool FIXED_SIZE = DataType::FIXED_SIZE; template<uint32_t LOG2> using MaskType = typename ChildT::template MaskType<LOG2>; template<bool On> using MaskIterT = typename Mask<Log2Dim>::template Iterator<On>; static constexpr uint32_t LOG2DIM = Log2Dim; static constexpr uint32_t TOTAL = LOG2DIM + ChildT::TOTAL; // dimension in index space static constexpr uint32_t DIM = 1u << TOTAL; // number of voxels along each axis of this node static constexpr uint32_t SIZE = 1u << (3 * LOG2DIM); // number of tile values (or child pointers) static constexpr uint32_t MASK = (1u << TOTAL) - 1u; static constexpr uint32_t LEVEL = 1 + ChildT::LEVEL; // level 0 = leaf static constexpr uint64_t NUM_VALUES = uint64_t(1) << (3 * TOTAL); // total voxel count represented by this node /// @brief Visits child nodes of this node only template <typename ParentT> class ChildIter : public MaskIterT<true> { static_assert(util::is_same<typename util::remove_const<ParentT>::type, InternalNode>::value, "Invalid ParentT"); using BaseT = MaskIterT<true>; using NodeT = typename util::match_const<ChildT, ParentT>::type; ParentT* mParent; public: __hostdev__ ChildIter() : BaseT() , mParent(nullptr) { } __hostdev__ ChildIter(ParentT* parent) : BaseT(parent->mChildMask.beginOn()) , mParent(parent) { } ChildIter& operator=(const ChildIter&) = default; __hostdev__ NodeT& operator*() const { NANOVDB_ASSERT(*this); return *mParent->getChild(BaseT::pos()); } __hostdev__ NodeT* operator->() const { NANOVDB_ASSERT(*this); return mParent->getChild(BaseT::pos()); } __hostdev__ CoordType getOrigin() const { NANOVDB_ASSERT(*this); return (*this)->origin(); } __hostdev__ CoordType getCoord() const {return this->getOrigin();} }; // Member class ChildIter using ChildIterator = ChildIter<InternalNode>; using ConstChildIterator = ChildIter<const InternalNode>; __hostdev__ ChildIterator beginChild() { return ChildIterator(this); } __hostdev__ ConstChildIterator cbeginChild() const { return ConstChildIterator(this); } /// @brief Visits all tile values in this node, i.e. 
both inactive and active tiles class ValueIterator : public MaskIterT<false> { using BaseT = MaskIterT<false>; const InternalNode* mParent; public: __hostdev__ ValueIterator() : BaseT() , mParent(nullptr) { } __hostdev__ ValueIterator(const InternalNode* parent) : BaseT(parent->data()->mChildMask.beginOff()) , mParent(parent) { } ValueIterator& operator=(const ValueIterator&) = default; __hostdev__ ValueType operator*() const { NANOVDB_ASSERT(*this); return mParent->data()->getValue(BaseT::pos()); } __hostdev__ CoordType getOrigin() const { NANOVDB_ASSERT(*this); return mParent->offsetToGlobalCoord(BaseT::pos()); } __hostdev__ CoordType getCoord() const {return this->getOrigin();} __hostdev__ bool isActive() const { NANOVDB_ASSERT(*this); return mParent->data()->isActive(BaseT::mPos); } }; // Member class ValueIterator __hostdev__ ValueIterator beginValue() const { return ValueIterator(this); } __hostdev__ ValueIterator cbeginValueAll() const { return ValueIterator(this); } /// @brief Visits active tile values of this node only class ValueOnIterator : public MaskIterT<true> { using BaseT = MaskIterT<true>; const InternalNode* mParent; public: __hostdev__ ValueOnIterator() : BaseT() , mParent(nullptr) { } __hostdev__ ValueOnIterator(const InternalNode* parent) : BaseT(parent->data()->mValueMask.beginOn()) , mParent(parent) { } ValueOnIterator& operator=(const ValueOnIterator&) = default; __hostdev__ ValueType operator*() const { NANOVDB_ASSERT(*this); return mParent->data()->getValue(BaseT::pos()); } __hostdev__ CoordType getOrigin() const { NANOVDB_ASSERT(*this); return mParent->offsetToGlobalCoord(BaseT::pos()); } __hostdev__ CoordType getCoord() const {return this->getOrigin();} }; // Member class ValueOnIterator __hostdev__ ValueOnIterator beginValueOn() const { return ValueOnIterator(this); } __hostdev__ ValueOnIterator cbeginValueOn() const { return ValueOnIterator(this); } /// @brief Visits all tile values and child nodes of this node class DenseIterator : public Mask<Log2Dim>::DenseIterator { using BaseT = typename Mask<Log2Dim>::DenseIterator; const DataType* mParent; public: __hostdev__ DenseIterator() : BaseT() , mParent(nullptr) { } __hostdev__ DenseIterator(const InternalNode* parent) : BaseT(0) , mParent(parent->data()) { } DenseIterator& operator=(const DenseIterator&) = default; __hostdev__ const ChildT* probeChild(ValueType& value) const { NANOVDB_ASSERT(mParent && bool(*this)); const ChildT* child = nullptr; if (mParent->mChildMask.isOn(BaseT::pos())) { child = mParent->getChild(BaseT::pos()); } else { value = mParent->getValue(BaseT::pos()); } return child; } __hostdev__ bool isValueOn() const { NANOVDB_ASSERT(mParent && bool(*this)); return mParent->isActive(BaseT::pos()); } __hostdev__ CoordType getOrigin() const { NANOVDB_ASSERT(mParent && bool(*this)); return mParent->offsetToGlobalCoord(BaseT::pos()); } __hostdev__ CoordType getCoord() const {return this->getOrigin();} }; // Member class DenseIterator __hostdev__ DenseIterator beginDense() const { return DenseIterator(this); } __hostdev__ DenseIterator cbeginChildAll() const { return DenseIterator(this); } // matches openvdb /// @brief This class cannot be constructed or deleted InternalNode() = delete; InternalNode(const InternalNode&) = delete; InternalNode& operator=(const InternalNode&) = delete; ~InternalNode() = delete; __hostdev__ DataType* data() { return reinterpret_cast<DataType*>(this); } __hostdev__ const DataType* data() const { return reinterpret_cast<const DataType*>(this); } /// @brief Return the 
dimension, in voxel units, of this internal node (typically 8*16 or 8*16*32) __hostdev__ static uint32_t dim() { return 1u << TOTAL; } /// @brief Return memory usage in bytes for the class __hostdev__ static size_t memUsage() { return DataType::memUsage(); } /// @brief Return a const reference to the bit mask of active voxels in this internal node __hostdev__ const MaskType<LOG2DIM>& valueMask() const { return DataType::mValueMask; } __hostdev__ const MaskType<LOG2DIM>& getValueMask() const { return DataType::mValueMask; } /// @brief Return a const reference to the bit mask of child nodes in this internal node __hostdev__ const MaskType<LOG2DIM>& childMask() const { return DataType::mChildMask; } __hostdev__ const MaskType<LOG2DIM>& getChildMask() const { return DataType::mChildMask; } /// @brief Return the origin in index space of this leaf node __hostdev__ CoordType origin() const { return DataType::mBBox.min() & ~MASK; } /// @brief Return a const reference to the minimum active value encoded in this internal node and any of its child nodes __hostdev__ const ValueType& minimum() const { return this->getMin(); } /// @brief Return a const reference to the maximum active value encoded in this internal node and any of its child nodes __hostdev__ const ValueType& maximum() const { return this->getMax(); } /// @brief Return a const reference to the average of all the active values encoded in this internal node and any of its child nodes __hostdev__ const FloatType& average() const { return DataType::mAverage; } /// @brief Return the variance of all the active values encoded in this internal node and any of its child nodes __hostdev__ FloatType variance() const { return DataType::mStdDevi * DataType::mStdDevi; } /// @brief Return a const reference to the standard deviation of all the active values encoded in this internal node and any of its child nodes __hostdev__ const FloatType& stdDeviation() const { return DataType::mStdDevi; } /// @brief Return a const reference to the bounding box in index space of active values in this internal node and any of its child nodes __hostdev__ const math::BBox<CoordType>& bbox() const { return DataType::mBBox; } /// @brief If the first entry in this node's table is a tile, return the tile's value. /// Otherwise, return the result of calling getFirstValue() on the child. __hostdev__ ValueType getFirstValue() const { return DataType::mChildMask.isOn(0) ? this->getChild(0)->getFirstValue() : DataType::getValue(0); } /// @brief If the last entry in this node's table is a tile, return the tile's value. /// Otherwise, return the result of calling getLastValue() on the child. __hostdev__ ValueType getLastValue() const { return DataType::mChildMask.isOn(SIZE - 1) ? 
this->getChild(SIZE - 1)->getLastValue() : DataType::getValue(SIZE - 1); } #ifdef NANOVDB_NEW_ACCESSOR_METHODS /// @brief Return the value of the given voxel __hostdev__ ValueType getValue(const CoordType& ijk) const { return this->template get<GetValue<BuildType>>(ijk); } __hostdev__ bool isActive(const CoordType& ijk) const { return this->template get<GetState<BuildType>>(ijk); } /// @brief return the state and updates the value of the specified voxel __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const { return this->template get<ProbeValue<BuildType>>(ijk, v); } __hostdev__ const LeafNodeType* probeLeaf(const CoordType& ijk) const { return this->template get<GetLeaf<BuildType>>(ijk); } #else // NANOVDB_NEW_ACCESSOR_METHODS __hostdev__ ValueType getValue(const CoordType& ijk) const { const uint32_t n = CoordToOffset(ijk); return DataType::mChildMask.isOn(n) ? this->getChild(n)->getValue(ijk) : DataType::getValue(n); } __hostdev__ bool isActive(const CoordType& ijk) const { const uint32_t n = CoordToOffset(ijk); return DataType::mChildMask.isOn(n) ? this->getChild(n)->isActive(ijk) : DataType::isActive(n); } __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const { const uint32_t n = CoordToOffset(ijk); if (DataType::mChildMask.isOn(n)) return this->getChild(n)->probeValue(ijk, v); v = DataType::getValue(n); return DataType::isActive(n); } __hostdev__ const LeafNodeType* probeLeaf(const CoordType& ijk) const { const uint32_t n = CoordToOffset(ijk); if (DataType::mChildMask.isOn(n)) return this->getChild(n)->probeLeaf(ijk); return nullptr; } #endif // NANOVDB_NEW_ACCESSOR_METHODS __hostdev__ ChildNodeType* probeChild(const CoordType& ijk) { const uint32_t n = CoordToOffset(ijk); return DataType::mChildMask.isOn(n) ? this->getChild(n) : nullptr; } __hostdev__ const ChildNodeType* probeChild(const CoordType& ijk) const { const uint32_t n = CoordToOffset(ijk); return DataType::mChildMask.isOn(n) ? this->getChild(n) : nullptr; } /// @brief Return the linear offset corresponding to the given coordinate __hostdev__ static uint32_t CoordToOffset(const CoordType& ijk) { return (((ijk[0] & MASK) >> ChildT::TOTAL) << (2 * LOG2DIM)) | // note, we're using bitwise OR instead of + (((ijk[1] & MASK) >> ChildT::TOTAL) << (LOG2DIM)) | ((ijk[2] & MASK) >> ChildT::TOTAL); } /// @return the local coordinate of the n'th tile or child node __hostdev__ static Coord OffsetToLocalCoord(uint32_t n) { NANOVDB_ASSERT(n < SIZE); const uint32_t m = n & ((1 << 2 * LOG2DIM) - 1); return Coord(n >> 2 * LOG2DIM, m >> LOG2DIM, m & ((1 << LOG2DIM) - 1)); } /// @brief modifies local coordinates to global coordinates of a tile or child node __hostdev__ void localToGlobalCoord(Coord& ijk) const { ijk <<= ChildT::TOTAL; ijk += this->origin(); } __hostdev__ Coord offsetToGlobalCoord(uint32_t n) const { Coord ijk = InternalNode::OffsetToLocalCoord(n); this->localToGlobalCoord(ijk); return ijk; } /// @brief Return true if this node or any of its child nodes contain active values __hostdev__ bool isActive() const { return DataType::mFlags & uint32_t(2); } template<typename OpT, typename... ArgsT> __hostdev__ auto get(const CoordType& ijk, ArgsT&&... args) const { const uint32_t n = CoordToOffset(ijk); if (this->isChild(n)) return this->getChild(n)->template get<OpT>(ijk, args...); return OpT::get(*this, n, args...); } template<typename OpT, typename... 
ArgsT> //__hostdev__ auto // occasionally fails with NVCC __hostdev__ decltype(OpT::set(util::declval<InternalNode&>(), util::declval<uint32_t>(), util::declval<ArgsT>()...)) set(const CoordType& ijk, ArgsT&&... args) { const uint32_t n = CoordToOffset(ijk); if (this->isChild(n)) return this->getChild(n)->template set<OpT>(ijk, args...); return OpT::set(*this, n, args...); } private: static_assert(sizeof(DataType) % NANOVDB_DATA_ALIGNMENT == 0, "sizeof(InternalData) is misaligned"); template<typename, int, int, int> friend class ReadAccessor; template<typename> friend class RootNode; template<typename, uint32_t> friend class InternalNode; #ifndef NANOVDB_NEW_ACCESSOR_METHODS /// @brief Private read access method used by the ReadAccessor template<typename AccT> __hostdev__ ValueType getValueAndCache(const CoordType& ijk, const AccT& acc) const { const uint32_t n = CoordToOffset(ijk); if (DataType::mChildMask.isOff(n)) return DataType::getValue(n); const ChildT* child = this->getChild(n); acc.insert(ijk, child); return child->getValueAndCache(ijk, acc); } template<typename AccT> __hostdev__ bool isActiveAndCache(const CoordType& ijk, const AccT& acc) const { const uint32_t n = CoordToOffset(ijk); if (DataType::mChildMask.isOff(n)) return DataType::isActive(n); const ChildT* child = this->getChild(n); acc.insert(ijk, child); return child->isActiveAndCache(ijk, acc); } template<typename AccT> __hostdev__ bool probeValueAndCache(const CoordType& ijk, ValueType& v, const AccT& acc) const { const uint32_t n = CoordToOffset(ijk); if (DataType::mChildMask.isOff(n)) { v = DataType::getValue(n); return DataType::isActive(n); } const ChildT* child = this->getChild(n); acc.insert(ijk, child); return child->probeValueAndCache(ijk, v, acc); } template<typename AccT> __hostdev__ const LeafNodeType* probeLeafAndCache(const CoordType& ijk, const AccT& acc) const { const uint32_t n = CoordToOffset(ijk); if (DataType::mChildMask.isOff(n)) return nullptr; const ChildT* child = this->getChild(n); acc.insert(ijk, child); return child->probeLeafAndCache(ijk, acc); } template<typename AccT> __hostdev__ typename AccT::NodeInfo getNodeInfoAndCache(const CoordType& ijk, const AccT& acc) const { using NodeInfoT = typename AccT::NodeInfo; const uint32_t n = CoordToOffset(ijk); if (DataType::mChildMask.isOff(n)) { return NodeInfoT{LEVEL, this->dim(), this->minimum(), this->maximum(), this->average(), this->stdDeviation(), this->bbox()[0], this->bbox()[1]}; } const ChildT* child = this->getChild(n); acc.insert(ijk, child); return child->getNodeInfoAndCache(ijk, acc); } #endif // NANOVDB_NEW_ACCESSOR_METHODS template<typename RayT, typename AccT> __hostdev__ uint32_t getDimAndCache(const CoordType& ijk, const RayT& ray, const AccT& acc) const { if (DataType::mFlags & uint32_t(1u)) return this->dim(); // skip this node if the 1st bit is set //if (!ray.intersects( this->bbox() )) return 1<<TOTAL; const uint32_t n = CoordToOffset(ijk); if (DataType::mChildMask.isOn(n)) { const ChildT* child = this->getChild(n); acc.insert(ijk, child); return child->getDimAndCache(ijk, ray, acc); } return ChildNodeType::dim(); // tile value } template<typename OpT, typename AccT, typename... ArgsT> __hostdev__ auto //__hostdev__ decltype(OpT::get(util::declval<const InternalNode&>(), util::declval<uint32_t>(), util::declval<ArgsT>()...)) getAndCache(const CoordType& ijk, const AccT& acc, ArgsT&&... 
args) const { const uint32_t n = CoordToOffset(ijk); if (DataType::mChildMask.isOff(n)) return OpT::get(*this, n, args...); const ChildT* child = this->getChild(n); acc.insert(ijk, child); return child->template getAndCache<OpT>(ijk, acc, args...); } template<typename OpT, typename AccT, typename... ArgsT> //__hostdev__ auto // occasionally fails with NVCC __hostdev__ decltype(OpT::set(util::declval<InternalNode&>(), util::declval<uint32_t>(), util::declval<ArgsT>()...)) setAndCache(const CoordType& ijk, const AccT& acc, ArgsT&&... args) { const uint32_t n = CoordToOffset(ijk); if (DataType::mChildMask.isOff(n)) return OpT::set(*this, n, args...); ChildT* child = this->getChild(n); acc.insert(ijk, child); return child->template setAndCache<OpT>(ijk, acc, args...); } }; // InternalNode class // --------------------------> LeafData<T> <------------------------------------ /// @brief Stuct with all the member data of the LeafNode (useful during serialization of an openvdb LeafNode) /// /// @note No client code should (or can) interface with this struct so it can safely be ignored! template<typename ValueT, typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM> struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData { static_assert(sizeof(CoordT) == sizeof(Coord), "Mismatching sizeof"); static_assert(sizeof(MaskT<LOG2DIM>) == sizeof(Mask<LOG2DIM>), "Mismatching sizeof"); using ValueType = ValueT; using BuildType = ValueT; using FloatType = typename FloatTraits<ValueT>::FloatType; using ArrayType = ValueT; // type used for the internal mValue array static constexpr bool FIXED_SIZE = true; CoordT mBBoxMin; // 12B. uint8_t mBBoxDif[3]; // 3B. uint8_t mFlags; // 1B. bit0: skip render?, bit1: has bbox?, bit3: unused, bit4: has stats, bits5,6,7: bit-width for FpN MaskT<LOG2DIM> mValueMask; // LOG2DIM(3): 64B. ValueType mMinimum; // typically 4B ValueType mMaximum; // typically 4B FloatType mAverage; // typically 4B, average of all the active values in this node and its child nodes FloatType mStdDevi; // typically 4B, standard deviation of all the active values in this node and its child nodes alignas(32) ValueType mValues[1u << 3 * LOG2DIM]; /// @brief Return padding of this class in bytes, due to aliasing and 32B alignment /// /// @note The extra bytes are not necessarily at the end, but can come from aliasing of individual data members. 
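    /// @note For example, with BuildT = float and LOG2DIM = 3 the members above occupy
    ///       12 + 3 + 1 + 64 + 4*4 + 512*4 = 2144 bytes, already a multiple of NANOVDB_DATA_ALIGNMENT (32),
    ///       so padding() evaluates to zero. A client-side compile-time check (illustrative sketch only) could read:
    /// @code
    ///     static_assert(nanovdb::NanoLeaf<float>::DataType::padding() == 0, "expected densely packed float leaf");
    /// @endcode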
__hostdev__ static constexpr uint32_t padding() { return sizeof(LeafData) - (12 + 3 + 1 + sizeof(MaskT<LOG2DIM>) + 2 * (sizeof(ValueT) + sizeof(FloatType)) + (1u << (3 * LOG2DIM)) * sizeof(ValueT)); } __hostdev__ static uint64_t memUsage() { return sizeof(LeafData); } __hostdev__ static bool hasStats() { return true; } __hostdev__ ValueType getValue(uint32_t i) const { return mValues[i]; } __hostdev__ void setValueOnly(uint32_t offset, const ValueType& value) { mValues[offset] = value; } __hostdev__ void setValue(uint32_t offset, const ValueType& value) { mValueMask.setOn(offset); mValues[offset] = value; } __hostdev__ void setOn(uint32_t offset) { mValueMask.setOn(offset); } __hostdev__ ValueType getMin() const { return mMinimum; } __hostdev__ ValueType getMax() const { return mMaximum; } __hostdev__ FloatType getAvg() const { return mAverage; } __hostdev__ FloatType getDev() const { return mStdDevi; } __hostdev__ void setMin(const ValueType& v) { mMinimum = v; } __hostdev__ void setMax(const ValueType& v) { mMaximum = v; } __hostdev__ void setAvg(const FloatType& v) { mAverage = v; } __hostdev__ void setDev(const FloatType& v) { mStdDevi = v; } template<typename T> __hostdev__ void setOrigin(const T& ijk) { mBBoxMin = ijk; } __hostdev__ void fill(const ValueType& v) { for (auto *p = mValues, *q = p + 512; p != q; ++p) *p = v; } /// @brief This class cannot be constructed or deleted LeafData() = delete; LeafData(const LeafData&) = delete; LeafData& operator=(const LeafData&) = delete; ~LeafData() = delete; }; // LeafData<ValueT> // --------------------------> LeafFnBase <------------------------------------ /// @brief Base-class for quantized float leaf nodes template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM> struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafFnBase { static_assert(sizeof(CoordT) == sizeof(Coord), "Mismatching sizeof"); static_assert(sizeof(MaskT<LOG2DIM>) == sizeof(Mask<LOG2DIM>), "Mismatching sizeof"); using ValueType = float; using FloatType = float; CoordT mBBoxMin; // 12B. uint8_t mBBoxDif[3]; // 3B. uint8_t mFlags; // 1B. bit0: skip render?, bit1: has bbox?, bit3: unused, bit4: has stats, bits5,6,7: bit-width for FpN MaskT<LOG2DIM> mValueMask; // LOG2DIM(3): 64B. float mMinimum; // 4B - minimum of ALL values in this node float mQuantum; // = (max - min)/15 4B uint16_t mMin, mMax, mAvg, mDev; // quantized representations of statistics of active values // no padding since it's always 32B aligned __hostdev__ static uint64_t memUsage() { return sizeof(LeafFnBase); } __hostdev__ static bool hasStats() { return true; } /// @brief Return padding of this class in bytes, due to aliasing and 32B alignment /// /// @note The extra bytes are not necessarily at the end, but can come from aliasing of individual data members. 
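    /// @note For LOG2DIM = 3 the members above occupy 12 + 3 + 1 + 64 + 2*4 + 4*2 = 96 bytes, which is already
    ///       32B aligned, so this method returns zero; the derived Fp4/Fp8/Fp16/FpN leaf types assert this.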
__hostdev__ static constexpr uint32_t padding() { return sizeof(LeafFnBase) - (12 + 3 + 1 + sizeof(MaskT<LOG2DIM>) + 2 * 4 + 4 * 2); } __hostdev__ void init(float min, float max, uint8_t bitWidth) { mMinimum = min; mQuantum = (max - min) / float((1 << bitWidth) - 1); } __hostdev__ void setOn(uint32_t offset) { mValueMask.setOn(offset); } /// @brief return the quantized minimum of the active values in this node __hostdev__ float getMin() const { return mMin * mQuantum + mMinimum; } /// @brief return the quantized maximum of the active values in this node __hostdev__ float getMax() const { return mMax * mQuantum + mMinimum; } /// @brief return the quantized average of the active values in this node __hostdev__ float getAvg() const { return mAvg * mQuantum + mMinimum; } /// @brief return the quantized standard deviation of the active values in this node /// @note 0 <= StdDev <= max-min or 0 <= StdDev/(max-min) <= 1 __hostdev__ float getDev() const { return mDev * mQuantum; } /// @note min <= X <= max or 0 <= (X-min)/(min-max) <= 1 __hostdev__ void setMin(float min) { mMin = uint16_t((min - mMinimum) / mQuantum + 0.5f); } /// @note min <= X <= max or 0 <= (X-min)/(min-max) <= 1 __hostdev__ void setMax(float max) { mMax = uint16_t((max - mMinimum) / mQuantum + 0.5f); } /// @note min <= avg <= max or 0 <= (avg-min)/(min-max) <= 1 __hostdev__ void setAvg(float avg) { mAvg = uint16_t((avg - mMinimum) / mQuantum + 0.5f); } /// @note 0 <= StdDev <= max-min or 0 <= StdDev/(max-min) <= 1 __hostdev__ void setDev(float dev) { mDev = uint16_t(dev / mQuantum + 0.5f); } template<typename T> __hostdev__ void setOrigin(const T& ijk) { mBBoxMin = ijk; } }; // LeafFnBase // --------------------------> LeafData<Fp4> <------------------------------------ /// @brief Stuct with all the member data of the LeafNode (useful during serialization of an openvdb LeafNode) /// /// @note No client code should (or can) interface with this struct so it can safely be ignored! template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM> struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData<Fp4, CoordT, MaskT, LOG2DIM> : public LeafFnBase<CoordT, MaskT, LOG2DIM> { using BaseT = LeafFnBase<CoordT, MaskT, LOG2DIM>; using BuildType = Fp4; using ArrayType = uint8_t; // type used for the internal mValue array static constexpr bool FIXED_SIZE = true; alignas(32) uint8_t mCode[1u << (3 * LOG2DIM - 1)]; // LeafFnBase is 32B aligned and so is mCode __hostdev__ static constexpr uint64_t memUsage() { return sizeof(LeafData); } __hostdev__ static constexpr uint32_t padding() { static_assert(BaseT::padding() == 0, "expected no padding in LeafFnBase"); return sizeof(LeafData) - sizeof(BaseT) - (1u << (3 * LOG2DIM - 1)); } __hostdev__ static constexpr uint8_t bitWidth() { return 4u; } __hostdev__ float getValue(uint32_t i) const { #if 0 const uint8_t c = mCode[i>>1]; return ( (i&1) ? 
c >> 4 : c & uint8_t(15) )*BaseT::mQuantum + BaseT::mMinimum; #else return ((mCode[i >> 1] >> ((i & 1) << 2)) & uint8_t(15)) * BaseT::mQuantum + BaseT::mMinimum; #endif } /// @brief This class cannot be constructed or deleted LeafData() = delete; LeafData(const LeafData&) = delete; LeafData& operator=(const LeafData&) = delete; ~LeafData() = delete; }; // LeafData<Fp4> // --------------------------> LeafBase<Fp8> <------------------------------------ template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM> struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData<Fp8, CoordT, MaskT, LOG2DIM> : public LeafFnBase<CoordT, MaskT, LOG2DIM> { using BaseT = LeafFnBase<CoordT, MaskT, LOG2DIM>; using BuildType = Fp8; using ArrayType = uint8_t; // type used for the internal mValue array static constexpr bool FIXED_SIZE = true; alignas(32) uint8_t mCode[1u << 3 * LOG2DIM]; __hostdev__ static constexpr int64_t memUsage() { return sizeof(LeafData); } __hostdev__ static constexpr uint32_t padding() { static_assert(BaseT::padding() == 0, "expected no padding in LeafFnBase"); return sizeof(LeafData) - sizeof(BaseT) - (1u << 3 * LOG2DIM); } __hostdev__ static constexpr uint8_t bitWidth() { return 8u; } __hostdev__ float getValue(uint32_t i) const { return mCode[i] * BaseT::mQuantum + BaseT::mMinimum; // code * (max-min)/255 + min } /// @brief This class cannot be constructed or deleted LeafData() = delete; LeafData(const LeafData&) = delete; LeafData& operator=(const LeafData&) = delete; ~LeafData() = delete; }; // LeafData<Fp8> // --------------------------> LeafData<Fp16> <------------------------------------ template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM> struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData<Fp16, CoordT, MaskT, LOG2DIM> : public LeafFnBase<CoordT, MaskT, LOG2DIM> { using BaseT = LeafFnBase<CoordT, MaskT, LOG2DIM>; using BuildType = Fp16; using ArrayType = uint16_t; // type used for the internal mValue array static constexpr bool FIXED_SIZE = true; alignas(32) uint16_t mCode[1u << 3 * LOG2DIM]; __hostdev__ static constexpr uint64_t memUsage() { return sizeof(LeafData); } __hostdev__ static constexpr uint32_t padding() { static_assert(BaseT::padding() == 0, "expected no padding in LeafFnBase"); return sizeof(LeafData) - sizeof(BaseT) - 2 * (1u << 3 * LOG2DIM); } __hostdev__ static constexpr uint8_t bitWidth() { return 16u; } __hostdev__ float getValue(uint32_t i) const { return mCode[i] * BaseT::mQuantum + BaseT::mMinimum; // code * (max-min)/65535 + min } /// @brief This class cannot be constructed or deleted LeafData() = delete; LeafData(const LeafData&) = delete; LeafData& operator=(const LeafData&) = delete; ~LeafData() = delete; }; // LeafData<Fp16> // --------------------------> LeafData<FpN> <------------------------------------ template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM> struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData<FpN, CoordT, MaskT, LOG2DIM> : public LeafFnBase<CoordT, MaskT, LOG2DIM> { // this class has no additional data members, however every instance is immediately followed by // bitWidth*64 bytes. 
Since its base class is 32B aligned so are the bitWidth*64 bytes using BaseT = LeafFnBase<CoordT, MaskT, LOG2DIM>; using BuildType = FpN; static constexpr bool FIXED_SIZE = false; __hostdev__ static constexpr uint32_t padding() { static_assert(BaseT::padding() == 0, "expected no padding in LeafFnBase"); return 0; } __hostdev__ uint8_t bitWidth() const { return 1 << (BaseT::mFlags >> 5); } // 4,8,16,32 = 2^(2,3,4,5) __hostdev__ size_t memUsage() const { return sizeof(*this) + this->bitWidth() * 64; } __hostdev__ static size_t memUsage(uint32_t bitWidth) { return 96u + bitWidth * 64; } __hostdev__ float getValue(uint32_t i) const { #ifdef NANOVDB_FPN_BRANCHLESS // faster const int b = BaseT::mFlags >> 5; // b = 0, 1, 2, 3, 4 corresponding to 1, 2, 4, 8, 16 bits #if 0 // use LUT uint16_t code = reinterpret_cast<const uint16_t*>(this + 1)[i >> (4 - b)]; const static uint8_t shift[5] = {15, 7, 3, 1, 0}; const static uint16_t mask[5] = {1, 3, 15, 255, 65535}; code >>= (i & shift[b]) << b; code &= mask[b]; #else // no LUT uint32_t code = reinterpret_cast<const uint32_t*>(this + 1)[i >> (5 - b)]; code >>= (i & ((32 >> b) - 1)) << b; code &= (1 << (1 << b)) - 1; #endif #else // use branched version (slow) float code; auto* values = reinterpret_cast<const uint8_t*>(this + 1); switch (BaseT::mFlags >> 5) { case 0u: // 1 bit float code = float((values[i >> 3] >> (i & 7)) & uint8_t(1)); break; case 1u: // 2 bits float code = float((values[i >> 2] >> ((i & 3) << 1)) & uint8_t(3)); break; case 2u: // 4 bits float code = float((values[i >> 1] >> ((i & 1) << 2)) & uint8_t(15)); break; case 3u: // 8 bits float code = float(values[i]); break; default: // 16 bits float code = float(reinterpret_cast<const uint16_t*>(values)[i]); } #endif return float(code) * BaseT::mQuantum + BaseT::mMinimum; // code * (max-min)/UNITS + min } /// @brief This class cannot be constructed or deleted LeafData() = delete; LeafData(const LeafData&) = delete; LeafData& operator=(const LeafData&) = delete; ~LeafData() = delete; }; // LeafData<FpN> // --------------------------> LeafData<bool> <------------------------------------ // Partial template specialization of LeafData with bool template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM> struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData<bool, CoordT, MaskT, LOG2DIM> { static_assert(sizeof(CoordT) == sizeof(Coord), "Mismatching sizeof"); static_assert(sizeof(MaskT<LOG2DIM>) == sizeof(Mask<LOG2DIM>), "Mismatching sizeof"); using ValueType = bool; using BuildType = bool; using FloatType = bool; // dummy value type using ArrayType = MaskT<LOG2DIM>; // type used for the internal mValue array static constexpr bool FIXED_SIZE = true; CoordT mBBoxMin; // 12B. uint8_t mBBoxDif[3]; // 3B. uint8_t mFlags; // 1B. bit0: skip render?, bit1: has bbox?, bit3: unused, bit4: has stats, bits5,6,7: bit-width for FpN MaskT<LOG2DIM> mValueMask; // LOG2DIM(3): 64B. MaskT<LOG2DIM> mValues; // LOG2DIM(3): 64B. 
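    // Note: bool leaves store their per-voxel values in a second bit mask (mValues above), so getValue(i)
    //       simply returns mValues.isOn(i), while mValueMask tracks the active states.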
uint64_t mPadding[2]; // 16B padding to 32B alignment __hostdev__ static constexpr uint32_t padding() { return sizeof(LeafData) - 12u - 3u - 1u - 2 * sizeof(MaskT<LOG2DIM>) - 16u; } __hostdev__ static uint64_t memUsage() { return sizeof(LeafData); } __hostdev__ static bool hasStats() { return false; } __hostdev__ bool getValue(uint32_t i) const { return mValues.isOn(i); } __hostdev__ bool getMin() const { return false; } // dummy __hostdev__ bool getMax() const { return false; } // dummy __hostdev__ bool getAvg() const { return false; } // dummy __hostdev__ bool getDev() const { return false; } // dummy __hostdev__ void setValue(uint32_t offset, bool v) { mValueMask.setOn(offset); mValues.set(offset, v); } __hostdev__ void setOn(uint32_t offset) { mValueMask.setOn(offset); } __hostdev__ void setMin(const bool&) {} // no-op __hostdev__ void setMax(const bool&) {} // no-op __hostdev__ void setAvg(const bool&) {} // no-op __hostdev__ void setDev(const bool&) {} // no-op template<typename T> __hostdev__ void setOrigin(const T& ijk) { mBBoxMin = ijk; } /// @brief This class cannot be constructed or deleted LeafData() = delete; LeafData(const LeafData&) = delete; LeafData& operator=(const LeafData&) = delete; ~LeafData() = delete; }; // LeafData<bool> // --------------------------> LeafData<ValueMask> <------------------------------------ // Partial template specialization of LeafData with ValueMask template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM> struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData<ValueMask, CoordT, MaskT, LOG2DIM> { static_assert(sizeof(CoordT) == sizeof(Coord), "Mismatching sizeof"); static_assert(sizeof(MaskT<LOG2DIM>) == sizeof(Mask<LOG2DIM>), "Mismatching sizeof"); using ValueType = bool; using BuildType = ValueMask; using FloatType = bool; // dummy value type using ArrayType = void; // type used for the internal mValue array - void means missing static constexpr bool FIXED_SIZE = true; CoordT mBBoxMin; // 12B. uint8_t mBBoxDif[3]; // 3B. uint8_t mFlags; // 1B. bit0: skip render?, bit1: has bbox?, bit3: unused, bit4: has stats, bits5,6,7: bit-width for FpN MaskT<LOG2DIM> mValueMask; // LOG2DIM(3): 64B. 
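    // Note: ValueMask leaves store no separate values; the active-state mask doubles as the value,
    //       i.e. getValue(i) simply returns mValueMask.isOn(i).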
uint64_t mPadding[2]; // 16B padding to 32B alignment __hostdev__ static uint64_t memUsage() { return sizeof(LeafData); } __hostdev__ static bool hasStats() { return false; } __hostdev__ static constexpr uint32_t padding() { return sizeof(LeafData) - (12u + 3u + 1u + sizeof(MaskT<LOG2DIM>) + 2 * 8u); } __hostdev__ bool getValue(uint32_t i) const { return mValueMask.isOn(i); } __hostdev__ bool getMin() const { return false; } // dummy __hostdev__ bool getMax() const { return false; } // dummy __hostdev__ bool getAvg() const { return false; } // dummy __hostdev__ bool getDev() const { return false; } // dummy __hostdev__ void setValue(uint32_t offset, bool) { mValueMask.setOn(offset); } __hostdev__ void setOn(uint32_t offset) { mValueMask.setOn(offset); } __hostdev__ void setMin(const ValueType&) {} // no-op __hostdev__ void setMax(const ValueType&) {} // no-op __hostdev__ void setAvg(const FloatType&) {} // no-op __hostdev__ void setDev(const FloatType&) {} // no-op template<typename T> __hostdev__ void setOrigin(const T& ijk) { mBBoxMin = ijk; } /// @brief This class cannot be constructed or deleted LeafData() = delete; LeafData(const LeafData&) = delete; LeafData& operator=(const LeafData&) = delete; ~LeafData() = delete; }; // LeafData<ValueMask> // --------------------------> LeafIndexBase <------------------------------------ // Partial template specialization of LeafData with ValueIndex template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM> struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafIndexBase { static_assert(sizeof(CoordT) == sizeof(Coord), "Mismatching sizeof"); static_assert(sizeof(MaskT<LOG2DIM>) == sizeof(Mask<LOG2DIM>), "Mismatching sizeof"); using ValueType = uint64_t; using FloatType = uint64_t; using ArrayType = void; // type used for the internal mValue array - void means missing static constexpr bool FIXED_SIZE = true; CoordT mBBoxMin; // 12B. uint8_t mBBoxDif[3]; // 3B. uint8_t mFlags; // 1B. bit0: skip render?, bit1: has bbox?, bit3: unused, bit4: has stats, bits5,6,7: bit-width for FpN MaskT<LOG2DIM> mValueMask; // LOG2DIM(3): 64B. 
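    // Note: mOffset (below) is the linear index of the first value indexed by this leaf node, and mPrefixSum packs
    //       seven 9-bit running counts of the set bits in the first seven 64-bit words of mValueMask, which lets
    //       ValueOnIndex leaves compute value offsets without scanning the entire mask.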
uint64_t mOffset, mPrefixSum; // 8B offset to first value in this leaf node and 9-bit prefix sum __hostdev__ static constexpr uint32_t padding() { return sizeof(LeafIndexBase) - (12u + 3u + 1u + sizeof(MaskT<LOG2DIM>) + 2 * 8u); } __hostdev__ static uint64_t memUsage() { return sizeof(LeafIndexBase); } __hostdev__ bool hasStats() const { return mFlags & (uint8_t(1) << 4); } // return the offset to the first value indexed by this leaf node __hostdev__ const uint64_t& firstOffset() const { return mOffset; } __hostdev__ void setMin(const ValueType&) {} // no-op __hostdev__ void setMax(const ValueType&) {} // no-op __hostdev__ void setAvg(const FloatType&) {} // no-op __hostdev__ void setDev(const FloatType&) {} // no-op __hostdev__ void setOn(uint32_t offset) { mValueMask.setOn(offset); } template<typename T> __hostdev__ void setOrigin(const T& ijk) { mBBoxMin = ijk; } protected: /// @brief This class should be used as an abstract class and only constructed or deleted via child classes LeafIndexBase() = default; LeafIndexBase(const LeafIndexBase&) = default; LeafIndexBase& operator=(const LeafIndexBase&) = default; ~LeafIndexBase() = default; }; // LeafIndexBase // --------------------------> LeafData<ValueIndex> <------------------------------------ // Partial template specialization of LeafData with ValueIndex template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM> struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData<ValueIndex, CoordT, MaskT, LOG2DIM> : public LeafIndexBase<CoordT, MaskT, LOG2DIM> { using BaseT = LeafIndexBase<CoordT, MaskT, LOG2DIM>; using BuildType = ValueIndex; // return the total number of values indexed by this leaf node, excluding the optional 4 stats __hostdev__ static uint32_t valueCount() { return uint32_t(512); } // 8^3 = 2^9 // return the offset to the last value indexed by this leaf node (disregarding optional stats) __hostdev__ uint64_t lastOffset() const { return BaseT::mOffset + 511u; } // 2^9 - 1 // if stats are available, they are always placed after the last voxel value in this leaf node __hostdev__ uint64_t getMin() const { return this->hasStats() ? BaseT::mOffset + 512u : 0u; } __hostdev__ uint64_t getMax() const { return this->hasStats() ? BaseT::mOffset + 513u : 0u; } __hostdev__ uint64_t getAvg() const { return this->hasStats() ? BaseT::mOffset + 514u : 0u; } __hostdev__ uint64_t getDev() const { return this->hasStats() ? BaseT::mOffset + 515u : 0u; } __hostdev__ uint64_t getValue(uint32_t i) const { return BaseT::mOffset + i; } // dense leaf node with active and inactive voxels }; // LeafData<ValueIndex> // --------------------------> LeafData<ValueOnIndex> <------------------------------------ template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM> struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData<ValueOnIndex, CoordT, MaskT, LOG2DIM> : public LeafIndexBase<CoordT, MaskT, LOG2DIM> { using BaseT = LeafIndexBase<CoordT, MaskT, LOG2DIM>; using BuildType = ValueOnIndex; __hostdev__ uint32_t valueCount() const { return util::countOn(BaseT::mValueMask.words()[7]) + (BaseT::mPrefixSum >> 54u & 511u); // last 9 bits of mPrefixSum do not account for the last word in mValueMask } __hostdev__ uint64_t lastOffset() const { return BaseT::mOffset + this->valueCount() - 1u; } __hostdev__ uint64_t getMin() const { return this->hasStats() ? this->lastOffset() + 1u : 0u; } __hostdev__ uint64_t getMax() const { return this->hasStats() ? 
this->lastOffset() + 2u : 0u; } __hostdev__ uint64_t getAvg() const { return this->hasStats() ? this->lastOffset() + 3u : 0u; } __hostdev__ uint64_t getDev() const { return this->hasStats() ? this->lastOffset() + 4u : 0u; } __hostdev__ uint64_t getValue(uint32_t i) const { //return mValueMask.isOn(i) ? mOffset + mValueMask.countOn(i) : 0u;// for debugging uint32_t n = i >> 6; const uint64_t w = BaseT::mValueMask.words()[n], mask = uint64_t(1) << (i & 63u); if (!(w & mask)) return uint64_t(0); // if i'th value is inactive return offset to background value uint64_t sum = BaseT::mOffset + util::countOn(w & (mask - 1u)); if (n--) sum += BaseT::mPrefixSum >> (9u * n) & 511u; return sum; } }; // LeafData<ValueOnIndex> // --------------------------> LeafData<ValueIndexMask> <------------------------------------ template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM> struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData<ValueIndexMask, CoordT, MaskT, LOG2DIM> : public LeafData<ValueIndex, CoordT, MaskT, LOG2DIM> { using BuildType = ValueIndexMask; MaskT<LOG2DIM> mMask; __hostdev__ static uint64_t memUsage() { return sizeof(LeafData); } __hostdev__ bool isMaskOn(uint32_t offset) const { return mMask.isOn(offset); } __hostdev__ void setMask(uint32_t offset, bool v) { mMask.set(offset, v); } }; // LeafData<ValueIndexMask> template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM> struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData<ValueOnIndexMask, CoordT, MaskT, LOG2DIM> : public LeafData<ValueOnIndex, CoordT, MaskT, LOG2DIM> { using BuildType = ValueOnIndexMask; MaskT<LOG2DIM> mMask; __hostdev__ static uint64_t memUsage() { return sizeof(LeafData); } __hostdev__ bool isMaskOn(uint32_t offset) const { return mMask.isOn(offset); } __hostdev__ void setMask(uint32_t offset, bool v) { mMask.set(offset, v); } }; // LeafData<ValueOnIndexMask> // --------------------------> LeafData<Point> <------------------------------------ template<typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM> struct NANOVDB_ALIGN(NANOVDB_DATA_ALIGNMENT) LeafData<Point, CoordT, MaskT, LOG2DIM> { static_assert(sizeof(CoordT) == sizeof(Coord), "Mismatching sizeof"); static_assert(sizeof(MaskT<LOG2DIM>) == sizeof(Mask<LOG2DIM>), "Mismatching sizeof"); using ValueType = uint64_t; using BuildType = Point; using FloatType = typename FloatTraits<ValueType>::FloatType; using ArrayType = uint16_t; // type used for the internal mValue array static constexpr bool FIXED_SIZE = true; CoordT mBBoxMin; // 12B. uint8_t mBBoxDif[3]; // 3B. uint8_t mFlags; // 1B. bit0: skip render?, bit1: has bbox?, bit3: unused, bit4: has stats, bits5,6,7: bit-width for FpN MaskT<LOG2DIM> mValueMask; // LOG2DIM(3): 64B. uint64_t mOffset; // 8B uint64_t mPointCount; // 8B alignas(32) uint16_t mValues[1u << 3 * LOG2DIM]; // 1KB // no padding /// @brief Return padding of this class in bytes, due to aliasing and 32B alignment /// /// @note The extra bytes are not necessarily at the end, but can come from aliasing of individual data members. __hostdev__ static constexpr uint32_t padding() { return sizeof(LeafData) - (12u + 3u + 1u + sizeof(MaskT<LOG2DIM>) + 2 * 8u + (1u << 3 * LOG2DIM) * 2u); } __hostdev__ static uint64_t memUsage() { return sizeof(LeafData); } __hostdev__ uint64_t offset() const { return mOffset; } __hostdev__ uint64_t pointCount() const { return mPointCount; } __hostdev__ uint64_t first(uint32_t i) const { return i ? 
uint64_t(mValues[i - 1u]) + mOffset : mOffset; } __hostdev__ uint64_t last(uint32_t i) const { return uint64_t(mValues[i]) + mOffset; } __hostdev__ uint64_t getValue(uint32_t i) const { return uint64_t(mValues[i]); } __hostdev__ void setValueOnly(uint32_t offset, uint16_t value) { mValues[offset] = value; } __hostdev__ void setValue(uint32_t offset, uint16_t value) { mValueMask.setOn(offset); mValues[offset] = value; } __hostdev__ void setOn(uint32_t offset) { mValueMask.setOn(offset); } __hostdev__ ValueType getMin() const { return mOffset; } __hostdev__ ValueType getMax() const { return mPointCount; } __hostdev__ FloatType getAvg() const { return 0.0f; } __hostdev__ FloatType getDev() const { return 0.0f; } __hostdev__ void setMin(const ValueType&) {} __hostdev__ void setMax(const ValueType&) {} __hostdev__ void setAvg(const FloatType&) {} __hostdev__ void setDev(const FloatType&) {} template<typename T> __hostdev__ void setOrigin(const T& ijk) { mBBoxMin = ijk; } /// @brief This class cannot be constructed or deleted LeafData() = delete; LeafData(const LeafData&) = delete; LeafData& operator=(const LeafData&) = delete; ~LeafData() = delete; }; // LeafData<Point> // --------------------------> LeafNode<T> <------------------------------------ /// @brief Leaf nodes of the VDB tree. (defaults to 8x8x8 = 512 voxels) template<typename BuildT, typename CoordT = Coord, template<uint32_t> class MaskT = Mask, uint32_t Log2Dim = 3> class LeafNode : public LeafData<BuildT, CoordT, MaskT, Log2Dim> { public: struct ChildNodeType { static constexpr uint32_t TOTAL = 0; static constexpr uint32_t DIM = 1; __hostdev__ static uint32_t dim() { return 1u; } }; // Voxel using LeafNodeType = LeafNode<BuildT, CoordT, MaskT, Log2Dim>; using DataType = LeafData<BuildT, CoordT, MaskT, Log2Dim>; using ValueType = typename DataType::ValueType; using FloatType = typename DataType::FloatType; using BuildType = typename DataType::BuildType; using CoordType = CoordT; static constexpr bool FIXED_SIZE = DataType::FIXED_SIZE; template<uint32_t LOG2> using MaskType = MaskT<LOG2>; template<bool ON> using MaskIterT = typename Mask<Log2Dim>::template Iterator<ON>; /// @brief Visits all active values in a leaf node class ValueOnIterator : public MaskIterT<true> { using BaseT = MaskIterT<true>; const LeafNode* mParent; public: __hostdev__ ValueOnIterator() : BaseT() , mParent(nullptr) { } __hostdev__ ValueOnIterator(const LeafNode* parent) : BaseT(parent->data()->mValueMask.beginOn()) , mParent(parent) { } ValueOnIterator& operator=(const ValueOnIterator&) = default; __hostdev__ ValueType operator*() const { NANOVDB_ASSERT(*this); return mParent->getValue(BaseT::pos()); } __hostdev__ CoordT getCoord() const { NANOVDB_ASSERT(*this); return mParent->offsetToGlobalCoord(BaseT::pos()); } }; // Member class ValueOnIterator __hostdev__ ValueOnIterator beginValueOn() const { return ValueOnIterator(this); } __hostdev__ ValueOnIterator cbeginValueOn() const { return ValueOnIterator(this); } /// @brief Visits all inactive values in a leaf node class ValueOffIterator : public MaskIterT<false> { using BaseT = MaskIterT<false>; const LeafNode* mParent; public: __hostdev__ ValueOffIterator() : BaseT() , mParent(nullptr) { } __hostdev__ ValueOffIterator(const LeafNode* parent) : BaseT(parent->data()->mValueMask.beginOff()) , mParent(parent) { } ValueOffIterator& operator=(const ValueOffIterator&) = default; __hostdev__ ValueType operator*() const { NANOVDB_ASSERT(*this); return mParent->getValue(BaseT::pos()); } __hostdev__ CoordT getCoord() 
const { NANOVDB_ASSERT(*this); return mParent->offsetToGlobalCoord(BaseT::pos()); } }; // Member class ValueOffIterator __hostdev__ ValueOffIterator beginValueOff() const { return ValueOffIterator(this); } __hostdev__ ValueOffIterator cbeginValueOff() const { return ValueOffIterator(this); } /// @brief Visits all values in a leaf node, i.e. both active and inactive values class ValueIterator { const LeafNode* mParent; uint32_t mPos; public: __hostdev__ ValueIterator() : mParent(nullptr) , mPos(1u << 3 * Log2Dim) { } __hostdev__ ValueIterator(const LeafNode* parent) : mParent(parent) , mPos(0) { NANOVDB_ASSERT(parent); } ValueIterator& operator=(const ValueIterator&) = default; __hostdev__ ValueType operator*() const { NANOVDB_ASSERT(*this); return mParent->getValue(mPos); } __hostdev__ CoordT getCoord() const { NANOVDB_ASSERT(*this); return mParent->offsetToGlobalCoord(mPos); } __hostdev__ bool isActive() const { NANOVDB_ASSERT(*this); return mParent->isActive(mPos); } __hostdev__ operator bool() const { return mPos < (1u << 3 * Log2Dim); } __hostdev__ ValueIterator& operator++() { ++mPos; return *this; } __hostdev__ ValueIterator operator++(int) { auto tmp = *this; ++(*this); return tmp; } }; // Member class ValueIterator __hostdev__ ValueIterator beginValue() const { return ValueIterator(this); } __hostdev__ ValueIterator cbeginValueAll() const { return ValueIterator(this); } static_assert(util::is_same<ValueType, typename BuildToValueMap<BuildType>::Type>::value, "Mismatching BuildType"); static constexpr uint32_t LOG2DIM = Log2Dim; static constexpr uint32_t TOTAL = LOG2DIM; // needed by parent nodes static constexpr uint32_t DIM = 1u << TOTAL; // number of voxels along each axis of this node static constexpr uint32_t SIZE = 1u << 3 * LOG2DIM; // total number of voxels represented by this node static constexpr uint32_t MASK = (1u << LOG2DIM) - 1u; // mask for bit operations static constexpr uint32_t LEVEL = 0; // level 0 = leaf static constexpr uint64_t NUM_VALUES = uint64_t(1) << (3 * TOTAL); // total voxel count represented by this node __hostdev__ DataType* data() { return reinterpret_cast<DataType*>(this); } __hostdev__ const DataType* data() const { return reinterpret_cast<const DataType*>(this); } /// @brief Return a const reference to the bit mask of active voxels in this leaf node __hostdev__ const MaskType<LOG2DIM>& valueMask() const { return DataType::mValueMask; } __hostdev__ const MaskType<LOG2DIM>& getValueMask() const { return DataType::mValueMask; } /// @brief Return a const reference to the minimum active value encoded in this leaf node __hostdev__ ValueType minimum() const { return DataType::getMin(); } /// @brief Return a const reference to the maximum active value encoded in this leaf node __hostdev__ ValueType maximum() const { return DataType::getMax(); } /// @brief Return a const reference to the average of all the active values encoded in this leaf node __hostdev__ FloatType average() const { return DataType::getAvg(); } /// @brief Return the variance of all the active values encoded in this leaf node __hostdev__ FloatType variance() const { return Pow2(DataType::getDev()); } /// @brief Return a const reference to the standard deviation of all the active values encoded in this leaf node __hostdev__ FloatType stdDeviation() const { return DataType::getDev(); } __hostdev__ uint8_t flags() const { return DataType::mFlags; } /// @brief Return the origin in index space of this leaf node __hostdev__ CoordT origin() const { return DataType::mBBoxMin & ~MASK; } /// @brief 
Compute the local coordinates from a linear offset /// @param n Linear offset into this nodes dense table /// @return Local (vs global) 3D coordinates __hostdev__ static CoordT OffsetToLocalCoord(uint32_t n) { NANOVDB_ASSERT(n < SIZE); const uint32_t m = n & ((1 << 2 * LOG2DIM) - 1); return CoordT(n >> 2 * LOG2DIM, m >> LOG2DIM, m & MASK); } /// @brief Converts (in place) a local index coordinate to a global index coordinate __hostdev__ void localToGlobalCoord(Coord& ijk) const { ijk += this->origin(); } __hostdev__ CoordT offsetToGlobalCoord(uint32_t n) const { return OffsetToLocalCoord(n) + this->origin(); } /// @brief Return the dimension, in index space, of this leaf node (typically 8 as for openvdb leaf nodes!) __hostdev__ static uint32_t dim() { return 1u << LOG2DIM; } /// @brief Return the bounding box in index space of active values in this leaf node __hostdev__ math::BBox<CoordT> bbox() const { math::BBox<CoordT> bbox(DataType::mBBoxMin, DataType::mBBoxMin); if (this->hasBBox()) { bbox.max()[0] += DataType::mBBoxDif[0]; bbox.max()[1] += DataType::mBBoxDif[1]; bbox.max()[2] += DataType::mBBoxDif[2]; } else { // very rare case bbox = math::BBox<CoordT>(); // invalid } return bbox; } /// @brief Return the total number of voxels (e.g. values) encoded in this leaf node __hostdev__ static uint32_t voxelCount() { return 1u << (3 * LOG2DIM); } __hostdev__ static uint32_t padding() { return DataType::padding(); } /// @brief return memory usage in bytes for the leaf node __hostdev__ uint64_t memUsage() const { return DataType::memUsage(); } /// @brief This class cannot be constructed or deleted LeafNode() = delete; LeafNode(const LeafNode&) = delete; LeafNode& operator=(const LeafNode&) = delete; ~LeafNode() = delete; /// @brief Return the voxel value at the given offset. __hostdev__ ValueType getValue(uint32_t offset) const { return DataType::getValue(offset); } /// @brief Return the voxel value at the given coordinate. __hostdev__ ValueType getValue(const CoordT& ijk) const { return DataType::getValue(CoordToOffset(ijk)); } /// @brief Return the first value in this leaf node. __hostdev__ ValueType getFirstValue() const { return this->getValue(0); } /// @brief Return the last value in this leaf node. __hostdev__ ValueType getLastValue() const { return this->getValue(SIZE - 1); } /// @brief Sets the value at the specified location and activate its state. /// /// @note This is safe since it does not change the topology of the tree (unlike setValue methods on the other nodes) __hostdev__ void setValue(const CoordT& ijk, const ValueType& v) { DataType::setValue(CoordToOffset(ijk), v); } /// @brief Sets the value at the specified location but leaves its state unchanged. /// /// @note This is safe since it does not change the topology of the tree (unlike setValue methods on the other nodes) __hostdev__ void setValueOnly(uint32_t offset, const ValueType& v) { DataType::setValueOnly(offset, v); } __hostdev__ void setValueOnly(const CoordT& ijk, const ValueType& v) { DataType::setValueOnly(CoordToOffset(ijk), v); } /// @brief Return @c true if the voxel value at the given coordinate is active. __hostdev__ bool isActive(const CoordT& ijk) const { return DataType::mValueMask.isOn(CoordToOffset(ijk)); } __hostdev__ bool isActive(uint32_t n) const { return DataType::mValueMask.isOn(n); } /// @brief Return @c true if any of the voxel value are active in this leaf node. 
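    /// @note Determined directly from the value mask rather than from the flag bit used by hasBBox();
    ///       see the commented-out alternative in the implementation below.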
__hostdev__ bool isActive() const { //NANOVDB_ASSERT( bool(DataType::mFlags & uint8_t(2)) != DataType::mValueMask.isOff() ); //return DataType::mFlags & uint8_t(2); return !DataType::mValueMask.isOff(); } __hostdev__ bool hasBBox() const { return DataType::mFlags & uint8_t(2); } /// @brief Return @c true if the voxel value at the given coordinate is active and updates @c v with the value. __hostdev__ bool probeValue(const CoordT& ijk, ValueType& v) const { const uint32_t n = CoordToOffset(ijk); v = DataType::getValue(n); return DataType::mValueMask.isOn(n); } __hostdev__ const LeafNode* probeLeaf(const CoordT&) const { return this; } /// @brief Return the linear offset corresponding to the given coordinate __hostdev__ static uint32_t CoordToOffset(const CoordT& ijk) { return ((ijk[0] & MASK) << (2 * LOG2DIM)) | ((ijk[1] & MASK) << LOG2DIM) | (ijk[2] & MASK); } /// @brief Updates the local bounding box of active voxels in this node. Return true if bbox was updated. /// /// @warning It assumes that the origin and value mask have already been set. /// /// @details This method is based on few (intrinsic) bit operations and hence is relatively fast. /// However, it should only only be called if either the value mask has changed or if the /// active bounding box is still undefined. e.g. during construction of this node. __hostdev__ bool updateBBox(); template<typename OpT, typename... ArgsT> __hostdev__ auto get(const CoordType& ijk, ArgsT&&... args) const { return OpT::get(*this, CoordToOffset(ijk), args...); } template<typename OpT, typename... ArgsT> __hostdev__ auto get(const uint32_t n, ArgsT&&... args) const { return OpT::get(*this, n, args...); } template<typename OpT, typename... ArgsT> __hostdev__ auto set(const CoordType& ijk, ArgsT&&... args) { return OpT::set(*this, CoordToOffset(ijk), args...); } template<typename OpT, typename... ArgsT> __hostdev__ auto set(const uint32_t n, ArgsT&&... args) { return OpT::set(*this, n, args...); } private: static_assert(sizeof(DataType) % NANOVDB_DATA_ALIGNMENT == 0, "sizeof(LeafData) is misaligned"); template<typename, int, int, int> friend class ReadAccessor; template<typename> friend class RootNode; template<typename, uint32_t> friend class InternalNode; #ifndef NANOVDB_NEW_ACCESSOR_METHODS /// @brief Private method to return a voxel value and update a (dummy) ReadAccessor template<typename AccT> __hostdev__ ValueType getValueAndCache(const CoordT& ijk, const AccT&) const { return this->getValue(ijk); } /// @brief Return the node information. 
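    /// @note At the leaf level nothing is inserted into the ReadAccessor, so both arguments are unused.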
template<typename AccT> __hostdev__ typename AccT::NodeInfo getNodeInfoAndCache(const CoordType& /*ijk*/, const AccT& /*acc*/) const { using NodeInfoT = typename AccT::NodeInfo; return NodeInfoT{LEVEL, this->dim(), this->minimum(), this->maximum(), this->average(), this->stdDeviation(), this->bbox()[0], this->bbox()[1]}; } template<typename AccT> __hostdev__ bool isActiveAndCache(const CoordT& ijk, const AccT&) const { return this->isActive(ijk); } template<typename AccT> __hostdev__ bool probeValueAndCache(const CoordT& ijk, ValueType& v, const AccT&) const { return this->probeValue(ijk, v); } template<typename AccT> __hostdev__ const LeafNode* probeLeafAndCache(const CoordT&, const AccT&) const { return this; } #endif template<typename RayT, typename AccT> __hostdev__ uint32_t getDimAndCache(const CoordT&, const RayT& /*ray*/, const AccT&) const { if (DataType::mFlags & uint8_t(1u)) return this->dim(); // skip this node if the 1st bit is set //if (!ray.intersects( this->bbox() )) return 1 << LOG2DIM; return ChildNodeType::dim(); } template<typename OpT, typename AccT, typename... ArgsT> __hostdev__ auto //__hostdev__ decltype(OpT::get(util::declval<const LeafNode&>(), util::declval<uint32_t>(), util::declval<ArgsT>()...)) getAndCache(const CoordType& ijk, const AccT&, ArgsT&&... args) const { return OpT::get(*this, CoordToOffset(ijk), args...); } template<typename OpT, typename AccT, typename... ArgsT> //__hostdev__ auto // occasionally fails with NVCC __hostdev__ decltype(OpT::set(util::declval<LeafNode&>(), util::declval<uint32_t>(), util::declval<ArgsT>()...)) setAndCache(const CoordType& ijk, const AccT&, ArgsT&&... args) { return OpT::set(*this, CoordToOffset(ijk), args...); } }; // LeafNode class // --------------------------> LeafNode<T>::updateBBox <------------------------------------ template<typename ValueT, typename CoordT, template<uint32_t> class MaskT, uint32_t LOG2DIM> __hostdev__ inline bool LeafNode<ValueT, CoordT, MaskT, LOG2DIM>::updateBBox() { static_assert(LOG2DIM == 3, "LeafNode::updateBBox: only supports LOGDIM = 3!"); if (DataType::mValueMask.isOff()) { DataType::mFlags &= ~uint8_t(2); // set 2nd bit off, which indicates that this nodes has no bbox return false; } auto update = [&](uint32_t min, uint32_t max, int axis) { NANOVDB_ASSERT(min <= max && max < 8); DataType::mBBoxMin[axis] = (DataType::mBBoxMin[axis] & ~MASK) + int(min); DataType::mBBoxDif[axis] = uint8_t(max - min); }; uint64_t *w = DataType::mValueMask.words(), word64 = *w; uint32_t Xmin = word64 ? 
0u : 8u, Xmax = Xmin; for (int i = 1; i < 8; ++i) { // last loop over 8 64 bit words if (w[i]) { // skip if word has no set bits word64 |= w[i]; // union 8 x 64 bits words into one 64 bit word if (Xmin == 8) Xmin = i; // only set once Xmax = i; } } NANOVDB_ASSERT(word64); update(Xmin, Xmax, 0); update(util::findLowestOn(word64) >> 3, util::findHighestOn(word64) >> 3, 1); const uint32_t *p = reinterpret_cast<const uint32_t*>(&word64), word32 = p[0] | p[1]; const uint16_t *q = reinterpret_cast<const uint16_t*>(&word32), word16 = q[0] | q[1]; const uint8_t *b = reinterpret_cast<const uint8_t*>(&word16), byte = b[0] | b[1]; NANOVDB_ASSERT(byte); update(util::findLowestOn(static_cast<uint32_t>(byte)), util::findHighestOn(static_cast<uint32_t>(byte)), 2); DataType::mFlags |= uint8_t(2); // set 2nd bit on, which indicates that this nodes has a bbox return true; } // LeafNode::updateBBox // --------------------------> Template specializations and traits <------------------------------------ /// @brief Template specializations to the default configuration used in OpenVDB: /// Root -> 32^3 -> 16^3 -> 8^3 template<typename BuildT> using NanoLeaf = LeafNode<BuildT, Coord, Mask, 3>; template<typename BuildT> using NanoLower = InternalNode<NanoLeaf<BuildT>, 4>; template<typename BuildT> using NanoUpper = InternalNode<NanoLower<BuildT>, 5>; template<typename BuildT> using NanoRoot = RootNode<NanoUpper<BuildT>>; template<typename BuildT> using NanoTree = Tree<NanoRoot<BuildT>>; template<typename BuildT> using NanoGrid = Grid<NanoTree<BuildT>>; /// @brief Trait to map from LEVEL to node type template<typename BuildT, int LEVEL> struct NanoNode; // Partial template specialization of above Node struct template<typename BuildT> struct NanoNode<BuildT, 0> { using Type = NanoLeaf<BuildT>; using type = NanoLeaf<BuildT>; }; template<typename BuildT> struct NanoNode<BuildT, 1> { using Type = NanoLower<BuildT>; using type = NanoLower<BuildT>; }; template<typename BuildT> struct NanoNode<BuildT, 2> { using Type = NanoUpper<BuildT>; using type = NanoUpper<BuildT>; }; template<typename BuildT> struct NanoNode<BuildT, 3> { using Type = NanoRoot<BuildT>; using type = NanoRoot<BuildT>; }; using FloatTree = NanoTree<float>; using Fp4Tree = NanoTree<Fp4>; using Fp8Tree = NanoTree<Fp8>; using Fp16Tree = NanoTree<Fp16>; using FpNTree = NanoTree<FpN>; using DoubleTree = NanoTree<double>; using Int32Tree = NanoTree<int32_t>; using UInt32Tree = NanoTree<uint32_t>; using Int64Tree = NanoTree<int64_t>; using Vec3fTree = NanoTree<Vec3f>; using Vec3dTree = NanoTree<Vec3d>; using Vec4fTree = NanoTree<Vec4f>; using Vec4dTree = NanoTree<Vec4d>; using Vec3ITree = NanoTree<Vec3i>; using MaskTree = NanoTree<ValueMask>; using BoolTree = NanoTree<bool>; using IndexTree = NanoTree<ValueIndex>; using OnIndexTree = NanoTree<ValueOnIndex>; using IndexMaskTree = NanoTree<ValueIndexMask>; using OnIndexMaskTree = NanoTree<ValueOnIndexMask>; using FloatGrid = Grid<FloatTree>; using Fp4Grid = Grid<Fp4Tree>; using Fp8Grid = Grid<Fp8Tree>; using Fp16Grid = Grid<Fp16Tree>; using FpNGrid = Grid<FpNTree>; using DoubleGrid = Grid<DoubleTree>; using Int32Grid = Grid<Int32Tree>; using UInt32Grid = Grid<UInt32Tree>; using Int64Grid = Grid<Int64Tree>; using Vec3fGrid = Grid<Vec3fTree>; using Vec3dGrid = Grid<Vec3dTree>; using Vec4fGrid = Grid<Vec4fTree>; using Vec4dGrid = Grid<Vec4dTree>; using Vec3IGrid = Grid<Vec3ITree>; using MaskGrid = Grid<MaskTree>; using BoolGrid = Grid<BoolTree>; using PointGrid = Grid<Point>; using IndexGrid = Grid<IndexTree>; using 
OnIndexGrid = Grid<OnIndexTree>; using IndexMaskGrid = Grid<IndexMaskTree>; using OnIndexMaskGrid = Grid<OnIndexMaskTree>; // --------------------------> callNanoGrid <------------------------------------ /** * @brief Below is an example of the struct used for generic programming with callNanoGrid * @details For an example see "struct Crc32TailOld" in nanovdb/tools/GridChecksum.h or * "struct IsNanoGridValid" in nanovdb/tools/GridValidator.h * @code * struct OpT { // define these two static functions with non-const GridData * template <typename BuildT> * static auto known( GridData *gridData, args...); * static auto unknown( GridData *gridData, args...); * // or alternatively these two static functions with const GridData * template <typename BuildT> * static auto known(const GridData *gridData, args...); * static auto unknown(const GridData *gridData, args...); * }; * @endcode * * @brief Here is an example of how to use callNanoGrid in client code * @code * return callNanoGrid<OpT>(gridData, args...); * @endcode */ /// @brief Use this function, which depends a pointer to GridData, to call /// other functions that depend on a NanoGrid of a known ValueType. /// @details This function allows for generic programming by converting GridData /// to a NanoGrid of the type encoded in GridData::mGridType. template<typename OpT, typename GridDataT, typename... ArgsT> auto callNanoGrid(GridDataT *gridData, ArgsT&&... args) { static_assert(util::is_same<GridDataT, GridData, const GridData>::value, "Expected gridData to be of type GridData* or const GridData*"); switch (gridData->mGridType){ case GridType::Float: return OpT::template known<float>(gridData, args...); case GridType::Double: return OpT::template known<double>(gridData, args...); case GridType::Int16: return OpT::template known<int16_t>(gridData, args...); case GridType::Int32: return OpT::template known<int32_t>(gridData, args...); case GridType::Int64: return OpT::template known<int64_t>(gridData, args...); case GridType::Vec3f: return OpT::template known<Vec3f>(gridData, args...); case GridType::Vec3d: return OpT::template known<Vec3d>(gridData, args...); case GridType::UInt32: return OpT::template known<uint32_t>(gridData, args...); case GridType::Mask: return OpT::template known<ValueMask>(gridData, args...); case GridType::Index: return OpT::template known<ValueIndex>(gridData, args...); case GridType::OnIndex: return OpT::template known<ValueOnIndex>(gridData, args...); case GridType::IndexMask: return OpT::template known<ValueIndexMask>(gridData, args...); case GridType::OnIndexMask: return OpT::template known<ValueOnIndexMask>(gridData, args...); case GridType::Boolean: return OpT::template known<bool>(gridData, args...); case GridType::RGBA8: return OpT::template known<math::Rgba8>(gridData, args...); case GridType::Fp4: return OpT::template known<Fp4>(gridData, args...); case GridType::Fp8: return OpT::template known<Fp8>(gridData, args...); case GridType::Fp16: return OpT::template known<Fp16>(gridData, args...); case GridType::FpN: return OpT::template known<FpN>(gridData, args...); case GridType::Vec4f: return OpT::template known<Vec4f>(gridData, args...); case GridType::Vec4d: return OpT::template known<Vec4d>(gridData, args...); case GridType::UInt8: return OpT::template known<uint8_t>(gridData, args...); default: return OpT::unknown(gridData, args...); } }// callNanoGrid // --------------------------> ReadAccessor <------------------------------------ /// @brief A read-only value accessor with three levels of node caching. 
This allows for /// inverse tree traversal during lookup, which is on average significantly faster /// than calling the equivalent method on the tree (i.e. top-down traversal). /// /// @note By virtue of the fact that a value accessor accelerates random access operations /// by re-using cached access patterns, this accessor should be reused for multiple access /// operations. In other words, never create an instance of this accessor for a single /// access only. In general avoid single access operations with this accessor, and /// if that is not possible call the corresponding method on the tree instead. /// /// @warning Since this ReadAccessor internally caches raw pointers to the nodes of the tree /// structure, it is not safe to copy between host and device, or even to share among /// multiple threads on the same host or device. However, it is light-weight so simply /// instantiate one per thread (on the host and/or device). /// /// @details Used to accelerate random access into a VDB tree. Provides on average /// O(1) random access operations by means of inverse tree traversal, /// which amortizes the non-constant time complexity of the root node. template<typename BuildT> class ReadAccessor<BuildT, -1, -1, -1> { using GridT = NanoGrid<BuildT>; // grid using TreeT = NanoTree<BuildT>; // tree using RootT = NanoRoot<BuildT>; // root node using LeafT = NanoLeaf<BuildT>; // Leaf node using FloatType = typename RootT::FloatType; using CoordValueType = typename RootT::CoordType::ValueType; mutable const RootT* mRoot; // 8 bytes (mutable to allow for access methods to be const) public: using BuildType = BuildT; using ValueType = typename RootT::ValueType; using CoordType = typename RootT::CoordType; static const int CacheLevels = 0; #ifndef NANOVDB_NEW_ACCESSOR_METHODS struct NodeInfo { uint32_t mLevel; // 4B uint32_t mDim; // 4B ValueType mMinimum; // typically 4B ValueType mMaximum; // typically 4B FloatType mAverage; // typically 4B FloatType mStdDevi; // typically 4B CoordType mBBoxMin; // 3*4B CoordType mBBoxMax; // 3*4B }; #endif /// @brief Constructor from a root node __hostdev__ ReadAccessor(const RootT& root) : mRoot{&root} { } /// @brief Constructor from a grid __hostdev__ ReadAccessor(const GridT& grid) : ReadAccessor(grid.tree().root()) { } /// @brief Constructor from a tree __hostdev__ ReadAccessor(const TreeT& tree) : ReadAccessor(tree.root()) { } /// @brief Reset this accessor to its initial state, i.e.
with an empty cache /// @node Noop since this template specialization has no cache __hostdev__ void clear() {} __hostdev__ const RootT& root() const { return *mRoot; } /// @brief Defaults constructors ReadAccessor(const ReadAccessor&) = default; ~ReadAccessor() = default; ReadAccessor& operator=(const ReadAccessor&) = default; #ifdef NANOVDB_NEW_ACCESSOR_METHODS __hostdev__ ValueType getValue(const CoordType& ijk) const { return this->template get<GetValue<BuildT>>(ijk); } __hostdev__ ValueType getValue(int i, int j, int k) const { return this->template get<GetValue<BuildT>>(CoordType(i, j, k)); } __hostdev__ ValueType operator()(const CoordType& ijk) const { return this->template get<GetValue<BuildT>>(ijk); } __hostdev__ ValueType operator()(int i, int j, int k) const { return this->template get<GetValue<BuildT>>(CoordType(i, j, k)); } __hostdev__ auto getNodeInfo(const CoordType& ijk) const { return this->template get<GetNodeInfo<BuildT>>(ijk); } __hostdev__ bool isActive(const CoordType& ijk) const { return this->template get<GetState<BuildT>>(ijk); } __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const { return this->template get<ProbeValue<BuildT>>(ijk, v); } __hostdev__ const LeafT* probeLeaf(const CoordType& ijk) const { return this->template get<GetLeaf<BuildT>>(ijk); } #else // NANOVDB_NEW_ACCESSOR_METHODS __hostdev__ ValueType getValue(const CoordType& ijk) const { return mRoot->getValueAndCache(ijk, *this); } __hostdev__ ValueType getValue(int i, int j, int k) const { return this->getValue(CoordType(i, j, k)); } __hostdev__ ValueType operator()(const CoordType& ijk) const { return this->getValue(ijk); } __hostdev__ ValueType operator()(int i, int j, int k) const { return this->getValue(CoordType(i, j, k)); } __hostdev__ NodeInfo getNodeInfo(const CoordType& ijk) const { return mRoot->getNodeInfoAndCache(ijk, *this); } __hostdev__ bool isActive(const CoordType& ijk) const { return mRoot->isActiveAndCache(ijk, *this); } __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const { return mRoot->probeValueAndCache(ijk, v, *this); } __hostdev__ const LeafT* probeLeaf(const CoordType& ijk) const { return mRoot->probeLeafAndCache(ijk, *this); } #endif // NANOVDB_NEW_ACCESSOR_METHODS template<typename RayT> __hostdev__ uint32_t getDim(const CoordType& ijk, const RayT& ray) const { return mRoot->getDimAndCache(ijk, ray, *this); } template<typename OpT, typename... ArgsT> __hostdev__ auto get(const CoordType& ijk, ArgsT&&... args) const { return mRoot->template get<OpT>(ijk, args...); } template<typename OpT, typename... ArgsT> __hostdev__ auto set(const CoordType& ijk, ArgsT&&... args) const { return const_cast<RootT*>(mRoot)->template set<OpT>(ijk, args...); } private: /// @brief Allow nodes to insert themselves into the cache. template<typename> friend class RootNode; template<typename, uint32_t> friend class InternalNode; template<typename, typename, template<uint32_t> class, uint32_t> friend class LeafNode; /// @brief No-op template<typename NodeT> __hostdev__ void insert(const CoordType&, const NodeT*) const {} }; // ReadAccessor<ValueT, -1, -1, -1> class /// @brief Node caching at a single tree level template<typename BuildT, int LEVEL0> class ReadAccessor<BuildT, LEVEL0, -1, -1> //e.g. 
0, 1, 2 { static_assert(LEVEL0 >= 0 && LEVEL0 <= 2, "LEVEL0 should be 0, 1, or 2"); using GridT = NanoGrid<BuildT>; // grid using TreeT = NanoTree<BuildT>; using RootT = NanoRoot<BuildT>; // root node using LeafT = NanoLeaf<BuildT>; // Leaf node using NodeT = typename NodeTrait<TreeT, LEVEL0>::type; using CoordT = typename RootT::CoordType; using ValueT = typename RootT::ValueType; using FloatType = typename RootT::FloatType; using CoordValueType = typename RootT::CoordT::ValueType; // All member data are mutable to allow for access methods to be const mutable CoordT mKey; // 3*4 = 12 bytes mutable const RootT* mRoot; // 8 bytes mutable const NodeT* mNode; // 8 bytes public: using BuildType = BuildT; using ValueType = ValueT; using CoordType = CoordT; static const int CacheLevels = 1; #ifndef NANOVDB_NEW_ACCESSOR_METHODS using NodeInfo = typename ReadAccessor<ValueT, -1, -1, -1>::NodeInfo; #endif /// @brief Constructor from a root node __hostdev__ ReadAccessor(const RootT& root) : mKey(CoordType::max()) , mRoot(&root) , mNode(nullptr) { } /// @brief Constructor from a grid __hostdev__ ReadAccessor(const GridT& grid) : ReadAccessor(grid.tree().root()) { } /// @brief Constructor from a tree __hostdev__ ReadAccessor(const TreeT& tree) : ReadAccessor(tree.root()) { } /// @brief Reset this access to its initial state, i.e. with an empty cache __hostdev__ void clear() { mKey = CoordType::max(); mNode = nullptr; } __hostdev__ const RootT& root() const { return *mRoot; } /// @brief Defaults constructors ReadAccessor(const ReadAccessor&) = default; ~ReadAccessor() = default; ReadAccessor& operator=(const ReadAccessor&) = default; __hostdev__ bool isCached(const CoordType& ijk) const { return (ijk[0] & int32_t(~NodeT::MASK)) == mKey[0] && (ijk[1] & int32_t(~NodeT::MASK)) == mKey[1] && (ijk[2] & int32_t(~NodeT::MASK)) == mKey[2]; } #ifdef NANOVDB_NEW_ACCESSOR_METHODS __hostdev__ ValueType getValue(const CoordType& ijk) const { return this->template get<GetValue<BuildT>>(ijk); } __hostdev__ ValueType getValue(int i, int j, int k) const { return this->template get<GetValue<BuildT>>(CoordType(i, j, k)); } __hostdev__ ValueType operator()(const CoordType& ijk) const { return this->template get<GetValue<BuildT>>(ijk); } __hostdev__ ValueType operator()(int i, int j, int k) const { return this->template get<GetValue<BuildT>>(CoordType(i, j, k)); } __hostdev__ auto getNodeInfo(const CoordType& ijk) const { return this->template get<GetNodeInfo<BuildT>>(ijk); } __hostdev__ bool isActive(const CoordType& ijk) const { return this->template get<GetState<BuildT>>(ijk); } __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const { return this->template get<ProbeValue<BuildT>>(ijk, v); } __hostdev__ const LeafT* probeLeaf(const CoordType& ijk) const { return this->template get<GetLeaf<BuildT>>(ijk); } #else // NANOVDB_NEW_ACCESSOR_METHODS __hostdev__ ValueType getValue(const CoordType& ijk) const { if (this->isCached(ijk)) return mNode->getValueAndCache(ijk, *this); return mRoot->getValueAndCache(ijk, *this); } __hostdev__ ValueType getValue(int i, int j, int k) const { return this->getValue(CoordType(i, j, k)); } __hostdev__ ValueType operator()(const CoordType& ijk) const { return this->getValue(ijk); } __hostdev__ ValueType operator()(int i, int j, int k) const { return this->getValue(CoordType(i, j, k)); } __hostdev__ NodeInfo getNodeInfo(const CoordType& ijk) const { if (this->isCached(ijk)) return mNode->getNodeInfoAndCache(ijk, *this); return mRoot->getNodeInfoAndCache(ijk, *this); } __hostdev__ 
bool isActive(const CoordType& ijk) const { if (this->isCached(ijk)) return mNode->isActiveAndCache(ijk, *this); return mRoot->isActiveAndCache(ijk, *this); } __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const { if (this->isCached(ijk)) return mNode->probeValueAndCache(ijk, v, *this); return mRoot->probeValueAndCache(ijk, v, *this); } __hostdev__ const LeafT* probeLeaf(const CoordType& ijk) const { if (this->isCached(ijk)) return mNode->probeLeafAndCache(ijk, *this); return mRoot->probeLeafAndCache(ijk, *this); } #endif // NANOVDB_NEW_ACCESSOR_METHODS template<typename RayT> __hostdev__ uint32_t getDim(const CoordType& ijk, const RayT& ray) const { if (this->isCached(ijk)) return mNode->getDimAndCache(ijk, ray, *this); return mRoot->getDimAndCache(ijk, ray, *this); } template<typename OpT, typename... ArgsT> __hostdev__ auto get(const CoordType& ijk, ArgsT&&... args) const { if (this->isCached(ijk)) return mNode->template getAndCache<OpT>(ijk, *this, args...); return mRoot->template getAndCache<OpT>(ijk, *this, args...); } template<typename OpT, typename... ArgsT> __hostdev__ auto set(const CoordType& ijk, ArgsT&&... args) const { if (this->isCached(ijk)) return const_cast<NodeT*>(mNode)->template setAndCache<OpT>(ijk, *this, args...); return const_cast<RootT*>(mRoot)->template setAndCache<OpT>(ijk, *this, args...); } private: /// @brief Allow nodes to insert themselves into the cache. template<typename> friend class RootNode; template<typename, uint32_t> friend class InternalNode; template<typename, typename, template<uint32_t> class, uint32_t> friend class LeafNode; /// @brief Inserts a leaf node and key pair into this ReadAccessor __hostdev__ void insert(const CoordType& ijk, const NodeT* node) const { mKey = ijk & ~NodeT::MASK; mNode = node; } // no-op template<typename OtherNodeT> __hostdev__ void insert(const CoordType&, const OtherNodeT*) const {} }; // ReadAccessor<ValueT, LEVEL0> template<typename BuildT, int LEVEL0, int LEVEL1> class ReadAccessor<BuildT, LEVEL0, LEVEL1, -1> //e.g. 
(0,1), (1,2), (0,2) { static_assert(LEVEL0 >= 0 && LEVEL0 <= 2, "LEVEL0 must be 0, 1, 2"); static_assert(LEVEL1 >= 0 && LEVEL1 <= 2, "LEVEL1 must be 0, 1, 2"); static_assert(LEVEL0 < LEVEL1, "Level 0 must be lower than level 1"); using GridT = NanoGrid<BuildT>; // grid using TreeT = NanoTree<BuildT>; using RootT = NanoRoot<BuildT>; using LeafT = NanoLeaf<BuildT>; using Node1T = typename NodeTrait<TreeT, LEVEL0>::type; using Node2T = typename NodeTrait<TreeT, LEVEL1>::type; using CoordT = typename RootT::CoordType; using ValueT = typename RootT::ValueType; using FloatType = typename RootT::FloatType; using CoordValueType = typename RootT::CoordT::ValueType; // All member data are mutable to allow for access methods to be const #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY // 44 bytes total mutable CoordT mKey; // 3*4 = 12 bytes #else // 68 bytes total mutable CoordT mKeys[2]; // 2*3*4 = 24 bytes #endif mutable const RootT* mRoot; mutable const Node1T* mNode1; mutable const Node2T* mNode2; public: using BuildType = BuildT; using ValueType = ValueT; using CoordType = CoordT; static const int CacheLevels = 2; #ifndef NANOVDB_NEW_ACCESSOR_METHODS using NodeInfo = typename ReadAccessor<ValueT, -1, -1, -1>::NodeInfo; #endif /// @brief Constructor from a root node __hostdev__ ReadAccessor(const RootT& root) #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY : mKey(CoordType::max()) #else : mKeys{CoordType::max(), CoordType::max()} #endif , mRoot(&root) , mNode1(nullptr) , mNode2(nullptr) { } /// @brief Constructor from a grid __hostdev__ ReadAccessor(const GridT& grid) : ReadAccessor(grid.tree().root()) { } /// @brief Constructor from a tree __hostdev__ ReadAccessor(const TreeT& tree) : ReadAccessor(tree.root()) { } /// @brief Reset this access to its initial state, i.e. with an empty cache __hostdev__ void clear() { #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY mKey = CoordType::max(); #else mKeys[0] = mKeys[1] = CoordType::max(); #endif mNode1 = nullptr; mNode2 = nullptr; } __hostdev__ const RootT& root() const { return *mRoot; } /// @brief Defaults constructors ReadAccessor(const ReadAccessor&) = default; ~ReadAccessor() = default; ReadAccessor& operator=(const ReadAccessor&) = default; #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY __hostdev__ bool isCached1(CoordValueType dirty) const { if (!mNode1) return false; if (dirty & int32_t(~Node1T::MASK)) { mNode1 = nullptr; return false; } return true; } __hostdev__ bool isCached2(CoordValueType dirty) const { if (!mNode2) return false; if (dirty & int32_t(~Node2T::MASK)) { mNode2 = nullptr; return false; } return true; } __hostdev__ CoordValueType computeDirty(const CoordType& ijk) const { return (ijk[0] ^ mKey[0]) | (ijk[1] ^ mKey[1]) | (ijk[2] ^ mKey[2]); } #else __hostdev__ bool isCached1(const CoordType& ijk) const { return (ijk[0] & int32_t(~Node1T::MASK)) == mKeys[0][0] && (ijk[1] & int32_t(~Node1T::MASK)) == mKeys[0][1] && (ijk[2] & int32_t(~Node1T::MASK)) == mKeys[0][2]; } __hostdev__ bool isCached2(const CoordType& ijk) const { return (ijk[0] & int32_t(~Node2T::MASK)) == mKeys[1][0] && (ijk[1] & int32_t(~Node2T::MASK)) == mKeys[1][1] && (ijk[2] & int32_t(~Node2T::MASK)) == mKeys[1][2]; } #endif #ifdef NANOVDB_NEW_ACCESSOR_METHODS __hostdev__ ValueType getValue(const CoordType& ijk) const { return this->template get<GetValue<BuildT>>(ijk); } __hostdev__ ValueType getValue(int i, int j, int k) const { return this->template get<GetValue<BuildT>>(CoordType(i, j, k)); } __hostdev__ ValueType operator()(const CoordType& ijk) const { return this->template 
get<GetValue<BuildT>>(ijk); } __hostdev__ ValueType operator()(int i, int j, int k) const { return this->template get<GetValue<BuildT>>(CoordType(i, j, k)); } __hostdev__ auto getNodeInfo(const CoordType& ijk) const { return this->template get<GetNodeInfo<BuildT>>(ijk); } __hostdev__ bool isActive(const CoordType& ijk) const { return this->template get<GetState<BuildT>>(ijk); } __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const { return this->template get<ProbeValue<BuildT>>(ijk, v); } __hostdev__ const LeafT* probeLeaf(const CoordType& ijk) const { return this->template get<GetLeaf<BuildT>>(ijk); } #else // NANOVDB_NEW_ACCESSOR_METHODS __hostdev__ ValueType getValue(const CoordType& ijk) const { #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY const CoordValueType dirty = this->computeDirty(ijk); #else auto&& dirty = ijk; #endif if (this->isCached1(dirty)) { return mNode1->getValueAndCache(ijk, *this); } else if (this->isCached2(dirty)) { return mNode2->getValueAndCache(ijk, *this); } return mRoot->getValueAndCache(ijk, *this); } __hostdev__ ValueType operator()(const CoordType& ijk) const { return this->getValue(ijk); } __hostdev__ ValueType operator()(int i, int j, int k) const { return this->getValue(CoordType(i, j, k)); } __hostdev__ ValueType getValue(int i, int j, int k) const { return this->getValue(CoordType(i, j, k)); } __hostdev__ NodeInfo getNodeInfo(const CoordType& ijk) const { #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY const CoordValueType dirty = this->computeDirty(ijk); #else auto&& dirty = ijk; #endif if (this->isCached1(dirty)) { return mNode1->getNodeInfoAndCache(ijk, *this); } else if (this->isCached2(dirty)) { return mNode2->getNodeInfoAndCache(ijk, *this); } return mRoot->getNodeInfoAndCache(ijk, *this); } __hostdev__ bool isActive(const CoordType& ijk) const { #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY const CoordValueType dirty = this->computeDirty(ijk); #else auto&& dirty = ijk; #endif if (this->isCached1(dirty)) { return mNode1->isActiveAndCache(ijk, *this); } else if (this->isCached2(dirty)) { return mNode2->isActiveAndCache(ijk, *this); } return mRoot->isActiveAndCache(ijk, *this); } __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const { #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY const CoordValueType dirty = this->computeDirty(ijk); #else auto&& dirty = ijk; #endif if (this->isCached1(dirty)) { return mNode1->probeValueAndCache(ijk, v, *this); } else if (this->isCached2(dirty)) { return mNode2->probeValueAndCache(ijk, v, *this); } return mRoot->probeValueAndCache(ijk, v, *this); } __hostdev__ const LeafT* probeLeaf(const CoordType& ijk) const { #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY const CoordValueType dirty = this->computeDirty(ijk); #else auto&& dirty = ijk; #endif if (this->isCached1(dirty)) { return mNode1->probeLeafAndCache(ijk, *this); } else if (this->isCached2(dirty)) { return mNode2->probeLeafAndCache(ijk, *this); } return mRoot->probeLeafAndCache(ijk, *this); } #endif // NANOVDB_NEW_ACCESSOR_METHODS template<typename RayT> __hostdev__ uint32_t getDim(const CoordType& ijk, const RayT& ray) const { #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY const CoordValueType dirty = this->computeDirty(ijk); #else auto&& dirty = ijk; #endif if (this->isCached1(dirty)) { return mNode1->getDimAndCache(ijk, ray, *this); } else if (this->isCached2(dirty)) { return mNode2->getDimAndCache(ijk, ray, *this); } return mRoot->getDimAndCache(ijk, ray, *this); } template<typename OpT, typename... ArgsT> __hostdev__ auto get(const CoordType& ijk, ArgsT&&... 
args) const { #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY const CoordValueType dirty = this->computeDirty(ijk); #else auto&& dirty = ijk; #endif if (this->isCached1(dirty)) { return mNode1->template getAndCache<OpT>(ijk, *this, args...); } else if (this->isCached2(dirty)) { return mNode2->template getAndCache<OpT>(ijk, *this, args...); } return mRoot->template getAndCache<OpT>(ijk, *this, args...); } template<typename OpT, typename... ArgsT> __hostdev__ auto set(const CoordType& ijk, ArgsT&&... args) const { #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY const CoordValueType dirty = this->computeDirty(ijk); #else auto&& dirty = ijk; #endif if (this->isCached1(dirty)) { return const_cast<Node1T*>(mNode1)->template setAndCache<OpT>(ijk, *this, args...); } else if (this->isCached2(dirty)) { return const_cast<Node2T*>(mNode2)->template setAndCache<OpT>(ijk, *this, args...); } return const_cast<RootT*>(mRoot)->template setAndCache<OpT>(ijk, *this, args...); } private: /// @brief Allow nodes to insert themselves into the cache. template<typename> friend class RootNode; template<typename, uint32_t> friend class InternalNode; template<typename, typename, template<uint32_t> class, uint32_t> friend class LeafNode; /// @brief Inserts a leaf node and key pair into this ReadAccessor __hostdev__ void insert(const CoordType& ijk, const Node1T* node) const { #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY mKey = ijk; #else mKeys[0] = ijk & ~Node1T::MASK; #endif mNode1 = node; } __hostdev__ void insert(const CoordType& ijk, const Node2T* node) const { #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY mKey = ijk; #else mKeys[1] = ijk & ~Node2T::MASK; #endif mNode2 = node; } template<typename OtherNodeT> __hostdev__ void insert(const CoordType&, const OtherNodeT*) const {} }; // ReadAccessor<BuildT, LEVEL0, LEVEL1> /// @brief Node caching at all (three) tree levels template<typename BuildT> class ReadAccessor<BuildT, 0, 1, 2> { using GridT = NanoGrid<BuildT>; // grid using TreeT = NanoTree<BuildT>; using RootT = NanoRoot<BuildT>; // root node using NodeT2 = NanoUpper<BuildT>; // upper internal node using NodeT1 = NanoLower<BuildT>; // lower internal node using LeafT = NanoLeaf<BuildT>; // Leaf node using CoordT = typename RootT::CoordType; using ValueT = typename RootT::ValueType; using FloatType = typename RootT::FloatType; using CoordValueType = typename RootT::CoordT::ValueType; // All member data are mutable to allow for access methods to be const #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY // 44 bytes total mutable CoordT mKey; // 3*4 = 12 bytes #else // 68 bytes total mutable CoordT mKeys[3]; // 3*3*4 = 36 bytes #endif mutable const RootT* mRoot; mutable const void* mNode[3]; // 4*8 = 32 bytes public: using BuildType = BuildT; using ValueType = ValueT; using CoordType = CoordT; static const int CacheLevels = 3; #ifndef NANOVDB_NEW_ACCESSOR_METHODS using NodeInfo = typename ReadAccessor<ValueT, -1, -1, -1>::NodeInfo; #endif /// @brief Constructor from a root node __hostdev__ ReadAccessor(const RootT& root) #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY : mKey(CoordType::max()) #else : mKeys{CoordType::max(), CoordType::max(), CoordType::max()} #endif , mRoot(&root) , mNode{nullptr, nullptr, nullptr} { } /// @brief Constructor from a grid __hostdev__ ReadAccessor(const GridT& grid) : ReadAccessor(grid.tree().root()) { } /// @brief Constructor from a tree __hostdev__ ReadAccessor(const TreeT& tree) : ReadAccessor(tree.root()) { } __hostdev__ const RootT& root() const { return *mRoot; } /// @brief Defaults constructors ReadAccessor(const 
ReadAccessor&) = default; ~ReadAccessor() = default; ReadAccessor& operator=(const ReadAccessor&) = default; /// @brief Return a const point to the cached node of the specified type /// /// @warning The return value could be NULL. template<typename NodeT> __hostdev__ const NodeT* getNode() const { using T = typename NodeTrait<TreeT, NodeT::LEVEL>::type; static_assert(util::is_same<T, NodeT>::value, "ReadAccessor::getNode: Invalid node type"); return reinterpret_cast<const T*>(mNode[NodeT::LEVEL]); } template<int LEVEL> __hostdev__ const typename NodeTrait<TreeT, LEVEL>::type* getNode() const { using T = typename NodeTrait<TreeT, LEVEL>::type; static_assert(LEVEL >= 0 && LEVEL <= 2, "ReadAccessor::getNode: Invalid node type"); return reinterpret_cast<const T*>(mNode[LEVEL]); } /// @brief Reset this access to its initial state, i.e. with an empty cache __hostdev__ void clear() { #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY mKey = CoordType::max(); #else mKeys[0] = mKeys[1] = mKeys[2] = CoordType::max(); #endif mNode[0] = mNode[1] = mNode[2] = nullptr; } #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY template<typename NodeT> __hostdev__ bool isCached(CoordValueType dirty) const { if (!mNode[NodeT::LEVEL]) return false; if (dirty & int32_t(~NodeT::MASK)) { mNode[NodeT::LEVEL] = nullptr; return false; } return true; } __hostdev__ CoordValueType computeDirty(const CoordType& ijk) const { return (ijk[0] ^ mKey[0]) | (ijk[1] ^ mKey[1]) | (ijk[2] ^ mKey[2]); } #else template<typename NodeT> __hostdev__ bool isCached(const CoordType& ijk) const { return (ijk[0] & int32_t(~NodeT::MASK)) == mKeys[NodeT::LEVEL][0] && (ijk[1] & int32_t(~NodeT::MASK)) == mKeys[NodeT::LEVEL][1] && (ijk[2] & int32_t(~NodeT::MASK)) == mKeys[NodeT::LEVEL][2]; } #endif #ifdef NANOVDB_NEW_ACCESSOR_METHODS __hostdev__ ValueType getValue(const CoordType& ijk) const { return this->template get<GetValue<BuildT>>(ijk); } __hostdev__ ValueType getValue(int i, int j, int k) const { return this->template get<GetValue<BuildT>>(CoordType(i, j, k)); } __hostdev__ ValueType operator()(const CoordType& ijk) const { return this->template get<GetValue<BuildT>>(ijk); } __hostdev__ ValueType operator()(int i, int j, int k) const { return this->template get<GetValue<BuildT>>(CoordType(i, j, k)); } __hostdev__ auto getNodeInfo(const CoordType& ijk) const { return this->template get<GetNodeInfo<BuildT>>(ijk); } __hostdev__ bool isActive(const CoordType& ijk) const { return this->template get<GetState<BuildT>>(ijk); } __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const { return this->template get<ProbeValue<BuildT>>(ijk, v); } __hostdev__ const LeafT* probeLeaf(const CoordType& ijk) const { return this->template get<GetLeaf<BuildT>>(ijk); } #else // NANOVDB_NEW_ACCESSOR_METHODS __hostdev__ ValueType getValue(const CoordType& ijk) const { #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY const CoordValueType dirty = this->computeDirty(ijk); #else auto&& dirty = ijk; #endif if (this->isCached<LeafT>(dirty)) { return ((LeafT*)mNode[0])->getValue(ijk); } else if (this->isCached<NodeT1>(dirty)) { return ((NodeT1*)mNode[1])->getValueAndCache(ijk, *this); } else if (this->isCached<NodeT2>(dirty)) { return ((NodeT2*)mNode[2])->getValueAndCache(ijk, *this); } return mRoot->getValueAndCache(ijk, *this); } __hostdev__ ValueType operator()(const CoordType& ijk) const { return this->getValue(ijk); } __hostdev__ ValueType operator()(int i, int j, int k) const { return this->getValue(CoordType(i, j, k)); } __hostdev__ ValueType getValue(int i, int j, int k) const { 
return this->getValue(CoordType(i, j, k)); } __hostdev__ NodeInfo getNodeInfo(const CoordType& ijk) const { #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY const CoordValueType dirty = this->computeDirty(ijk); #else auto&& dirty = ijk; #endif if (this->isCached<LeafT>(dirty)) { return ((LeafT*)mNode[0])->getNodeInfoAndCache(ijk, *this); } else if (this->isCached<NodeT1>(dirty)) { return ((NodeT1*)mNode[1])->getNodeInfoAndCache(ijk, *this); } else if (this->isCached<NodeT2>(dirty)) { return ((NodeT2*)mNode[2])->getNodeInfoAndCache(ijk, *this); } return mRoot->getNodeInfoAndCache(ijk, *this); } __hostdev__ bool isActive(const CoordType& ijk) const { #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY const CoordValueType dirty = this->computeDirty(ijk); #else auto&& dirty = ijk; #endif if (this->isCached<LeafT>(dirty)) { return ((LeafT*)mNode[0])->isActive(ijk); } else if (this->isCached<NodeT1>(dirty)) { return ((NodeT1*)mNode[1])->isActiveAndCache(ijk, *this); } else if (this->isCached<NodeT2>(dirty)) { return ((NodeT2*)mNode[2])->isActiveAndCache(ijk, *this); } return mRoot->isActiveAndCache(ijk, *this); } __hostdev__ bool probeValue(const CoordType& ijk, ValueType& v) const { #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY const CoordValueType dirty = this->computeDirty(ijk); #else auto&& dirty = ijk; #endif if (this->isCached<LeafT>(dirty)) { return ((LeafT*)mNode[0])->probeValue(ijk, v); } else if (this->isCached<NodeT1>(dirty)) { return ((NodeT1*)mNode[1])->probeValueAndCache(ijk, v, *this); } else if (this->isCached<NodeT2>(dirty)) { return ((NodeT2*)mNode[2])->probeValueAndCache(ijk, v, *this); } return mRoot->probeValueAndCache(ijk, v, *this); } __hostdev__ const LeafT* probeLeaf(const CoordType& ijk) const { #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY const CoordValueType dirty = this->computeDirty(ijk); #else auto&& dirty = ijk; #endif if (this->isCached<LeafT>(dirty)) { return ((LeafT*)mNode[0]); } else if (this->isCached<NodeT1>(dirty)) { return ((NodeT1*)mNode[1])->probeLeafAndCache(ijk, *this); } else if (this->isCached<NodeT2>(dirty)) { return ((NodeT2*)mNode[2])->probeLeafAndCache(ijk, *this); } return mRoot->probeLeafAndCache(ijk, *this); } #endif // NANOVDB_NEW_ACCESSOR_METHODS template<typename OpT, typename... ArgsT> __hostdev__ auto get(const CoordType& ijk, ArgsT&&... args) const { #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY const CoordValueType dirty = this->computeDirty(ijk); #else auto&& dirty = ijk; #endif if (this->isCached<LeafT>(dirty)) { return ((const LeafT*)mNode[0])->template getAndCache<OpT>(ijk, *this, args...); } else if (this->isCached<NodeT1>(dirty)) { return ((const NodeT1*)mNode[1])->template getAndCache<OpT>(ijk, *this, args...); } else if (this->isCached<NodeT2>(dirty)) { return ((const NodeT2*)mNode[2])->template getAndCache<OpT>(ijk, *this, args...); } return mRoot->template getAndCache<OpT>(ijk, *this, args...); } template<typename OpT, typename... ArgsT> __hostdev__ auto set(const CoordType& ijk, ArgsT&&... 
args) const { #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY const CoordValueType dirty = this->computeDirty(ijk); #else auto&& dirty = ijk; #endif if (this->isCached<LeafT>(dirty)) { return ((LeafT*)mNode[0])->template setAndCache<OpT>(ijk, *this, args...); } else if (this->isCached<NodeT1>(dirty)) { return ((NodeT1*)mNode[1])->template setAndCache<OpT>(ijk, *this, args...); } else if (this->isCached<NodeT2>(dirty)) { return ((NodeT2*)mNode[2])->template setAndCache<OpT>(ijk, *this, args...); } return ((RootT*)mRoot)->template setAndCache<OpT>(ijk, *this, args...); } template<typename RayT> __hostdev__ uint32_t getDim(const CoordType& ijk, const RayT& ray) const { #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY const CoordValueType dirty = this->computeDirty(ijk); #else auto&& dirty = ijk; #endif if (this->isCached<LeafT>(dirty)) { return ((LeafT*)mNode[0])->getDimAndCache(ijk, ray, *this); } else if (this->isCached<NodeT1>(dirty)) { return ((NodeT1*)mNode[1])->getDimAndCache(ijk, ray, *this); } else if (this->isCached<NodeT2>(dirty)) { return ((NodeT2*)mNode[2])->getDimAndCache(ijk, ray, *this); } return mRoot->getDimAndCache(ijk, ray, *this); } private: /// @brief Allow nodes to insert themselves into the cache. template<typename> friend class RootNode; template<typename, uint32_t> friend class InternalNode; template<typename, typename, template<uint32_t> class, uint32_t> friend class LeafNode; /// @brief Inserts a leaf node and key pair into this ReadAccessor template<typename NodeT> __hostdev__ void insert(const CoordType& ijk, const NodeT* node) const { #ifdef NANOVDB_USE_SINGLE_ACCESSOR_KEY mKey = ijk; #else mKeys[NodeT::LEVEL] = ijk & ~NodeT::MASK; #endif mNode[NodeT::LEVEL] = node; } }; // ReadAccessor<BuildT, 0, 1, 2> ////////////////////////////////////////////////// /// @brief Free-standing function for convenient creation of a ReadAccessor with /// optional and customizable node caching. /// /// @details createAccessor<>(grid): No caching of nodes and hence it's thread-safe but slow /// createAccessor<0>(grid): Caching of leaf nodes only /// createAccessor<1>(grid): Caching of lower internal nodes only /// createAccessor<2>(grid): Caching of upper internal nodes only /// createAccessor<0,1>(grid): Caching of leaf and lower internal nodes /// createAccessor<0,2>(grid): Caching of leaf and upper internal nodes /// createAccessor<1,2>(grid): Caching of lower and upper internal nodes /// createAccessor<0,1,2>(grid): Caching of all nodes at all tree levels template<int LEVEL0 = -1, int LEVEL1 = -1, int LEVEL2 = -1, typename ValueT = float> ReadAccessor<ValueT, LEVEL0, LEVEL1, LEVEL2> createAccessor(const NanoGrid<ValueT>& grid) { return ReadAccessor<ValueT, LEVEL0, LEVEL1, LEVEL2>(grid); } template<int LEVEL0 = -1, int LEVEL1 = -1, int LEVEL2 = -1, typename ValueT = float> ReadAccessor<ValueT, LEVEL0, LEVEL1, LEVEL2> createAccessor(const NanoTree<ValueT>& tree) { return ReadAccessor<ValueT, LEVEL0, LEVEL1, LEVEL2>(tree); } template<int LEVEL0 = -1, int LEVEL1 = -1, int LEVEL2 = -1, typename ValueT = float> ReadAccessor<ValueT, LEVEL0, LEVEL1, LEVEL2> createAccessor(const NanoRoot<ValueT>& root) { return ReadAccessor<ValueT, LEVEL0, LEVEL1, LEVEL2>(root); } ////////////////////////////////////////////////// /// @brief This is a convenient class that allows for access to grid meta-data /// that are independent of the value type of a grid. That is, this class /// can be used to get information about a grid without actually knowing /// its ValueType. 
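/// @par Example
/// A minimal usage sketch, assuming `gridData` is a valid `const GridData*` that
/// points to the first grid in a NanoVDB buffer:
/// @code
/// GridMetaData meta(gridData);
/// if (meta.isValid() && meta.hasBBox()) {
///     const uint64_t   voxels = meta.activeVoxelCount(); // active voxels, independent of the ValueType
///     const CoordBBox& bbox   = meta.indexBBox();        // AABB of active values in index space
/// }
/// @endcode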
class GridMetaData { // 768 bytes (32 byte aligned) GridData mGridData; // 672B TreeData mTreeData; // 64B CoordBBox mIndexBBox; // 24B. AABB of active values in index space. uint32_t mRootTableSize, mPadding{0}; // 8B public: template<typename T> GridMetaData(const NanoGrid<T>& grid) { mGridData = *grid.data(); mTreeData = *grid.tree().data(); mIndexBBox = grid.indexBBox(); mRootTableSize = grid.tree().root().getTableSize(); } GridMetaData(const GridData* gridData) { if (GridMetaData::safeCast(gridData)) { *this = *reinterpret_cast<const GridMetaData*>(gridData); //util::memcpy(this, (const GridMetaData*)gridData); } else {// otherwise copy each member individually mGridData = *gridData; mTreeData = *reinterpret_cast<const TreeData*>(gridData->treePtr()); mIndexBBox = gridData->indexBBox(); mRootTableSize = gridData->rootTableSize(); } } GridMetaData& operator=(const GridMetaData&) = default; /// @brief return true if the RootData follows right after the TreeData. /// If so, this implies that it's safe to cast the grid from which /// this instance was constructed to a GridMetaData __hostdev__ bool safeCast() const { return mTreeData.isRootNext(); } /// @brief return true if it is safe to cast the grid to a pointer /// of type GridMetaData, i.e. construction can be avoided. __hostdev__ static bool safeCast(const GridData *gridData){ NANOVDB_ASSERT(gridData && gridData->isValid()); return gridData->isRootConnected(); } /// @brief return true if it is safe to cast the grid to a pointer /// of type GridMetaData, i.e. construction can be avoided. template<typename T> __hostdev__ static bool safeCast(const NanoGrid<T>& grid){return grid.tree().isRootNext();} __hostdev__ bool isValid() const { return mGridData.isValid(); } __hostdev__ const GridType& gridType() const { return mGridData.mGridType; } __hostdev__ const GridClass& gridClass() const { return mGridData.mGridClass; } __hostdev__ bool isLevelSet() const { return mGridData.mGridClass == GridClass::LevelSet; } __hostdev__ bool isFogVolume() const { return mGridData.mGridClass == GridClass::FogVolume; } __hostdev__ bool isStaggered() const { return mGridData.mGridClass == GridClass::Staggered; } __hostdev__ bool isPointIndex() const { return mGridData.mGridClass == GridClass::PointIndex; } __hostdev__ bool isGridIndex() const { return mGridData.mGridClass == GridClass::IndexGrid; } __hostdev__ bool isPointData() const { return mGridData.mGridClass == GridClass::PointData; } __hostdev__ bool isMask() const { return mGridData.mGridClass == GridClass::Topology; } __hostdev__ bool isUnknown() const { return mGridData.mGridClass == GridClass::Unknown; } __hostdev__ bool hasMinMax() const { return mGridData.mFlags.isMaskOn(GridFlags::HasMinMax); } __hostdev__ bool hasBBox() const { return mGridData.mFlags.isMaskOn(GridFlags::HasBBox); } __hostdev__ bool hasLongGridName() const { return mGridData.mFlags.isMaskOn(GridFlags::HasLongGridName); } __hostdev__ bool hasAverage() const { return mGridData.mFlags.isMaskOn(GridFlags::HasAverage); } __hostdev__ bool hasStdDeviation() const { return mGridData.mFlags.isMaskOn(GridFlags::HasStdDeviation); } __hostdev__ bool isBreadthFirst() const { return mGridData.mFlags.isMaskOn(GridFlags::IsBreadthFirst); } __hostdev__ uint64_t gridSize() const { return mGridData.mGridSize; } __hostdev__ uint32_t gridIndex() const { return mGridData.mGridIndex; } __hostdev__ uint32_t gridCount() const { return mGridData.mGridCount; } __hostdev__ const char* shortGridName() const { return mGridData.mGridName; } __hostdev__ 
const Map& map() const { return mGridData.mMap; } __hostdev__ const Vec3dBBox& worldBBox() const { return mGridData.mWorldBBox; } __hostdev__ const CoordBBox& indexBBox() const { return mIndexBBox; } __hostdev__ Vec3d voxelSize() const { return mGridData.mVoxelSize; } __hostdev__ int blindDataCount() const { return mGridData.mBlindMetadataCount; } __hostdev__ uint64_t activeVoxelCount() const { return mTreeData.mVoxelCount; } __hostdev__ const uint32_t& activeTileCount(uint32_t level) const { return mTreeData.mTileCount[level - 1]; } __hostdev__ uint32_t nodeCount(uint32_t level) const { return mTreeData.mNodeCount[level]; } __hostdev__ const Checksum& checksum() const { return mGridData.mChecksum; } __hostdev__ uint32_t rootTableSize() const { return mRootTableSize; } __hostdev__ bool isEmpty() const { return mRootTableSize == 0; } __hostdev__ Version version() const { return mGridData.mVersion; } }; // GridMetaData /// @brief Class to access points at a specific voxel location /// /// @note For GridClass::PointIndex, AttT should be uint32_t, and for GridClass::PointData it should be Vec3f template<typename AttT, typename BuildT = uint32_t> class PointAccessor : public DefaultReadAccessor<BuildT> { using AccT = DefaultReadAccessor<BuildT>; const NanoGrid<BuildT>& mGrid; const AttT* mData; public: PointAccessor(const NanoGrid<BuildT>& grid) : AccT(grid.tree().root()) , mGrid(grid) , mData(grid.template getBlindData<AttT>(0)) { NANOVDB_ASSERT(grid.gridType() == toGridType<BuildT>()); NANOVDB_ASSERT((grid.gridClass() == GridClass::PointIndex && util::is_same<uint32_t, AttT>::value) || (grid.gridClass() == GridClass::PointData && util::is_same<Vec3f, AttT>::value)); } /// @brief return true if this accessor was initialized correctly __hostdev__ operator bool() const { return mData != nullptr; } __hostdev__ const NanoGrid<BuildT>& grid() const { return mGrid; } /// @brief Return the total number of points in the grid and set the /// iterators to the complete range of points. __hostdev__ uint64_t gridPoints(const AttT*& begin, const AttT*& end) const { const uint64_t count = mGrid.blindMetaData(0u).mValueCount; begin = mData; end = begin + count; return count; } /// @brief Return the number of points in the leaf node containing the coordinate @a ijk. /// If this return value is larger than zero then the iterators @a begin and @a end /// will point to all the attributes contained within that leaf node.
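/// @par Example
/// A minimal usage sketch, assuming `grid` is a `NanoGrid<uint32_t>` of class
/// GridClass::PointIndex and `ijk` is a coordinate inside the grid:
/// @code
/// PointAccessor<uint32_t> acc(grid);
/// const uint32_t *begin = nullptr, *end = nullptr;
/// if (acc && acc.leafPoints(ijk, begin, end) > 0) {
///     for (const uint32_t* p = begin; p != end; ++p) { /* visit the point with index *p */ }
/// }
/// @endcode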
__hostdev__ uint64_t leafPoints(const Coord& ijk, const AttT*& begin, const AttT*& end) const { auto* leaf = this->probeLeaf(ijk); if (leaf == nullptr) { return 0; } begin = mData + leaf->minimum(); end = begin + leaf->maximum(); return leaf->maximum(); } /// @brief get iterators over attributes to points at a specific voxel location __hostdev__ uint64_t voxelPoints(const Coord& ijk, const AttT*& begin, const AttT*& end) const { begin = end = nullptr; if (auto* leaf = this->probeLeaf(ijk)) { const uint32_t offset = NanoLeaf<BuildT>::CoordToOffset(ijk); if (leaf->isActive(offset)) { begin = mData + leaf->minimum(); end = begin + leaf->getValue(offset); if (offset > 0u) begin += leaf->getValue(offset - 1); } } return end - begin; } }; // PointAccessor template<typename AttT> class PointAccessor<AttT, Point> : public DefaultReadAccessor<Point> { using AccT = DefaultReadAccessor<Point>; const NanoGrid<Point>& mGrid; const AttT* mData; public: PointAccessor(const NanoGrid<Point>& grid) : AccT(grid.tree().root()) , mGrid(grid) , mData(grid.template getBlindData<AttT>(0)) { NANOVDB_ASSERT(mData); NANOVDB_ASSERT(grid.gridType() == GridType::PointIndex); NANOVDB_ASSERT((grid.gridClass() == GridClass::PointIndex && util::is_same<uint32_t, AttT>::value) || (grid.gridClass() == GridClass::PointData && util::is_same<Vec3f, AttT>::value) || (grid.gridClass() == GridClass::PointData && util::is_same<Vec3d, AttT>::value) || (grid.gridClass() == GridClass::PointData && util::is_same<Vec3u16, AttT>::value) || (grid.gridClass() == GridClass::PointData && util::is_same<Vec3u8, AttT>::value)); } /// @brief return true if this access was initialized correctly __hostdev__ operator bool() const { return mData != nullptr; } __hostdev__ const NanoGrid<Point>& grid() const { return mGrid; } /// @brief Return the total number of point in the grid and set the /// iterators to the complete range of points. __hostdev__ uint64_t gridPoints(const AttT*& begin, const AttT*& end) const { const uint64_t count = mGrid.blindMetaData(0u).mValueCount; begin = mData; end = begin + count; return count; } /// @brief Return the number of points in the leaf node containing the coordinate @a ijk. /// If this return value is larger than zero then the iterators @a begin and @a end /// will point to all the attributes contained within that leaf node. __hostdev__ uint64_t leafPoints(const Coord& ijk, const AttT*& begin, const AttT*& end) const { auto* leaf = this->probeLeaf(ijk); if (leaf == nullptr) return 0; begin = mData + leaf->offset(); end = begin + leaf->pointCount(); return leaf->pointCount(); } /// @brief get iterators over attributes to points at a specific voxel location __hostdev__ uint64_t voxelPoints(const Coord& ijk, const AttT*& begin, const AttT*& end) const { if (auto* leaf = this->probeLeaf(ijk)) { const uint32_t n = NanoLeaf<Point>::CoordToOffset(ijk); if (leaf->isActive(n)) { begin = mData + leaf->first(n); end = mData + leaf->last(n); return end - begin; } } begin = end = nullptr; return 0u; // no leaf or inactive voxel } }; // PointAccessor<AttT, Point> /// @brief Class to access values in channels at a specific voxel location. /// /// @note The ChannelT template parameter can be either const and non-const. 
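/// @par Example
/// A minimal usage sketch, assuming `grid` is a `NanoGrid<ValueIndex>` whose
/// blind-data channel 0 stores float values and `ijk` is a `math::Coord`:
/// @code
/// ChannelAccessor<float> acc(grid, 0u);      // attach to internal channel 0
/// if (acc) {
///     const uint64_t n = acc.getIndex(ijk);  // linear offset into the channel
///     const float    v = acc.getValue(ijk);  // value stored in the channel at ijk
/// }
/// @endcode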
template<typename ChannelT, typename IndexT = ValueIndex> class ChannelAccessor : public DefaultReadAccessor<IndexT> { static_assert(BuildTraits<IndexT>::is_index, "Expected an index build type"); using BaseT = DefaultReadAccessor<IndexT>; const NanoGrid<IndexT>& mGrid; ChannelT* mChannel; public: using ValueType = ChannelT; using TreeType = NanoTree<IndexT>; using AccessorType = ChannelAccessor<ChannelT, IndexT>; /// @brief Ctor from an IndexGrid and an integer ID of an internal channel /// that is assumed to exist as blind data in the IndexGrid. __hostdev__ ChannelAccessor(const NanoGrid<IndexT>& grid, uint32_t channelID = 0u) : BaseT(grid.tree().root()) , mGrid(grid) , mChannel(nullptr) { NANOVDB_ASSERT(isIndex(grid.gridType())); NANOVDB_ASSERT(grid.gridClass() == GridClass::IndexGrid); this->setChannel(channelID); } /// @brief Ctor from an IndexGrid and an external channel __hostdev__ ChannelAccessor(const NanoGrid<IndexT>& grid, ChannelT* channelPtr) : BaseT(grid.tree().root()) , mGrid(grid) , mChannel(channelPtr) { NANOVDB_ASSERT(isIndex(grid.gridType())); NANOVDB_ASSERT(grid.gridClass() == GridClass::IndexGrid); } /// @brief return true if this accessor was initialized correctly __hostdev__ operator bool() const { return mChannel != nullptr; } /// @brief Return a const reference to the IndexGrid __hostdev__ const NanoGrid<IndexT>& grid() const { return mGrid; } /// @brief Return a const reference to the tree of the IndexGrid __hostdev__ const TreeType& tree() const { return mGrid.tree(); } /// @brief Return a vector of the axial voxel sizes __hostdev__ const Vec3d& voxelSize() const { return mGrid.voxelSize(); } /// @brief Return total number of values indexed by the IndexGrid __hostdev__ const uint64_t& valueCount() const { return mGrid.valueCount(); } /// @brief Change to an external channel /// @return Pointer to channel data __hostdev__ ChannelT* setChannel(ChannelT* channelPtr) {return mChannel = channelPtr;} /// @brief Change to an internal channel, assuming it exists as blind data /// in the IndexGrid.
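/// @par Example
/// Channels can be switched at runtime without rebuilding the accessor; here `acc`
/// is assumed to be an existing ChannelAccessor and channel 1 is assumed to exist:
/// @code
/// if (acc.setChannel(1u) == nullptr) { /* channel 1 is missing or has the wrong type */ }
/// @endcode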
/// @return Pointer to channel data, which could be NULL if channelID is out of range or /// if ChannelT does not match the value type of the blind data __hostdev__ ChannelT* setChannel(uint32_t channelID) { return mChannel = const_cast<ChannelT*>(mGrid.template getBlindData<ChannelT>(channelID)); } /// @brief Return the linear offset into a channel that maps to the specified coordinate __hostdev__ uint64_t getIndex(const math::Coord& ijk) const { return BaseT::getValue(ijk); } __hostdev__ uint64_t idx(int i, int j, int k) const { return BaseT::getValue(math::Coord(i, j, k)); } /// @brief Return the value from a cached channel that maps to the specified coordinate __hostdev__ ChannelT& getValue(const math::Coord& ijk) const { return mChannel[BaseT::getValue(ijk)]; } __hostdev__ ChannelT& operator()(const math::Coord& ijk) const { return this->getValue(ijk); } __hostdev__ ChannelT& operator()(int i, int j, int k) const { return this->getValue(math::Coord(i, j, k)); } /// @brief return the state and updates the value of the specified voxel __hostdev__ bool probeValue(const math::Coord& ijk, typename util::remove_const<ChannelT>::type& v) const { uint64_t idx; const bool isActive = BaseT::probeValue(ijk, idx); v = mChannel[idx]; return isActive; } /// @brief Return the value from a specified channel that maps to the specified coordinate /// /// @note The template parameter can be either const or non-const template<typename T> __hostdev__ T& getValue(const math::Coord& ijk, T* channelPtr) const { return channelPtr[BaseT::getValue(ijk)]; } }; // ChannelAccessor #if 0 // This MiniGridHandle class is only included as a stand-alone example. Note that aligned_alloc is a C++17 feature! // Normally we recommend using GridHandle defined in util/GridHandle.h but this minimal implementation could be an // alternative when using the IO methods defined below. struct MiniGridHandle { struct BufferType { uint8_t *data; uint64_t size; BufferType(uint64_t n=0) : data(std::aligned_alloc(NANOVDB_DATA_ALIGNMENT, n)), size(n) {assert(isValid(data));} BufferType(BufferType &&other) : data(other.data), size(other.size) {other.data=nullptr; other.size=0;} ~BufferType() {std::free(data);} BufferType& operator=(const BufferType &other) = delete; BufferType& operator=(BufferType &&other){data=other.data; size=other.size; other.data=nullptr; other.size=0; return *this;} static BufferType create(size_t n, BufferType* dummy = nullptr) {return BufferType(n);} } buffer; MiniGridHandle(BufferType &&buf) : buffer(std::move(buf)) {} const uint8_t* data() const {return buffer.data;} };// MiniGridHandle #endif namespace io { /// @brief Define compression codecs /// /// @note NONE is the default, ZIP is slow but compact and BLOSC offers a great balance. /// /// @throw NanoVDB optionally supports ZIP and BLOSC compression and will throw an exception /// if its support is required but missing. 
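/// @par Example
/// A minimal sketch of converting between Codec values and their string names with
/// the helper functions defined below (the small character buffer is an assumption):
/// @code
/// char str[8];
/// printf("%s\n", io::toStr(str, io::Codec::BLOSC)); // prints "BLOSC"
/// const io::Codec c = io::toCodec("zip");           // lower-case names are expected
/// @endcode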
enum class Codec : uint16_t { NONE = 0, ZIP = 1, BLOSC = 2, End = 3, StrLen = 6 + End }; __hostdev__ inline const char* toStr(char *dst, Codec codec) { switch (codec){ case Codec::NONE: return util::strcpy(dst, "NONE"); case Codec::ZIP: return util::strcpy(dst, "ZIP"); case Codec::BLOSC : return util::strcpy(dst, "BLOSC"); default: return util::strcpy(dst, "END"); } } __hostdev__ inline Codec toCodec(const char *str) { if (util::streq(str, "none")) return Codec::NONE; if (util::streq(str, "zip")) return Codec::ZIP; if (util::streq(str, "blosc")) return Codec::BLOSC; return Codec::End; } /// @brief Data encoded at the head of each segment of a file or stream. /// /// @note A file or stream is composed of one or more segments that each contain // one or more grids. struct FileHeader {// 16 bytes uint64_t magic;// 8 bytes Version version;// 4 bytes version numbers uint16_t gridCount;// 2 bytes Codec codec;// 2 bytes bool isValid() const {return magic == NANOVDB_MAGIC_NUMB || magic == NANOVDB_MAGIC_FILE;} }; // FileHeader ( 16 bytes = 2 words ) // @brief Data encoded for each of the grids associated with a segment. // Grid size in memory (uint64_t) | // Grid size on disk (uint64_t) | // Grid name hash key (uint64_t) | // Numer of active voxels (uint64_t) | // Grid type (uint32_t) | // Grid class (uint32_t) | // Characters in grid name (uint32_t) | // AABB in world space (2*3*double) | one per grid in file // AABB in index space (2*3*int) | // Size of a voxel in world units (3*double) | // Byte size of the grid name (uint32_t) | // Number of nodes per level (4*uint32_t) | // Numer of active tiles per level (3*uint32_t) | // Codec for file compression (uint16_t) | // Padding due to 8B alignment (uint16_t) | // Version number (uint32_t) | struct FileMetaData {// 176 bytes uint64_t gridSize, fileSize, nameKey, voxelCount; // 4 * 8 = 32B. GridType gridType; // 4B. GridClass gridClass; // 4B. Vec3dBBox worldBBox; // 2 * 3 * 8 = 48B. CoordBBox indexBBox; // 2 * 3 * 4 = 24B. Vec3d voxelSize; // 24B. uint32_t nameSize; // 4B. uint32_t nodeCount[4]; //4 x 4 = 16B uint32_t tileCount[3];// 3 x 4 = 12B Codec codec; // 2B uint16_t padding;// 2B, due to 8B alignment from uint64_t Version version;// 4B }; // FileMetaData // the following code block uses std and therefore needs to be ignored by CUDA and HIP #if !defined(__CUDA_ARCH__) && !defined(__HIP__) // Note that starting with version 32.6.0 it is possible to write and read raw grid buffers to // files, e.g. os.write((const char*)&buffer.data(), buffer.size()) or more conveniently as // handle.write(fileName). In addition to this simple approach we offer the methods below to // write traditional uncompressed nanovdb files that unlike raw files include metadata that // is used for tools like nanovdb_print. /// /// @brief This is a standalone alternative to io::writeGrid(...,Codec::NONE) defined in util/IO.h /// Unlike the latter this function has no dependencies at all, not even NanoVDB.h, so it also /// works if client code only includes PNanoVDB.h! /// /// @details Writes a raw NanoVDB buffer, possibly with multiple grids, to a stream WITHOUT compression. /// It follows all the conventions in util/IO.h so the stream can be read by all existing client /// code of NanoVDB. /// /// @note This method will always write uncompressed grids to the stream, i.e. Blosc or ZIP compression /// is never applied! This is a fundamental limitation and feature of this standalone function. 
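/// @par Example
/// A minimal usage sketch, assuming `handle` is a GridHandle-like object exposing
/// gridData(); any stream providing "void write(const char*, size_t)" works, e.g.
/// a std::ofstream (requires <fstream>), and the file name is arbitrary:
/// @code
/// std::ofstream os("grid.nvdb", std::ios::out | std::ios::binary | std::ios::trunc);
/// io::writeUncompressedGrid(os, handle.gridData()); // writes FileHeader, FileMetaData, grid name and grid
/// @endcode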
///  /// @throw std::invalid_argument if buffer does not point to a valid NanoVDB grid. /// /// @warning This is pretty ugly code that involves lots of pointer and bit manipulations - not for the faint of heart :) template<typename StreamT> // StreamT class must support: "void write(const char*, size_t)" void writeUncompressedGrid(StreamT& os, const GridData* gridData, bool raw = false) { NANOVDB_ASSERT(gridData->mMagic == NANOVDB_MAGIC_NUMB || gridData->mMagic == NANOVDB_MAGIC_GRID); NANOVDB_ASSERT(gridData->mVersion.isCompatible()); if (!raw) {// segment with a single grid: FileHeader, FileMetaData, gridName, Grid #ifdef NANOVDB_USE_NEW_MAGIC_NUMBERS FileHeader head{NANOVDB_MAGIC_FILE, gridData->mVersion, 1u, Codec::NONE}; #else FileHeader head{NANOVDB_MAGIC_NUMB, gridData->mVersion, 1u, Codec::NONE}; #endif const char* gridName = gridData->gridName(); const uint32_t nameSize = util::strlen(gridName) + 1;// include '\0' const TreeData* treeData = (const TreeData*)(gridData->treePtr()); FileMetaData meta{gridData->mGridSize, gridData->mGridSize, 0u, treeData->mVoxelCount, gridData->mGridType, gridData->mGridClass, gridData->mWorldBBox, treeData->bbox(), gridData->mVoxelSize, nameSize, {treeData->mNodeCount[0], treeData->mNodeCount[1], treeData->mNodeCount[2], 1u}, {treeData->mTileCount[0], treeData->mTileCount[1], treeData->mTileCount[2]}, Codec::NONE, 0u, gridData->mVersion }; // FileMetaData os.write((const char*)&head, sizeof(FileHeader)); // write header os.write((const char*)&meta, sizeof(FileMetaData)); // write meta data os.write(gridName, nameSize); // write grid name } os.write((const char*)gridData, gridData->mGridSize);// write the grid }// writeUncompressedGrid /// @brief Write multiple NanoVDB grids to a single file, without compression. /// @note To write all grids in a single GridHandle simply use handle.write("fileName") template<typename GridHandleT, template<typename...> class VecT> void writeUncompressedGrids(const char* fileName, const VecT<GridHandleT>& handles, bool raw = false) { #ifdef NANOVDB_USE_IOSTREAMS // use this to switch between std::ofstream or FILE implementations std::ofstream os(fileName, std::ios::out | std::ios::binary | std::ios::trunc); #else struct StreamT { FILE* fptr; StreamT(const char* name) { fptr = fopen(name, "wb"); } ~StreamT() { fclose(fptr); } void write(const char* data, size_t n) { fwrite(data, 1, n, fptr); } bool is_open() const { return fptr != NULL; } } os(fileName); #endif if (!os.is_open()) { fprintf(stderr, "nanovdb::writeUncompressedGrids: Unable to open file \"%s\" for output\n", fileName); exit(EXIT_FAILURE); } for (auto& h : handles) { for (uint32_t n=0; n<h.gridCount(); ++n) writeUncompressedGrid(os, h.gridData(n), raw); } } // writeUncompressedGrids /// @brief Read all uncompressed grids from a stream and return their handles.
/// /// @throw std::invalid_argument if stream does not contain a single uncompressed valid NanoVDB grid /// /// @details StreamT class must support: "bool read(char*, size_t)" and "void skip(uint32_t)" template<typename GridHandleT, typename StreamT, template<typename...> class VecT> VecT<GridHandleT> readUncompressedGrids(StreamT& is, const typename GridHandleT::BufferType& pool = typename GridHandleT::BufferType()) { VecT<GridHandleT> handles; GridData data; is.read((char*)&data, sizeof(GridData)); if (data.isValid()) {// stream contains a raw grid buffer uint64_t size = data.mGridSize, sum = 0u; while(data.mGridIndex + 1u < data.mGridCount) { is.skip(data.mGridSize - sizeof(GridData));// skip grid is.read((char*)&data, sizeof(GridData));// read sizeof(GridData) bytes sum += data.mGridSize; } is.skip(-int64_t(sum + sizeof(GridData)));// rewind to start auto buffer = GridHandleT::BufferType::create(size + sum, &pool); is.read((char*)(buffer.data()), buffer.size()); handles.emplace_back(std::move(buffer)); } else {// Header0, MetaData0, gridName0, Grid0...HeaderN, MetaDataN, gridNameN, GridN is.skip(-sizeof(GridData));// rewind FileHeader head; while(is.read((char*)&head, sizeof(FileHeader))) { if (!head.isValid()) { fprintf(stderr, "nanovdb::readUncompressedGrids: invalid magic number = \"%s\"\n", (const char*)&(head.magic)); exit(EXIT_FAILURE); } else if (!head.version.isCompatible()) { char str[20]; fprintf(stderr, "nanovdb::readUncompressedGrids: invalid major version = \"%s\"\n", toStr(str, head.version)); exit(EXIT_FAILURE); } else if (head.codec != Codec::NONE) { char str[8]; fprintf(stderr, "nanovdb::readUncompressedGrids: invalid codec = \"%s\"\n", toStr(str, head.codec)); exit(EXIT_FAILURE); } FileMetaData meta; for (uint16_t i = 0; i < head.gridCount; ++i) { // read all grids in segment is.read((char*)&meta, sizeof(FileMetaData));// read meta data is.skip(meta.nameSize); // skip grid name auto buffer = GridHandleT::BufferType::create(meta.gridSize, &pool); is.read((char*)buffer.data(), meta.gridSize);// read grid handles.emplace_back(std::move(buffer)); }// loop over grids in segment }// loop over segments } return handles; } // readUncompressedGrids /// @brief Read a multiple un-compressed NanoVDB grids from a file and return them as a vector. 
template<typename GridHandleT, template<typename...> class VecT> VecT<GridHandleT> readUncompressedGrids(const char* fileName, const typename GridHandleT::BufferType& buffer = typename GridHandleT::BufferType()) { #ifdef NANOVDB_USE_IOSTREAMS // use this to switch between std::ifstream or FILE implementations struct StreamT : public std::ifstream { StreamT(const char* name) : std::ifstream(name, std::ios::in | std::ios::binary){} void skip(int64_t off) { this->seekg(off, std::ios_base::cur); } }; #else struct StreamT { FILE* fptr; StreamT(const char* name) { fptr = fopen(name, "rb"); } ~StreamT() { fclose(fptr); } bool read(char* data, size_t n) { size_t m = fread(data, 1, n, fptr); return n == m; } void skip(int64_t off) { fseek(fptr, (long int)off, SEEK_CUR); } bool is_open() const { return fptr != NULL; } }; #endif StreamT is(fileName); if (!is.is_open()) { fprintf(stderr, "nanovdb::readUncompressedGrids: Unable to open file \"%s\"for input\n", fileName); exit(EXIT_FAILURE); } return readUncompressedGrids<GridHandleT, StreamT, VecT>(is, buffer); } // readUncompressedGrids #endif // if !defined(__CUDA_ARCH__) && !defined(__HIP__) } // namespace io // ----------------------------> Implementations of random access methods <-------------------------------------- /// @brief Implements Tree::getValue(math::Coord), i.e. return the value associated with a specific coordinate @c ijk. /// @tparam BuildT Build type of the grid being called /// @details The value at a coordinate maps to the background, a tile value or a leaf value. template<typename BuildT> struct GetValue { __hostdev__ static auto get(const NanoRoot<BuildT>& root) { return root.mBackground; } __hostdev__ static auto get(const typename NanoRoot<BuildT>::Tile& tile) { return tile.value; } __hostdev__ static auto get(const NanoUpper<BuildT>& node, uint32_t n) { return node.mTable[n].value; } __hostdev__ static auto get(const NanoLower<BuildT>& node, uint32_t n) { return node.mTable[n].value; } __hostdev__ static auto get(const NanoLeaf<BuildT>& leaf, uint32_t n) { return leaf.getValue(n); } // works with all build types }; // GetValue<BuildT> template<typename BuildT> struct SetValue { static_assert(!BuildTraits<BuildT>::is_special, "SetValue does not support special value types"); using ValueT = typename NanoLeaf<BuildT>::ValueType; __hostdev__ static auto set(NanoRoot<BuildT>&, const ValueT&) {} // no-op __hostdev__ static auto set(typename NanoRoot<BuildT>::Tile& tile, const ValueT& v) { tile.value = v; } __hostdev__ static auto set(NanoUpper<BuildT>& node, uint32_t n, const ValueT& v) { node.mTable[n].value = v; } __hostdev__ static auto set(NanoLower<BuildT>& node, uint32_t n, const ValueT& v) { node.mTable[n].value = v; } __hostdev__ static auto set(NanoLeaf<BuildT>& leaf, uint32_t n, const ValueT& v) { leaf.mValues[n] = v; } }; // SetValue<BuildT> template<typename BuildT> struct SetVoxel { static_assert(!BuildTraits<BuildT>::is_special, "SetVoxel does not support special value types"); using ValueT = typename NanoLeaf<BuildT>::ValueType; __hostdev__ static auto set(NanoRoot<BuildT>&, const ValueT&) {} // no-op __hostdev__ static auto set(typename NanoRoot<BuildT>::Tile&, const ValueT&) {} // no-op __hostdev__ static auto set(NanoUpper<BuildT>&, uint32_t, const ValueT&) {} // no-op __hostdev__ static auto set(NanoLower<BuildT>&, uint32_t, const ValueT&) {} // no-op __hostdev__ static auto set(NanoLeaf<BuildT>& leaf, uint32_t n, const ValueT& v) { leaf.mValues[n] = v; } }; // SetVoxel<BuildT> /// @brief Implements 
Tree::isActive(math::Coord) /// @tparam BuildT Build type of the grid being called template<typename BuildT> struct GetState { __hostdev__ static auto get(const NanoRoot<BuildT>&) { return false; } __hostdev__ static auto get(const typename NanoRoot<BuildT>::Tile& tile) { return tile.state > 0; } __hostdev__ static auto get(const NanoUpper<BuildT>& node, uint32_t n) { return node.mValueMask.isOn(n); } __hostdev__ static auto get(const NanoLower<BuildT>& node, uint32_t n) { return node.mValueMask.isOn(n); } __hostdev__ static auto get(const NanoLeaf<BuildT>& leaf, uint32_t n) { return leaf.mValueMask.isOn(n); } }; // GetState<BuildT> /// @brief Implements Tree::getDim(math::Coord) /// @tparam BuildT Build type of the grid being called template<typename BuildT> struct GetDim { __hostdev__ static uint32_t get(const NanoRoot<BuildT>&) { return 0u; } // background __hostdev__ static uint32_t get(const typename NanoRoot<BuildT>::Tile&) { return 4096u; } __hostdev__ static uint32_t get(const NanoUpper<BuildT>&, uint32_t) { return 128u; } __hostdev__ static uint32_t get(const NanoLower<BuildT>&, uint32_t) { return 8u; } __hostdev__ static uint32_t get(const NanoLeaf<BuildT>&, uint32_t) { return 1u; } }; // GetDim<BuildT> /// @brief Return the pointer to the leaf node that contains math::Coord. Implements Tree::probeLeaf(math::Coord) /// @tparam BuildT Build type of the grid being called template<typename BuildT> struct GetLeaf { __hostdev__ static const NanoLeaf<BuildT>* get(const NanoRoot<BuildT>&) { return nullptr; } __hostdev__ static const NanoLeaf<BuildT>* get(const typename NanoRoot<BuildT>::Tile&) { return nullptr; } __hostdev__ static const NanoLeaf<BuildT>* get(const NanoUpper<BuildT>&, uint32_t) { return nullptr; } __hostdev__ static const NanoLeaf<BuildT>* get(const NanoLower<BuildT>&, uint32_t) { return nullptr; } __hostdev__ static const NanoLeaf<BuildT>* get(const NanoLeaf<BuildT>& leaf, uint32_t) { return &leaf; } }; // GetLeaf<BuildT> /// @brief Return point to the lower internal node where math::Coord maps to one of its values, i.e. terminates /// @tparam BuildT Build type of the grid being called template<typename BuildT> struct GetLower { __hostdev__ static const NanoLower<BuildT>* get(const NanoRoot<BuildT>&) { return nullptr; } __hostdev__ static const NanoLower<BuildT>* get(const typename NanoRoot<BuildT>::Tile&) { return nullptr; } __hostdev__ static const NanoLower<BuildT>* get(const NanoUpper<BuildT>&, uint32_t) { return nullptr; } __hostdev__ static const NanoLower<BuildT>* get(const NanoLower<BuildT>& node, uint32_t) { return &node; } __hostdev__ static const NanoLower<BuildT>* get(const NanoLeaf<BuildT>&, uint32_t) { return nullptr; } }; // GetLower<BuildT> /// @brief Return point to the upper internal node where math::Coord maps to one of its values, i.e. 
terminates /// @tparam BuildT Build type of the grid being called template<typename BuildT> struct GetUpper { __hostdev__ static const NanoUpper<BuildT>* get(const NanoRoot<BuildT>&) { return nullptr; } __hostdev__ static const NanoUpper<BuildT>* get(const typename NanoRoot<BuildT>::Tile&) { return nullptr; } __hostdev__ static const NanoUpper<BuildT>* get(const NanoUpper<BuildT>& node, uint32_t) { return &node; } __hostdev__ static const NanoUpper<BuildT>* get(const NanoLower<BuildT>& node, uint32_t) { return nullptr; } __hostdev__ static const NanoUpper<BuildT>* get(const NanoLeaf<BuildT>&, uint32_t) { return nullptr; } }; // GetUpper<BuildT> /// @brief Implements Tree::probeLeaf(math::Coord) /// @tparam BuildT Build type of the grid being called template<typename BuildT> struct ProbeValue { using ValueT = typename BuildToValueMap<BuildT>::Type; __hostdev__ static bool get(const NanoRoot<BuildT>& root, ValueT& v) { v = root.mBackground; return false; } __hostdev__ static bool get(const typename NanoRoot<BuildT>::Tile& tile, ValueT& v) { v = tile.value; return tile.state > 0u; } __hostdev__ static bool get(const NanoUpper<BuildT>& node, uint32_t n, ValueT& v) { v = node.mTable[n].value; return node.mValueMask.isOn(n); } __hostdev__ static bool get(const NanoLower<BuildT>& node, uint32_t n, ValueT& v) { v = node.mTable[n].value; return node.mValueMask.isOn(n); } __hostdev__ static bool get(const NanoLeaf<BuildT>& leaf, uint32_t n, ValueT& v) { v = leaf.getValue(n); return leaf.mValueMask.isOn(n); } }; // ProbeValue<BuildT> /// @brief Implements Tree::getNodeInfo(math::Coord) /// @tparam BuildT Build type of the grid being called template<typename BuildT> struct GetNodeInfo { using ValueType = typename NanoLeaf<BuildT>::ValueType; using FloatType = typename NanoLeaf<BuildT>::FloatType; struct NodeInfo { uint32_t level, dim; ValueType minimum, maximum; FloatType average, stdDevi; CoordBBox bbox; }; __hostdev__ static NodeInfo get(const NanoRoot<BuildT>& root) { return NodeInfo{3u, NanoUpper<BuildT>::DIM, root.minimum(), root.maximum(), root.average(), root.stdDeviation(), root.bbox()}; } __hostdev__ static NodeInfo get(const typename NanoRoot<BuildT>::Tile& tile) { return NodeInfo{3u, NanoUpper<BuildT>::DIM, tile.value, tile.value, static_cast<FloatType>(tile.value), 0, CoordBBox::createCube(tile.origin(), NanoUpper<BuildT>::DIM)}; } __hostdev__ static NodeInfo get(const NanoUpper<BuildT>& node, uint32_t n) { return NodeInfo{2u, node.dim(), node.minimum(), node.maximum(), node.average(), node.stdDeviation(), node.bbox()}; } __hostdev__ static NodeInfo get(const NanoLower<BuildT>& node, uint32_t n) { return NodeInfo{1u, node.dim(), node.minimum(), node.maximum(), node.average(), node.stdDeviation(), node.bbox()}; } __hostdev__ static NodeInfo get(const NanoLeaf<BuildT>& leaf, uint32_t n) { return NodeInfo{0u, leaf.dim(), leaf.minimum(), leaf.maximum(), leaf.average(), leaf.stdDeviation(), leaf.bbox()}; } }; // GetNodeInfo<BuildT> } // namespace nanovdb =================================================================== #endif // end of NANOVDB_NANOVDB_H_HAS_BEEN_INCLUDED
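// ---------------------------------------------------------------------------
// Illustrative usage sketch (appended for clarity; NOT part of the original
// header). It shows how the io::writeUncompressedGrids / io::readUncompressedGrids
// helpers documented above are typically combined with GridHandle and the
// random-access getValue path on the host. makeFloatGridHandle() is a
// hypothetical placeholder for grid construction done elsewhere (e.g. via the
// NanoVDB tools headers), and the GridHandle/HostBuffer include path is assumed
// from the same distribution (nanovdb/GridHandle.h here; older layouts use
// nanovdb/util/GridHandle.h). Wrapped in #if 0 like the MiniGridHandle example
// above, since it is a sketch rather than part of the header's API.
#if 0
#include <vector>
#include <nanovdb/GridHandle.h> // assumed path; provides nanovdb::GridHandle and nanovdb::HostBuffer

// hypothetical factory that builds a float grid and returns it by handle
nanovdb::GridHandle<nanovdb::HostBuffer> makeFloatGridHandle();

void roundTripExample()
{
    std::vector<nanovdb::GridHandle<nanovdb::HostBuffer>> handles;
    handles.emplace_back(makeFloatGridHandle());

    // write one uncompressed segment per grid (Codec::NONE) to a file
    nanovdb::io::writeUncompressedGrids("grids.nvdb", handles);

    // read every grid back; each grid ends up in its own GridHandle
    auto readBack = nanovdb::io::readUncompressedGrids<
        nanovdb::GridHandle<nanovdb::HostBuffer>, std::vector>("grids.nvdb");

    if (!readBack.empty()) {
        // random access goes through the GetValue functor defined above
        if (const auto* grid = readBack[0].grid<float>()) {
            float v = grid->tree().getValue(nanovdb::Coord(0, 0, 0));
            (void)v; // background, tile value, or voxel value at (0,0,0)
        }
    }
}
#endif
// ---------------------------------------------------------------------------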
290,924
C
42.913208
179
0.623245
NVIDIA/warp/warp/native/nanovdb/PNanoVDB.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /*! \file nanovdb/PNanoVDB.h \author Andrew Reidmeyer \brief This file is a portable (e.g. pointer-less) C99/GLSL/HLSL port of NanoVDB.h, which is compatible with most graphics APIs. */ #ifndef NANOVDB_PNANOVDB_H_HAS_BEEN_INCLUDED #define NANOVDB_PNANOVDB_H_HAS_BEEN_INCLUDED // ------------------------------------------------ Configuration ----------------------------------------------------------- // platforms //#define PNANOVDB_C //#define PNANOVDB_HLSL //#define PNANOVDB_GLSL // addressing mode // PNANOVDB_ADDRESS_32 // PNANOVDB_ADDRESS_64 #if defined(PNANOVDB_C) #ifndef PNANOVDB_ADDRESS_32 #define PNANOVDB_ADDRESS_64 #endif #elif defined(PNANOVDB_HLSL) #ifndef PNANOVDB_ADDRESS_64 #define PNANOVDB_ADDRESS_32 #endif #elif defined(PNANOVDB_GLSL) #ifndef PNANOVDB_ADDRESS_64 #define PNANOVDB_ADDRESS_32 #endif #endif // bounds checking //#define PNANOVDB_BUF_BOUNDS_CHECK // enable HDDA by default on HLSL/GLSL, make explicit on C #if defined(PNANOVDB_C) //#define PNANOVDB_HDDA #ifdef PNANOVDB_HDDA #ifndef PNANOVDB_CMATH #define PNANOVDB_CMATH #endif #endif #elif defined(PNANOVDB_HLSL) #define PNANOVDB_HDDA #elif defined(PNANOVDB_GLSL) #define PNANOVDB_HDDA #endif #ifdef PNANOVDB_CMATH #ifndef __CUDACC_RTC__ #include <math.h> #endif #endif // ------------------------------------------------ Buffer ----------------------------------------------------------- #if defined(PNANOVDB_BUF_CUSTOM) // NOP #elif defined(PNANOVDB_C) #define PNANOVDB_BUF_C #elif defined(PNANOVDB_HLSL) #define PNANOVDB_BUF_HLSL #elif defined(PNANOVDB_GLSL) #define PNANOVDB_BUF_GLSL #endif #if defined(PNANOVDB_BUF_C) #ifndef __CUDACC_RTC__ #include <stdint.h> #endif #if defined(__CUDACC__) #define PNANOVDB_BUF_FORCE_INLINE static __host__ __device__ __forceinline__ #elif defined(_WIN32) #define PNANOVDB_BUF_FORCE_INLINE static inline __forceinline #else #define PNANOVDB_BUF_FORCE_INLINE static inline __attribute__((always_inline)) #endif typedef struct pnanovdb_buf_t { uint32_t* data; #ifdef PNANOVDB_BUF_BOUNDS_CHECK uint64_t size_in_words; #endif }pnanovdb_buf_t; PNANOVDB_BUF_FORCE_INLINE pnanovdb_buf_t pnanovdb_make_buf(uint32_t* data, uint64_t size_in_words) { pnanovdb_buf_t ret; ret.data = data; #ifdef PNANOVDB_BUF_BOUNDS_CHECK ret.size_in_words = size_in_words; #endif return ret; } #if defined(PNANOVDB_ADDRESS_32) PNANOVDB_BUF_FORCE_INLINE uint32_t pnanovdb_buf_read_uint32(pnanovdb_buf_t buf, uint32_t byte_offset) { uint32_t wordaddress = (byte_offset >> 2u); #ifdef PNANOVDB_BUF_BOUNDS_CHECK return wordaddress < buf.size_in_words ? buf.data[wordaddress] : 0u; #else return buf.data[wordaddress]; #endif } PNANOVDB_BUF_FORCE_INLINE uint64_t pnanovdb_buf_read_uint64(pnanovdb_buf_t buf, uint32_t byte_offset) { uint64_t* data64 = (uint64_t*)buf.data; uint32_t wordaddress64 = (byte_offset >> 3u); #ifdef PNANOVDB_BUF_BOUNDS_CHECK uint64_t size_in_words64 = buf.size_in_words >> 1u; return wordaddress64 < size_in_words64 ? 
data64[wordaddress64] : 0llu; #else return data64[wordaddress64]; #endif } PNANOVDB_BUF_FORCE_INLINE void pnanovdb_buf_write_uint32(pnanovdb_buf_t buf, uint32_t byte_offset, uint32_t value) { uint32_t wordaddress = (byte_offset >> 2u); #ifdef PNANOVDB_BUF_BOUNDS_CHECK if (wordaddress < buf.size_in_words) { buf.data[wordaddress] = value; } #else buf.data[wordaddress] = value; #endif } PNANOVDB_BUF_FORCE_INLINE void pnanovdb_buf_write_uint64(pnanovdb_buf_t buf, uint32_t byte_offset, uint64_t value) { uint64_t* data64 = (uint64_t*)buf.data; uint32_t wordaddress64 = (byte_offset >> 3u); #ifdef PNANOVDB_BUF_BOUNDS_CHECK uint64_t size_in_words64 = buf.size_in_words >> 1u; if (wordaddress64 < size_in_words64) { data64[wordaddress64] = value; } #else data64[wordaddress64] = value; #endif } #elif defined(PNANOVDB_ADDRESS_64) PNANOVDB_BUF_FORCE_INLINE uint32_t pnanovdb_buf_read_uint32(pnanovdb_buf_t buf, uint64_t byte_offset) { uint64_t wordaddress = (byte_offset >> 2u); #ifdef PNANOVDB_BUF_BOUNDS_CHECK return wordaddress < buf.size_in_words ? buf.data[wordaddress] : 0u; #else return buf.data[wordaddress]; #endif } PNANOVDB_BUF_FORCE_INLINE uint64_t pnanovdb_buf_read_uint64(pnanovdb_buf_t buf, uint64_t byte_offset) { uint64_t* data64 = (uint64_t*)buf.data; uint64_t wordaddress64 = (byte_offset >> 3u); #ifdef PNANOVDB_BUF_BOUNDS_CHECK uint64_t size_in_words64 = buf.size_in_words >> 1u; return wordaddress64 < size_in_words64 ? data64[wordaddress64] : 0llu; #else return data64[wordaddress64]; #endif } PNANOVDB_BUF_FORCE_INLINE void pnanovdb_buf_write_uint32(pnanovdb_buf_t buf, uint64_t byte_offset, uint32_t value) { uint64_t wordaddress = (byte_offset >> 2u); #ifdef PNANOVDB_BUF_BOUNDS_CHECK if (wordaddress < buf.size_in_words) { buf.data[wordaddress] = value; } #else buf.data[wordaddress] = value; #endif } PNANOVDB_BUF_FORCE_INLINE void pnanovdb_buf_write_uint64(pnanovdb_buf_t buf, uint64_t byte_offset, uint64_t value) { uint64_t* data64 = (uint64_t*)buf.data; uint64_t wordaddress64 = (byte_offset >> 3u); #ifdef PNANOVDB_BUF_BOUNDS_CHECK uint64_t size_in_words64 = buf.size_in_words >> 1u; if (wordaddress64 < size_in_words64) { data64[wordaddress64] = value; } #else data64[wordaddress64] = value; #endif } #endif typedef uint32_t pnanovdb_grid_type_t; #define PNANOVDB_GRID_TYPE_GET(grid_typeIn, nameIn) pnanovdb_grid_type_constants[grid_typeIn].nameIn #elif defined(PNANOVDB_BUF_HLSL) #if defined(PNANOVDB_ADDRESS_32) #define pnanovdb_buf_t StructuredBuffer<uint> uint pnanovdb_buf_read_uint32(pnanovdb_buf_t buf, uint byte_offset) { return buf[(byte_offset >> 2u)]; } uint2 pnanovdb_buf_read_uint64(pnanovdb_buf_t buf, uint byte_offset) { uint2 ret; ret.x = pnanovdb_buf_read_uint32(buf, byte_offset + 0u); ret.y = pnanovdb_buf_read_uint32(buf, byte_offset + 4u); return ret; } void pnanovdb_buf_write_uint32(pnanovdb_buf_t buf, uint byte_offset, uint value) { // NOP, by default no write in HLSL } void pnanovdb_buf_write_uint64(pnanovdb_buf_t buf, uint byte_offset, uint2 value) { // NOP, by default no write in HLSL } #elif defined(PNANOVDB_ADDRESS_64) #define pnanovdb_buf_t StructuredBuffer<uint> uint pnanovdb_buf_read_uint32(pnanovdb_buf_t buf, uint64_t byte_offset) { return buf[uint(byte_offset >> 2u)]; } uint64_t pnanovdb_buf_read_uint64(pnanovdb_buf_t buf, uint64_t byte_offset) { uint64_t ret; ret = pnanovdb_buf_read_uint32(buf, byte_offset + 0u); ret = ret + (uint64_t(pnanovdb_buf_read_uint32(buf, byte_offset + 4u)) << 32u); return ret; } void pnanovdb_buf_write_uint32(pnanovdb_buf_t buf, uint64_t 
byte_offset, uint value) { // NOP, by default no write in HLSL } void pnanovdb_buf_write_uint64(pnanovdb_buf_t buf, uint64_t byte_offset, uint64_t value) { // NOP, by default no write in HLSL } #endif #define pnanovdb_grid_type_t uint #define PNANOVDB_GRID_TYPE_GET(grid_typeIn, nameIn) pnanovdb_grid_type_constants[grid_typeIn].nameIn #elif defined(PNANOVDB_BUF_GLSL) struct pnanovdb_buf_t { uint unused; // to satisfy min struct size? }; uint pnanovdb_buf_read_uint32(pnanovdb_buf_t buf, uint byte_offset) { return pnanovdb_buf_data[(byte_offset >> 2u)]; } uvec2 pnanovdb_buf_read_uint64(pnanovdb_buf_t buf, uint byte_offset) { uvec2 ret; ret.x = pnanovdb_buf_read_uint32(buf, byte_offset + 0u); ret.y = pnanovdb_buf_read_uint32(buf, byte_offset + 4u); return ret; } void pnanovdb_buf_write_uint32(pnanovdb_buf_t buf, uint byte_offset, uint value) { // NOP, by default no write in HLSL } void pnanovdb_buf_write_uint64(pnanovdb_buf_t buf, uint byte_offset, uvec2 value) { // NOP, by default no write in HLSL } #define pnanovdb_grid_type_t uint #define PNANOVDB_GRID_TYPE_GET(grid_typeIn, nameIn) pnanovdb_grid_type_constants[grid_typeIn].nameIn #endif // ------------------------------------------------ Basic Types ----------------------------------------------------------- // force inline #if defined(PNANOVDB_C) #if defined(__CUDACC__) #define PNANOVDB_FORCE_INLINE static __host__ __device__ __forceinline__ #elif defined(_WIN32) #define PNANOVDB_FORCE_INLINE static inline __forceinline #else #define PNANOVDB_FORCE_INLINE static inline __attribute__((always_inline)) #endif #elif defined(PNANOVDB_HLSL) #define PNANOVDB_FORCE_INLINE #elif defined(PNANOVDB_GLSL) #define PNANOVDB_FORCE_INLINE #endif // struct typedef, static const, inout #if defined(PNANOVDB_C) #define PNANOVDB_STRUCT_TYPEDEF(X) typedef struct X X; #if defined(__CUDA_ARCH__) #define PNANOVDB_STATIC_CONST constexpr __constant__ #else #define PNANOVDB_STATIC_CONST static const #endif #define PNANOVDB_INOUT(X) X* #define PNANOVDB_IN(X) const X* #define PNANOVDB_DEREF(X) (*X) #define PNANOVDB_REF(X) &X #elif defined(PNANOVDB_HLSL) #define PNANOVDB_STRUCT_TYPEDEF(X) #define PNANOVDB_STATIC_CONST static const #define PNANOVDB_INOUT(X) inout X #define PNANOVDB_IN(X) X #define PNANOVDB_DEREF(X) X #define PNANOVDB_REF(X) X #elif defined(PNANOVDB_GLSL) #define PNANOVDB_STRUCT_TYPEDEF(X) #define PNANOVDB_STATIC_CONST const #define PNANOVDB_INOUT(X) inout X #define PNANOVDB_IN(X) X #define PNANOVDB_DEREF(X) X #define PNANOVDB_REF(X) X #endif // basic types, type conversion #if defined(PNANOVDB_C) #define PNANOVDB_NATIVE_64 #ifndef __CUDACC_RTC__ #include <stdint.h> #endif #if !defined(PNANOVDB_MEMCPY_CUSTOM) #ifndef __CUDACC_RTC__ #include <string.h> #endif #define pnanovdb_memcpy memcpy #endif typedef uint32_t pnanovdb_uint32_t; typedef int32_t pnanovdb_int32_t; typedef int32_t pnanovdb_bool_t; #define PNANOVDB_FALSE 0 #define PNANOVDB_TRUE 1 typedef uint64_t pnanovdb_uint64_t; typedef int64_t pnanovdb_int64_t; typedef struct pnanovdb_coord_t { pnanovdb_int32_t x, y, z; }pnanovdb_coord_t; typedef struct pnanovdb_vec3_t { float x, y, z; }pnanovdb_vec3_t; PNANOVDB_FORCE_INLINE pnanovdb_int32_t pnanovdb_uint32_as_int32(pnanovdb_uint32_t v) { return (pnanovdb_int32_t)v; } PNANOVDB_FORCE_INLINE pnanovdb_int64_t pnanovdb_uint64_as_int64(pnanovdb_uint64_t v) { return (pnanovdb_int64_t)v; } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_int64_as_uint64(pnanovdb_int64_t v) { return (pnanovdb_uint64_t)v; } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t 
pnanovdb_int32_as_uint32(pnanovdb_int32_t v) { return (pnanovdb_uint32_t)v; } PNANOVDB_FORCE_INLINE float pnanovdb_uint32_as_float(pnanovdb_uint32_t v) { float vf; pnanovdb_memcpy(&vf, &v, sizeof(vf)); return vf; } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_float_as_uint32(float v) { return *((pnanovdb_uint32_t*)(&v)); } PNANOVDB_FORCE_INLINE double pnanovdb_uint64_as_double(pnanovdb_uint64_t v) { double vf; pnanovdb_memcpy(&vf, &v, sizeof(vf)); return vf; } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_double_as_uint64(double v) { return *((pnanovdb_uint64_t*)(&v)); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_uint64_low(pnanovdb_uint64_t v) { return (pnanovdb_uint32_t)v; } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_uint64_high(pnanovdb_uint64_t v) { return (pnanovdb_uint32_t)(v >> 32u); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_uint32_as_uint64(pnanovdb_uint32_t x, pnanovdb_uint32_t y) { return ((pnanovdb_uint64_t)x) | (((pnanovdb_uint64_t)y) << 32u); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_uint32_as_uint64_low(pnanovdb_uint32_t x) { return ((pnanovdb_uint64_t)x); } PNANOVDB_FORCE_INLINE pnanovdb_int32_t pnanovdb_uint64_is_equal(pnanovdb_uint64_t a, pnanovdb_uint64_t b) { return a == b; } PNANOVDB_FORCE_INLINE pnanovdb_int32_t pnanovdb_int64_is_zero(pnanovdb_int64_t a) { return a == 0; } #ifdef PNANOVDB_CMATH PNANOVDB_FORCE_INLINE float pnanovdb_floor(float v) { return floorf(v); } #endif PNANOVDB_FORCE_INLINE pnanovdb_int32_t pnanovdb_float_to_int32(float v) { return (pnanovdb_int32_t)v; } PNANOVDB_FORCE_INLINE float pnanovdb_int32_to_float(pnanovdb_int32_t v) { return (float)v; } PNANOVDB_FORCE_INLINE float pnanovdb_uint32_to_float(pnanovdb_uint32_t v) { return (float)v; } PNANOVDB_FORCE_INLINE float pnanovdb_min(float a, float b) { return a < b ? a : b; } PNANOVDB_FORCE_INLINE float pnanovdb_max(float a, float b) { return a > b ? 
a : b; } #elif defined(PNANOVDB_HLSL) typedef uint pnanovdb_uint32_t; typedef int pnanovdb_int32_t; typedef bool pnanovdb_bool_t; #define PNANOVDB_FALSE false #define PNANOVDB_TRUE true typedef int3 pnanovdb_coord_t; typedef float3 pnanovdb_vec3_t; pnanovdb_int32_t pnanovdb_uint32_as_int32(pnanovdb_uint32_t v) { return int(v); } pnanovdb_uint32_t pnanovdb_int32_as_uint32(pnanovdb_int32_t v) { return uint(v); } float pnanovdb_uint32_as_float(pnanovdb_uint32_t v) { return asfloat(v); } pnanovdb_uint32_t pnanovdb_float_as_uint32(float v) { return asuint(v); } float pnanovdb_floor(float v) { return floor(v); } pnanovdb_int32_t pnanovdb_float_to_int32(float v) { return int(v); } float pnanovdb_int32_to_float(pnanovdb_int32_t v) { return float(v); } float pnanovdb_uint32_to_float(pnanovdb_uint32_t v) { return float(v); } float pnanovdb_min(float a, float b) { return min(a, b); } float pnanovdb_max(float a, float b) { return max(a, b); } #if defined(PNANOVDB_ADDRESS_32) typedef uint2 pnanovdb_uint64_t; typedef int2 pnanovdb_int64_t; pnanovdb_int64_t pnanovdb_uint64_as_int64(pnanovdb_uint64_t v) { return int2(v); } pnanovdb_uint64_t pnanovdb_int64_as_uint64(pnanovdb_int64_t v) { return uint2(v); } double pnanovdb_uint64_as_double(pnanovdb_uint64_t v) { return asdouble(v.x, v.y); } pnanovdb_uint64_t pnanovdb_double_as_uint64(double v) { uint2 ret; asuint(v, ret.x, ret.y); return ret; } pnanovdb_uint32_t pnanovdb_uint64_low(pnanovdb_uint64_t v) { return v.x; } pnanovdb_uint32_t pnanovdb_uint64_high(pnanovdb_uint64_t v) { return v.y; } pnanovdb_uint64_t pnanovdb_uint32_as_uint64(pnanovdb_uint32_t x, pnanovdb_uint32_t y) { return uint2(x, y); } pnanovdb_uint64_t pnanovdb_uint32_as_uint64_low(pnanovdb_uint32_t x) { return uint2(x, 0); } bool pnanovdb_uint64_is_equal(pnanovdb_uint64_t a, pnanovdb_uint64_t b) { return (a.x == b.x) && (a.y == b.y); } bool pnanovdb_int64_is_zero(pnanovdb_int64_t a) { return a.x == 0 && a.y == 0; } #else typedef uint64_t pnanovdb_uint64_t; typedef int64_t pnanovdb_int64_t; pnanovdb_int64_t pnanovdb_uint64_as_int64(pnanovdb_uint64_t v) { return int64_t(v); } pnanovdb_uint64_t pnanovdb_int64_as_uint64(pnanovdb_int64_t v) { return uint64_t(v); } double pnanovdb_uint64_as_double(pnanovdb_uint64_t v) { return asdouble(uint(v), uint(v >> 32u)); } pnanovdb_uint64_t pnanovdb_double_as_uint64(double v) { uint2 ret; asuint(v, ret.x, ret.y); return uint64_t(ret.x) + (uint64_t(ret.y) << 32u); } pnanovdb_uint32_t pnanovdb_uint64_low(pnanovdb_uint64_t v) { return uint(v); } pnanovdb_uint32_t pnanovdb_uint64_high(pnanovdb_uint64_t v) { return uint(v >> 32u); } pnanovdb_uint64_t pnanovdb_uint32_as_uint64(pnanovdb_uint32_t x, pnanovdb_uint32_t y) { return uint64_t(x) + (uint64_t(y) << 32u); } pnanovdb_uint64_t pnanovdb_uint32_as_uint64_low(pnanovdb_uint32_t x) { return uint64_t(x); } bool pnanovdb_uint64_is_equal(pnanovdb_uint64_t a, pnanovdb_uint64_t b) { return a == b; } bool pnanovdb_int64_is_zero(pnanovdb_int64_t a) { return a == 0; } #endif #elif defined(PNANOVDB_GLSL) #define pnanovdb_uint32_t uint #define pnanovdb_int32_t int #define pnanovdb_bool_t bool #define PNANOVDB_FALSE false #define PNANOVDB_TRUE true #define pnanovdb_uint64_t uvec2 #define pnanovdb_int64_t ivec2 #define pnanovdb_coord_t ivec3 #define pnanovdb_vec3_t vec3 pnanovdb_int32_t pnanovdb_uint32_as_int32(pnanovdb_uint32_t v) { return int(v); } pnanovdb_int64_t pnanovdb_uint64_as_int64(pnanovdb_uint64_t v) { return ivec2(v); } pnanovdb_uint64_t pnanovdb_int64_as_uint64(pnanovdb_int64_t v) { return uvec2(v); } 
pnanovdb_uint32_t pnanovdb_int32_as_uint32(pnanovdb_int32_t v) { return uint(v); } float pnanovdb_uint32_as_float(pnanovdb_uint32_t v) { return uintBitsToFloat(v); } pnanovdb_uint32_t pnanovdb_float_as_uint32(float v) { return floatBitsToUint(v); } double pnanovdb_uint64_as_double(pnanovdb_uint64_t v) { return packDouble2x32(uvec2(v.x, v.y)); } pnanovdb_uint64_t pnanovdb_double_as_uint64(double v) { return unpackDouble2x32(v); } pnanovdb_uint32_t pnanovdb_uint64_low(pnanovdb_uint64_t v) { return v.x; } pnanovdb_uint32_t pnanovdb_uint64_high(pnanovdb_uint64_t v) { return v.y; } pnanovdb_uint64_t pnanovdb_uint32_as_uint64(pnanovdb_uint32_t x, pnanovdb_uint32_t y) { return uvec2(x, y); } pnanovdb_uint64_t pnanovdb_uint32_as_uint64_low(pnanovdb_uint32_t x) { return uvec2(x, 0); } bool pnanovdb_uint64_is_equal(pnanovdb_uint64_t a, pnanovdb_uint64_t b) { return (a.x == b.x) && (a.y == b.y); } bool pnanovdb_int64_is_zero(pnanovdb_int64_t a) { return a.x == 0 && a.y == 0; } float pnanovdb_floor(float v) { return floor(v); } pnanovdb_int32_t pnanovdb_float_to_int32(float v) { return int(v); } float pnanovdb_int32_to_float(pnanovdb_int32_t v) { return float(v); } float pnanovdb_uint32_to_float(pnanovdb_uint32_t v) { return float(v); } float pnanovdb_min(float a, float b) { return min(a, b); } float pnanovdb_max(float a, float b) { return max(a, b); } #endif // ------------------------------------------------ Coord/Vec3 Utilties ----------------------------------------------------------- #if defined(PNANOVDB_C) PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_vec3_uniform(float a) { pnanovdb_vec3_t v; v.x = a; v.y = a; v.z = a; return v; } PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_vec3_add(const pnanovdb_vec3_t a, const pnanovdb_vec3_t b) { pnanovdb_vec3_t v; v.x = a.x + b.x; v.y = a.y + b.y; v.z = a.z + b.z; return v; } PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_vec3_sub(const pnanovdb_vec3_t a, const pnanovdb_vec3_t b) { pnanovdb_vec3_t v; v.x = a.x - b.x; v.y = a.y - b.y; v.z = a.z - b.z; return v; } PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_vec3_mul(const pnanovdb_vec3_t a, const pnanovdb_vec3_t b) { pnanovdb_vec3_t v; v.x = a.x * b.x; v.y = a.y * b.y; v.z = a.z * b.z; return v; } PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_vec3_div(const pnanovdb_vec3_t a, const pnanovdb_vec3_t b) { pnanovdb_vec3_t v; v.x = a.x / b.x; v.y = a.y / b.y; v.z = a.z / b.z; return v; } PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_vec3_min(const pnanovdb_vec3_t a, const pnanovdb_vec3_t b) { pnanovdb_vec3_t v; v.x = a.x < b.x ? a.x : b.x; v.y = a.y < b.y ? a.y : b.y; v.z = a.z < b.z ? a.z : b.z; return v; } PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_vec3_max(const pnanovdb_vec3_t a, const pnanovdb_vec3_t b) { pnanovdb_vec3_t v; v.x = a.x > b.x ? a.x : b.x; v.y = a.y > b.y ? a.y : b.y; v.z = a.z > b.z ? 
a.z : b.z; return v; } PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_coord_to_vec3(const pnanovdb_coord_t coord) { pnanovdb_vec3_t v; v.x = pnanovdb_int32_to_float(coord.x); v.y = pnanovdb_int32_to_float(coord.y); v.z = pnanovdb_int32_to_float(coord.z); return v; } PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_coord_uniform(const pnanovdb_int32_t a) { pnanovdb_coord_t v; v.x = a; v.y = a; v.z = a; return v; } PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_coord_add(pnanovdb_coord_t a, pnanovdb_coord_t b) { pnanovdb_coord_t v; v.x = a.x + b.x; v.y = a.y + b.y; v.z = a.z + b.z; return v; } #elif defined(PNANOVDB_HLSL) pnanovdb_vec3_t pnanovdb_vec3_uniform(float a) { return float3(a, a, a); } pnanovdb_vec3_t pnanovdb_vec3_add(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a + b; } pnanovdb_vec3_t pnanovdb_vec3_sub(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a - b; } pnanovdb_vec3_t pnanovdb_vec3_mul(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a * b; } pnanovdb_vec3_t pnanovdb_vec3_div(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a / b; } pnanovdb_vec3_t pnanovdb_vec3_min(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return min(a, b); } pnanovdb_vec3_t pnanovdb_vec3_max(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return max(a, b); } pnanovdb_vec3_t pnanovdb_coord_to_vec3(pnanovdb_coord_t coord) { return float3(coord); } pnanovdb_coord_t pnanovdb_coord_uniform(pnanovdb_int32_t a) { return int3(a, a, a); } pnanovdb_coord_t pnanovdb_coord_add(pnanovdb_coord_t a, pnanovdb_coord_t b) { return a + b; } #elif defined(PNANOVDB_GLSL) pnanovdb_vec3_t pnanovdb_vec3_uniform(float a) { return vec3(a, a, a); } pnanovdb_vec3_t pnanovdb_vec3_add(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a + b; } pnanovdb_vec3_t pnanovdb_vec3_sub(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a - b; } pnanovdb_vec3_t pnanovdb_vec3_mul(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a * b; } pnanovdb_vec3_t pnanovdb_vec3_div(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a / b; } pnanovdb_vec3_t pnanovdb_vec3_min(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return min(a, b); } pnanovdb_vec3_t pnanovdb_vec3_max(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return max(a, b); } pnanovdb_vec3_t pnanovdb_coord_to_vec3(const pnanovdb_coord_t coord) { return vec3(coord); } pnanovdb_coord_t pnanovdb_coord_uniform(pnanovdb_int32_t a) { return ivec3(a, a, a); } pnanovdb_coord_t pnanovdb_coord_add(pnanovdb_coord_t a, pnanovdb_coord_t b) { return a + b; } #endif // ------------------------------------------------ Uint64 Utils ----------------------------------------------------------- PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_uint32_countbits(pnanovdb_uint32_t value) { #if defined(PNANOVDB_C) #if defined(_MSC_VER) && (_MSC_VER >= 1928) && defined(PNANOVDB_USE_INTRINSICS) return __popcnt(value); #elif (defined(__GNUC__) || defined(__clang__)) && defined(PNANOVDB_USE_INTRINSICS) return __builtin_popcount(value); #else value = value - ((value >> 1) & 0x55555555); value = (value & 0x33333333) + ((value >> 2) & 0x33333333); value = (value + (value >> 4)) & 0x0F0F0F0F; return (value * 0x01010101) >> 24; #endif #elif defined(PNANOVDB_HLSL) return countbits(value); #elif defined(PNANOVDB_GLSL) return bitCount(value); #endif } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_uint64_countbits(pnanovdb_uint64_t value) { return pnanovdb_uint32_countbits(pnanovdb_uint64_low(value)) + pnanovdb_uint32_countbits(pnanovdb_uint64_high(value)); } #if defined(PNANOVDB_ADDRESS_32) PNANOVDB_FORCE_INLINE pnanovdb_uint64_t 
pnanovdb_uint64_offset(pnanovdb_uint64_t a, pnanovdb_uint32_t b) { pnanovdb_uint32_t low = pnanovdb_uint64_low(a); pnanovdb_uint32_t high = pnanovdb_uint64_high(a); low += b; if (low < b) { high += 1u; } return pnanovdb_uint32_as_uint64(low, high); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_uint64_dec(pnanovdb_uint64_t a) { pnanovdb_uint32_t low = pnanovdb_uint64_low(a); pnanovdb_uint32_t high = pnanovdb_uint64_high(a); if (low == 0u) { high -= 1u; } low -= 1u; return pnanovdb_uint32_as_uint64(low, high); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_uint64_to_uint32_lsr(pnanovdb_uint64_t a, pnanovdb_uint32_t b) { pnanovdb_uint32_t low = pnanovdb_uint64_low(a); pnanovdb_uint32_t high = pnanovdb_uint64_high(a); return (b >= 32u) ? (high >> (b - 32)) : ((low >> b) | ((b > 0) ? (high << (32u - b)) : 0u)); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_uint64_bit_mask(pnanovdb_uint32_t bit_idx) { pnanovdb_uint32_t mask_low = bit_idx < 32u ? 1u << bit_idx : 0u; pnanovdb_uint32_t mask_high = bit_idx >= 32u ? 1u << (bit_idx - 32u) : 0u; return pnanovdb_uint32_as_uint64(mask_low, mask_high); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_uint64_and(pnanovdb_uint64_t a, pnanovdb_uint64_t b) { return pnanovdb_uint32_as_uint64( pnanovdb_uint64_low(a) & pnanovdb_uint64_low(b), pnanovdb_uint64_high(a) & pnanovdb_uint64_high(b) ); } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_uint64_any_bit(pnanovdb_uint64_t a) { return pnanovdb_uint64_low(a) != 0u || pnanovdb_uint64_high(a) != 0u; } #else PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_uint64_offset(pnanovdb_uint64_t a, pnanovdb_uint32_t b) { return a + b; } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_uint64_dec(pnanovdb_uint64_t a) { return a - 1u; } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_uint64_to_uint32_lsr(pnanovdb_uint64_t a, pnanovdb_uint32_t b) { return pnanovdb_uint64_low(a >> b); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_uint64_bit_mask(pnanovdb_uint32_t bit_idx) { return 1llu << bit_idx; } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_uint64_and(pnanovdb_uint64_t a, pnanovdb_uint64_t b) { return a & b; } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_uint64_any_bit(pnanovdb_uint64_t a) { return a != 0llu; } #endif // ------------------------------------------------ Address Type ----------------------------------------------------------- #if defined(PNANOVDB_ADDRESS_32) struct pnanovdb_address_t { pnanovdb_uint32_t byte_offset; }; PNANOVDB_STRUCT_TYPEDEF(pnanovdb_address_t) PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset(pnanovdb_address_t address, pnanovdb_uint32_t byte_offset) { pnanovdb_address_t ret = address; ret.byte_offset += byte_offset; return ret; } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset_neg(pnanovdb_address_t address, pnanovdb_uint32_t byte_offset) { pnanovdb_address_t ret = address; ret.byte_offset -= byte_offset; return ret; } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset_product(pnanovdb_address_t address, pnanovdb_uint32_t byte_offset, pnanovdb_uint32_t multiplier) { pnanovdb_address_t ret = address; ret.byte_offset += byte_offset * multiplier; return ret; } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset64(pnanovdb_address_t address, pnanovdb_uint64_t byte_offset) { pnanovdb_address_t ret = address; // lose high bits on 32-bit ret.byte_offset += pnanovdb_uint64_low(byte_offset); return ret; } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset64_product(pnanovdb_address_t address, 
pnanovdb_uint64_t byte_offset, pnanovdb_uint32_t multiplier) { pnanovdb_address_t ret = address; ret.byte_offset += pnanovdb_uint64_low(byte_offset) * multiplier; return ret; } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_address_mask(pnanovdb_address_t address, pnanovdb_uint32_t mask) { return address.byte_offset & mask; } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_mask_inv(pnanovdb_address_t address, pnanovdb_uint32_t mask) { pnanovdb_address_t ret = address; ret.byte_offset &= (~mask); return ret; } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_null() { pnanovdb_address_t ret = { 0 }; return ret; } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_address_is_null(pnanovdb_address_t address) { return address.byte_offset == 0u; } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_address_in_interval(pnanovdb_address_t address, pnanovdb_address_t min_address, pnanovdb_address_t max_address) { return address.byte_offset >= min_address.byte_offset && address.byte_offset < max_address.byte_offset; } #elif defined(PNANOVDB_ADDRESS_64) struct pnanovdb_address_t { pnanovdb_uint64_t byte_offset; }; PNANOVDB_STRUCT_TYPEDEF(pnanovdb_address_t) PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset(pnanovdb_address_t address, pnanovdb_uint32_t byte_offset) { pnanovdb_address_t ret = address; ret.byte_offset += byte_offset; return ret; } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset_neg(pnanovdb_address_t address, pnanovdb_uint32_t byte_offset) { pnanovdb_address_t ret = address; ret.byte_offset -= byte_offset; return ret; } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset_product(pnanovdb_address_t address, pnanovdb_uint32_t byte_offset, pnanovdb_uint32_t multiplier) { pnanovdb_address_t ret = address; ret.byte_offset += pnanovdb_uint32_as_uint64_low(byte_offset) * pnanovdb_uint32_as_uint64_low(multiplier); return ret; } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset64(pnanovdb_address_t address, pnanovdb_uint64_t byte_offset) { pnanovdb_address_t ret = address; ret.byte_offset += byte_offset; return ret; } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset64_product(pnanovdb_address_t address, pnanovdb_uint64_t byte_offset, pnanovdb_uint32_t multiplier) { pnanovdb_address_t ret = address; ret.byte_offset += byte_offset * pnanovdb_uint32_as_uint64_low(multiplier); return ret; } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_address_mask(pnanovdb_address_t address, pnanovdb_uint32_t mask) { return pnanovdb_uint64_low(address.byte_offset) & mask; } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_mask_inv(pnanovdb_address_t address, pnanovdb_uint32_t mask) { pnanovdb_address_t ret = address; ret.byte_offset &= (~pnanovdb_uint32_as_uint64_low(mask)); return ret; } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_null() { pnanovdb_address_t ret = { 0 }; return ret; } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_address_is_null(pnanovdb_address_t address) { return address.byte_offset == 0llu; } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_address_in_interval(pnanovdb_address_t address, pnanovdb_address_t min_address, pnanovdb_address_t max_address) { return address.byte_offset >= min_address.byte_offset && address.byte_offset < max_address.byte_offset; } #endif // ------------------------------------------------ High Level Buffer Read ----------------------------------------------------------- PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_read_uint32(pnanovdb_buf_t buf, 
pnanovdb_address_t address) { return pnanovdb_buf_read_uint32(buf, address.byte_offset); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_read_uint64(pnanovdb_buf_t buf, pnanovdb_address_t address) { return pnanovdb_buf_read_uint64(buf, address.byte_offset); } PNANOVDB_FORCE_INLINE pnanovdb_int32_t pnanovdb_read_int32(pnanovdb_buf_t buf, pnanovdb_address_t address) { return pnanovdb_uint32_as_int32(pnanovdb_read_uint32(buf, address)); } PNANOVDB_FORCE_INLINE float pnanovdb_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address) { return pnanovdb_uint32_as_float(pnanovdb_read_uint32(buf, address)); } PNANOVDB_FORCE_INLINE pnanovdb_int64_t pnanovdb_read_int64(pnanovdb_buf_t buf, pnanovdb_address_t address) { return pnanovdb_uint64_as_int64(pnanovdb_read_uint64(buf, address)); } PNANOVDB_FORCE_INLINE double pnanovdb_read_double(pnanovdb_buf_t buf, pnanovdb_address_t address) { return pnanovdb_uint64_as_double(pnanovdb_read_uint64(buf, address)); } PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_read_coord(pnanovdb_buf_t buf, pnanovdb_address_t address) { pnanovdb_coord_t ret; ret.x = pnanovdb_uint32_as_int32(pnanovdb_read_uint32(buf, pnanovdb_address_offset(address, 0u))); ret.y = pnanovdb_uint32_as_int32(pnanovdb_read_uint32(buf, pnanovdb_address_offset(address, 4u))); ret.z = pnanovdb_uint32_as_int32(pnanovdb_read_uint32(buf, pnanovdb_address_offset(address, 8u))); return ret; } PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_read_vec3(pnanovdb_buf_t buf, pnanovdb_address_t address) { pnanovdb_vec3_t ret; ret.x = pnanovdb_read_float(buf, pnanovdb_address_offset(address, 0u)); ret.y = pnanovdb_read_float(buf, pnanovdb_address_offset(address, 4u)); ret.z = pnanovdb_read_float(buf, pnanovdb_address_offset(address, 8u)); return ret; } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_read_uint16(pnanovdb_buf_t buf, pnanovdb_address_t address) { pnanovdb_uint32_t raw = pnanovdb_read_uint32(buf, pnanovdb_address_mask_inv(address, 3u)); return (raw >> (pnanovdb_address_mask(address, 2) << 3)); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_read_uint8(pnanovdb_buf_t buf, pnanovdb_address_t address) { pnanovdb_uint32_t raw = pnanovdb_read_uint32(buf, pnanovdb_address_mask_inv(address, 3u)); return (raw >> (pnanovdb_address_mask(address, 3) << 3)) & 255; } PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_read_vec3u16(pnanovdb_buf_t buf, pnanovdb_address_t address) { pnanovdb_vec3_t ret; const float scale = 1.f / 65535.f; ret.x = scale * pnanovdb_uint32_to_float(pnanovdb_read_uint16(buf, pnanovdb_address_offset(address, 0u))) - 0.5f; ret.y = scale * pnanovdb_uint32_to_float(pnanovdb_read_uint16(buf, pnanovdb_address_offset(address, 2u))) - 0.5f; ret.z = scale * pnanovdb_uint32_to_float(pnanovdb_read_uint16(buf, pnanovdb_address_offset(address, 4u))) - 0.5f; return ret; } PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_read_vec3u8(pnanovdb_buf_t buf, pnanovdb_address_t address) { pnanovdb_vec3_t ret; const float scale = 1.f / 255.f; ret.x = scale * pnanovdb_uint32_to_float(pnanovdb_read_uint8(buf, pnanovdb_address_offset(address, 0u))) - 0.5f; ret.y = scale * pnanovdb_uint32_to_float(pnanovdb_read_uint8(buf, pnanovdb_address_offset(address, 1u))) - 0.5f; ret.z = scale * pnanovdb_uint32_to_float(pnanovdb_read_uint8(buf, pnanovdb_address_offset(address, 2u))) - 0.5f; return ret; } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_read_bit(pnanovdb_buf_t buf, pnanovdb_address_t address, pnanovdb_uint32_t bit_offset) { pnanovdb_address_t word_address = pnanovdb_address_mask_inv(address, 3u); 
pnanovdb_uint32_t bit_index = (pnanovdb_address_mask(address, 3u) << 3u) + bit_offset; pnanovdb_uint32_t value_word = pnanovdb_buf_read_uint32(buf, word_address.byte_offset); return ((value_word >> bit_index) & 1) != 0u; } #if defined(PNANOVDB_C) PNANOVDB_FORCE_INLINE short pnanovdb_read_half(pnanovdb_buf_t buf, pnanovdb_address_t address) { pnanovdb_uint32_t raw = pnanovdb_read_uint32(buf, address); return (short)(raw >> (pnanovdb_address_mask(address, 2) << 3)); } #elif defined(PNANOVDB_HLSL) PNANOVDB_FORCE_INLINE float pnanovdb_read_half(pnanovdb_buf_t buf, pnanovdb_address_t address) { pnanovdb_uint32_t raw = pnanovdb_read_uint32(buf, address); return f16tof32(raw >> (pnanovdb_address_mask(address, 2) << 3)); } #elif defined(PNANOVDB_GLSL) PNANOVDB_FORCE_INLINE float pnanovdb_read_half(pnanovdb_buf_t buf, pnanovdb_address_t address) { pnanovdb_uint32_t raw = pnanovdb_read_uint32(buf, address); return unpackHalf2x16(raw >> (pnanovdb_address_mask(address, 2) << 3)).x; } #endif // ------------------------------------------------ High Level Buffer Write ----------------------------------------------------------- PNANOVDB_FORCE_INLINE void pnanovdb_write_uint32(pnanovdb_buf_t buf, pnanovdb_address_t address, pnanovdb_uint32_t value) { pnanovdb_buf_write_uint32(buf, address.byte_offset, value); } PNANOVDB_FORCE_INLINE void pnanovdb_write_uint64(pnanovdb_buf_t buf, pnanovdb_address_t address, pnanovdb_uint64_t value) { pnanovdb_buf_write_uint64(buf, address.byte_offset, value); } PNANOVDB_FORCE_INLINE void pnanovdb_write_int32(pnanovdb_buf_t buf, pnanovdb_address_t address, pnanovdb_int32_t value) { pnanovdb_write_uint32(buf, address, pnanovdb_int32_as_uint32(value)); } PNANOVDB_FORCE_INLINE void pnanovdb_write_int64(pnanovdb_buf_t buf, pnanovdb_address_t address, pnanovdb_int64_t value) { pnanovdb_buf_write_uint64(buf, address.byte_offset, pnanovdb_int64_as_uint64(value)); } PNANOVDB_FORCE_INLINE void pnanovdb_write_float(pnanovdb_buf_t buf, pnanovdb_address_t address, float value) { pnanovdb_write_uint32(buf, address, pnanovdb_float_as_uint32(value)); } PNANOVDB_FORCE_INLINE void pnanovdb_write_double(pnanovdb_buf_t buf, pnanovdb_address_t address, double value) { pnanovdb_write_uint64(buf, address, pnanovdb_double_as_uint64(value)); } PNANOVDB_FORCE_INLINE void pnanovdb_write_coord(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) value) { pnanovdb_write_uint32(buf, pnanovdb_address_offset(address, 0u), pnanovdb_int32_as_uint32(PNANOVDB_DEREF(value).x)); pnanovdb_write_uint32(buf, pnanovdb_address_offset(address, 4u), pnanovdb_int32_as_uint32(PNANOVDB_DEREF(value).y)); pnanovdb_write_uint32(buf, pnanovdb_address_offset(address, 8u), pnanovdb_int32_as_uint32(PNANOVDB_DEREF(value).z)); } PNANOVDB_FORCE_INLINE void pnanovdb_write_vec3(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_vec3_t) value) { pnanovdb_write_float(buf, pnanovdb_address_offset(address, 0u), PNANOVDB_DEREF(value).x); pnanovdb_write_float(buf, pnanovdb_address_offset(address, 4u), PNANOVDB_DEREF(value).y); pnanovdb_write_float(buf, pnanovdb_address_offset(address, 8u), PNANOVDB_DEREF(value).z); } // ------------------------------------------------ Core Structures ----------------------------------------------------------- #define PNANOVDB_MAGIC_NUMBER 0x304244566f6e614eUL// "NanoVDB0" in hex - little endian (uint64_t) #define PNANOVDB_MAGIC_GRID 0x314244566f6e614eUL// "NanoVDB1" in hex - little endian (uint64_t) #define PNANOVDB_MAGIC_FILE 0x324244566f6e614eUL// "NanoVDB2" in 
hex - little endian (uint64_t) #define PNANOVDB_MAJOR_VERSION_NUMBER 32// reflects changes to the ABI #define PNANOVDB_MINOR_VERSION_NUMBER 7// reflects changes to the API but not ABI #define PNANOVDB_PATCH_VERSION_NUMBER 0// reflects bug-fixes with no ABI or API changes #define PNANOVDB_GRID_TYPE_UNKNOWN 0 #define PNANOVDB_GRID_TYPE_FLOAT 1 #define PNANOVDB_GRID_TYPE_DOUBLE 2 #define PNANOVDB_GRID_TYPE_INT16 3 #define PNANOVDB_GRID_TYPE_INT32 4 #define PNANOVDB_GRID_TYPE_INT64 5 #define PNANOVDB_GRID_TYPE_VEC3F 6 #define PNANOVDB_GRID_TYPE_VEC3D 7 #define PNANOVDB_GRID_TYPE_MASK 8 #define PNANOVDB_GRID_TYPE_HALF 9 #define PNANOVDB_GRID_TYPE_UINT32 10 #define PNANOVDB_GRID_TYPE_BOOLEAN 11 #define PNANOVDB_GRID_TYPE_RGBA8 12 #define PNANOVDB_GRID_TYPE_FP4 13 #define PNANOVDB_GRID_TYPE_FP8 14 #define PNANOVDB_GRID_TYPE_FP16 15 #define PNANOVDB_GRID_TYPE_FPN 16 #define PNANOVDB_GRID_TYPE_VEC4F 17 #define PNANOVDB_GRID_TYPE_VEC4D 18 #define PNANOVDB_GRID_TYPE_INDEX 19 #define PNANOVDB_GRID_TYPE_ONINDEX 20 #define PNANOVDB_GRID_TYPE_INDEXMASK 21 #define PNANOVDB_GRID_TYPE_ONINDEXMASK 22 #define PNANOVDB_GRID_TYPE_POINTINDEX 23 #define PNANOVDB_GRID_TYPE_VEC3U8 24 #define PNANOVDB_GRID_TYPE_VEC3U16 25 #define PNANOVDB_GRID_TYPE_UINT8 26 #define PNANOVDB_GRID_TYPE_END 27 #define PNANOVDB_GRID_CLASS_UNKNOWN 0 #define PNANOVDB_GRID_CLASS_LEVEL_SET 1 // narrow band level set, e.g. SDF #define PNANOVDB_GRID_CLASS_FOG_VOLUME 2 // fog volume, e.g. density #define PNANOVDB_GRID_CLASS_STAGGERED 3 // staggered MAC grid, e.g. velocity #define PNANOVDB_GRID_CLASS_POINT_INDEX 4 // point index grid #define PNANOVDB_GRID_CLASS_POINT_DATA 5 // point data grid #define PNANOVDB_GRID_CLASS_TOPOLOGY 6 // grid with active states only (no values) #define PNANOVDB_GRID_CLASS_VOXEL_VOLUME 7 // volume of geometric cubes, e.g. minecraft #define PNANOVDB_GRID_CLASS_INDEX_GRID 8 // grid whose values are offsets, e.g. into an external array #define PNANOVDB_GRID_CLASS_TENSOR_GRID 9 // grid which can have extra metadata and features #define PNANOVDB_GRID_CLASS_END 10 #define PNANOVDB_GRID_FLAGS_HAS_LONG_GRID_NAME (1 << 0) #define PNANOVDB_GRID_FLAGS_HAS_BBOX (1 << 1) #define PNANOVDB_GRID_FLAGS_HAS_MIN_MAX (1 << 2) #define PNANOVDB_GRID_FLAGS_HAS_AVERAGE (1 << 3) #define PNANOVDB_GRID_FLAGS_HAS_STD_DEVIATION (1 << 4) #define PNANOVDB_GRID_FLAGS_IS_BREADTH_FIRST (1 << 5) #define PNANOVDB_GRID_FLAGS_END (1 << 6) #define PNANOVDB_LEAF_TYPE_DEFAULT 0 #define PNANOVDB_LEAF_TYPE_LITE 1 #define PNANOVDB_LEAF_TYPE_FP 2 #define PNANOVDB_LEAF_TYPE_INDEX 3 #define PNANOVDB_LEAF_TYPE_INDEXMASK 4 #define PNANOVDB_LEAF_TYPE_POINTINDEX 5 // BuildType = Unknown, float, double, int16_t, int32_t, int64_t, Vec3f, Vec3d, Mask, ... // bit count of values in leaf nodes, i.e. 8*sizeof(*nanovdb::LeafNode<BuildType>::mValues) or zero if no values are stored PNANOVDB_STATIC_CONST pnanovdb_uint32_t pnanovdb_grid_type_value_strides_bits[PNANOVDB_GRID_TYPE_END] = { 0, 32, 64, 16, 32, 64, 96, 192, 0, 16, 32, 1, 32, 4, 8, 16, 0, 128, 256, 0, 0, 0, 0, 16, 24, 48, 8 }; // bit count of the Tile union in InternalNodes, i.e. 8*sizeof(nanovdb::InternalData::Tile) PNANOVDB_STATIC_CONST pnanovdb_uint32_t pnanovdb_grid_type_table_strides_bits[PNANOVDB_GRID_TYPE_END] = { 64, 64, 64, 64, 64, 64, 128, 192, 64, 64, 64, 64, 64, 64, 64, 64, 64, 128, 256, 64, 64, 64, 64, 64, 64, 64, 64 }; // bit count of min/max values, i.e. 
8*sizeof(nanovdb::LeafData::mMinimum) or zero if no min/max exists PNANOVDB_STATIC_CONST pnanovdb_uint32_t pnanovdb_grid_type_minmax_strides_bits[PNANOVDB_GRID_TYPE_END] = { 0, 32, 64, 16, 32, 64, 96, 192, 8, 16, 32, 8, 32, 32, 32, 32, 32, 128, 256, 64, 64, 64, 64, 64, 24, 48, 8 }; // bit alignment of the value type, controlled by the smallest native type, which is why it is always 0, 8, 16, 32, or 64, e.g. for Vec3f it is 32 PNANOVDB_STATIC_CONST pnanovdb_uint32_t pnanovdb_grid_type_minmax_aligns_bits[PNANOVDB_GRID_TYPE_END] = { 0, 32, 64, 16, 32, 64, 32, 64, 8, 16, 32, 8, 32, 32, 32, 32, 32, 32, 64, 64, 64, 64, 64, 64, 8, 16, 8 }; // bit alignment of the stats (avg/std-dev) types, e.g. 8*sizeof(nanovdb::LeafData::mAverage) PNANOVDB_STATIC_CONST pnanovdb_uint32_t pnanovdb_grid_type_stat_strides_bits[PNANOVDB_GRID_TYPE_END] = { 0, 32, 64, 32, 32, 64, 32, 64, 8, 32, 32, 8, 32, 32, 32, 32, 32, 32, 64, 64, 64, 64, 64, 64, 32, 32, 32 }; // one of the 4 leaf types defined above, e.g. PNANOVDB_LEAF_TYPE_INDEX = 3 PNANOVDB_STATIC_CONST pnanovdb_uint32_t pnanovdb_grid_type_leaf_type[PNANOVDB_GRID_TYPE_END] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 2, 2, 2, 2, 0, 0, 3, 3, 4, 4, 5, 0, 0, 0 }; struct pnanovdb_map_t { float matf[9]; float invmatf[9]; float vecf[3]; float taperf; double matd[9]; double invmatd[9]; double vecd[3]; double taperd; }; PNANOVDB_STRUCT_TYPEDEF(pnanovdb_map_t) struct pnanovdb_map_handle_t { pnanovdb_address_t address; }; PNANOVDB_STRUCT_TYPEDEF(pnanovdb_map_handle_t) #define PNANOVDB_MAP_SIZE 264 #define PNANOVDB_MAP_OFF_MATF 0 #define PNANOVDB_MAP_OFF_INVMATF 36 #define PNANOVDB_MAP_OFF_VECF 72 #define PNANOVDB_MAP_OFF_TAPERF 84 #define PNANOVDB_MAP_OFF_MATD 88 #define PNANOVDB_MAP_OFF_INVMATD 160 #define PNANOVDB_MAP_OFF_VECD 232 #define PNANOVDB_MAP_OFF_TAPERD 256 PNANOVDB_FORCE_INLINE float pnanovdb_map_get_matf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) { return pnanovdb_read_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_MATF + 4u * index)); } PNANOVDB_FORCE_INLINE float pnanovdb_map_get_invmatf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) { return pnanovdb_read_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_INVMATF + 4u * index)); } PNANOVDB_FORCE_INLINE float pnanovdb_map_get_vecf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) { return pnanovdb_read_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_VECF + 4u * index)); } PNANOVDB_FORCE_INLINE float pnanovdb_map_get_taperf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) { return pnanovdb_read_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_TAPERF)); } PNANOVDB_FORCE_INLINE double pnanovdb_map_get_matd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) { return pnanovdb_read_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_MATD + 8u * index)); } PNANOVDB_FORCE_INLINE double pnanovdb_map_get_invmatd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) { return pnanovdb_read_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_INVMATD + 8u * index)); } PNANOVDB_FORCE_INLINE double pnanovdb_map_get_vecd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) { return pnanovdb_read_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_VECD + 8u * index)); } PNANOVDB_FORCE_INLINE double pnanovdb_map_get_taperd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) { 
return pnanovdb_read_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_TAPERD)); } PNANOVDB_FORCE_INLINE void pnanovdb_map_set_matf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, float matf) { pnanovdb_write_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_MATF + 4u * index), matf); } PNANOVDB_FORCE_INLINE void pnanovdb_map_set_invmatf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, float invmatf) { pnanovdb_write_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_INVMATF + 4u * index), invmatf); } PNANOVDB_FORCE_INLINE void pnanovdb_map_set_vecf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, float vecf) { pnanovdb_write_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_VECF + 4u * index), vecf); } PNANOVDB_FORCE_INLINE void pnanovdb_map_set_taperf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, float taperf) { pnanovdb_write_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_TAPERF), taperf); } PNANOVDB_FORCE_INLINE void pnanovdb_map_set_matd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, double matd) { pnanovdb_write_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_MATD + 8u * index), matd); } PNANOVDB_FORCE_INLINE void pnanovdb_map_set_invmatd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, double invmatd) { pnanovdb_write_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_INVMATD + 8u * index), invmatd); } PNANOVDB_FORCE_INLINE void pnanovdb_map_set_vecd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, double vecd) { pnanovdb_write_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_VECD + 8u * index), vecd); } PNANOVDB_FORCE_INLINE void pnanovdb_map_set_taperd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, double taperd) { pnanovdb_write_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_TAPERD), taperd); } struct pnanovdb_grid_t { pnanovdb_uint64_t magic; // 8 bytes, 0 pnanovdb_uint64_t checksum; // 8 bytes, 8 pnanovdb_uint32_t version; // 4 bytes, 16 pnanovdb_uint32_t flags; // 4 bytes, 20 pnanovdb_uint32_t grid_index; // 4 bytes, 24 pnanovdb_uint32_t grid_count; // 4 bytes, 28 pnanovdb_uint64_t grid_size; // 8 bytes, 32 pnanovdb_uint32_t grid_name[256 / 4]; // 256 bytes, 40 pnanovdb_map_t map; // 264 bytes, 296 double world_bbox[6]; // 48 bytes, 560 double voxel_size[3]; // 24 bytes, 608 pnanovdb_uint32_t grid_class; // 4 bytes, 632 pnanovdb_uint32_t grid_type; // 4 bytes, 636 pnanovdb_int64_t blind_metadata_offset; // 8 bytes, 640 pnanovdb_uint32_t blind_metadata_count; // 4 bytes, 648 pnanovdb_uint32_t pad[5]; // 20 bytes, 652 }; PNANOVDB_STRUCT_TYPEDEF(pnanovdb_grid_t) struct pnanovdb_grid_handle_t { pnanovdb_address_t address; }; PNANOVDB_STRUCT_TYPEDEF(pnanovdb_grid_handle_t) #define PNANOVDB_GRID_SIZE 672 #define PNANOVDB_GRID_OFF_MAGIC 0 #define PNANOVDB_GRID_OFF_CHECKSUM 8 #define PNANOVDB_GRID_OFF_VERSION 16 #define PNANOVDB_GRID_OFF_FLAGS 20 #define PNANOVDB_GRID_OFF_GRID_INDEX 24 #define PNANOVDB_GRID_OFF_GRID_COUNT 28 #define PNANOVDB_GRID_OFF_GRID_SIZE 32 #define PNANOVDB_GRID_OFF_GRID_NAME 40 #define PNANOVDB_GRID_OFF_MAP 296 #define PNANOVDB_GRID_OFF_WORLD_BBOX 560 #define PNANOVDB_GRID_OFF_VOXEL_SIZE 608 #define PNANOVDB_GRID_OFF_GRID_CLASS 632 #define PNANOVDB_GRID_OFF_GRID_TYPE 636 #define PNANOVDB_GRID_OFF_BLIND_METADATA_OFFSET 640 #define 
PNANOVDB_GRID_OFF_BLIND_METADATA_COUNT 648 PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_grid_get_magic(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) { return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_MAGIC)); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_grid_get_checksum(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) { return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_CHECKSUM)); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_version(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) { return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_VERSION)); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_flags(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) { return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_FLAGS)); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_grid_index(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) { return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_INDEX)); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_grid_count(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) { return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_COUNT)); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_grid_get_grid_size(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) { return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_SIZE)); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_grid_name(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t index) { return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_NAME + 4u * index)); } PNANOVDB_FORCE_INLINE pnanovdb_map_handle_t pnanovdb_grid_get_map(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) { pnanovdb_map_handle_t ret; ret.address = pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_MAP); return ret; } PNANOVDB_FORCE_INLINE double pnanovdb_grid_get_world_bbox(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t index) { return pnanovdb_read_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_WORLD_BBOX + 8u * index)); } PNANOVDB_FORCE_INLINE double pnanovdb_grid_get_voxel_size(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t index) { return pnanovdb_read_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_VOXEL_SIZE + 8u * index)); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_grid_class(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) { return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_CLASS)); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_grid_type(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) { return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_TYPE)); } PNANOVDB_FORCE_INLINE pnanovdb_int64_t pnanovdb_grid_get_blind_metadata_offset(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) { return pnanovdb_read_int64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_BLIND_METADATA_OFFSET)); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_blind_metadata_count(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) { return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_BLIND_METADATA_COUNT)); } PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_magic(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint64_t magic) 
{ pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_MAGIC), magic); } PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_checksum(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint64_t checksum) { pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_CHECKSUM), checksum); } PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_version(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t version) { pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_VERSION), version); } PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_flags(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t flags) { pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_FLAGS), flags); } PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_grid_index(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t grid_index) { pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_INDEX), grid_index); } PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_grid_count(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t grid_count) { pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_COUNT), grid_count); } PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_grid_size(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint64_t grid_size) { pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_SIZE), grid_size); } PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_grid_name(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t index, pnanovdb_uint32_t grid_name) { pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_NAME + 4u * index), grid_name); } PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_world_bbox(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t index, double world_bbox) { pnanovdb_write_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_WORLD_BBOX + 8u * index), world_bbox); } PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_voxel_size(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t index, double voxel_size) { pnanovdb_write_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_VOXEL_SIZE + 8u * index), voxel_size); } PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_grid_class(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t grid_class) { pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_CLASS), grid_class); } PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_grid_type(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t grid_type) { pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_TYPE), grid_type); } PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_blind_metadata_offset(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint64_t blind_metadata_offset) { pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_BLIND_METADATA_OFFSET), blind_metadata_offset); } PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_blind_metadata_count(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t metadata_count) { pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_BLIND_METADATA_COUNT), metadata_count); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_make_version(pnanovdb_uint32_t major, pnanovdb_uint32_t minor, pnanovdb_uint32_t patch_num) { return (major << 21u) | (minor << 10u) | patch_num; } 
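// Example usage (illustrative sketch; pnanovdb_example_version_is_current is a
// hypothetical helper, not part of the PNanoVDB API): the version word packs
// major into bits [31:21], minor into bits [20:10], and patch into bits [9:0],
// so the version numbers defined above pack to
// (32u << 21u) | (7u << 10u) | 0u == 0x04001C00u.
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_example_version_is_current(pnanovdb_uint32_t version)
{
    // compare against the packed version built from the defines at the top of this header
    return version == pnanovdb_make_version(PNANOVDB_MAJOR_VERSION_NUMBER, PNANOVDB_MINOR_VERSION_NUMBER, PNANOVDB_PATCH_VERSION_NUMBER);
}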
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_version_get_major(pnanovdb_uint32_t version) { return (version >> 21u) & ((1u << 11u) - 1u); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_version_get_minor(pnanovdb_uint32_t version) { return (version >> 10u) & ((1u << 11u) - 1u); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_version_get_patch(pnanovdb_uint32_t version) { return version & ((1u << 10u) - 1u); } struct pnanovdb_gridblindmetadata_t { pnanovdb_int64_t data_offset; // 8 bytes, 0 pnanovdb_uint64_t value_count; // 8 bytes, 8 pnanovdb_uint32_t value_size; // 4 bytes, 16 pnanovdb_uint32_t semantic; // 4 bytes, 20 pnanovdb_uint32_t data_class; // 4 bytes, 24 pnanovdb_uint32_t data_type; // 4 bytes, 28 pnanovdb_uint32_t name[256 / 4]; // 256 bytes, 32 }; PNANOVDB_STRUCT_TYPEDEF(pnanovdb_gridblindmetadata_t) struct pnanovdb_gridblindmetadata_handle_t { pnanovdb_address_t address; }; PNANOVDB_STRUCT_TYPEDEF(pnanovdb_gridblindmetadata_handle_t) #define PNANOVDB_GRIDBLINDMETADATA_SIZE 288 #define PNANOVDB_GRIDBLINDMETADATA_OFF_DATA_OFFSET 0 #define PNANOVDB_GRIDBLINDMETADATA_OFF_VALUE_COUNT 8 #define PNANOVDB_GRIDBLINDMETADATA_OFF_VALUE_SIZE 16 #define PNANOVDB_GRIDBLINDMETADATA_OFF_SEMANTIC 20 #define PNANOVDB_GRIDBLINDMETADATA_OFF_DATA_CLASS 24 #define PNANOVDB_GRIDBLINDMETADATA_OFF_DATA_TYPE 28 #define PNANOVDB_GRIDBLINDMETADATA_OFF_NAME 32 PNANOVDB_FORCE_INLINE pnanovdb_int64_t pnanovdb_gridblindmetadata_get_data_offset(pnanovdb_buf_t buf, pnanovdb_gridblindmetadata_handle_t p) { return pnanovdb_read_int64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRIDBLINDMETADATA_OFF_DATA_OFFSET)); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_gridblindmetadata_get_value_count(pnanovdb_buf_t buf, pnanovdb_gridblindmetadata_handle_t p) { return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRIDBLINDMETADATA_OFF_VALUE_COUNT)); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_gridblindmetadata_get_value_size(pnanovdb_buf_t buf, pnanovdb_gridblindmetadata_handle_t p) { return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRIDBLINDMETADATA_OFF_VALUE_SIZE)); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_gridblindmetadata_get_semantic(pnanovdb_buf_t buf, pnanovdb_gridblindmetadata_handle_t p) { return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRIDBLINDMETADATA_OFF_SEMANTIC)); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_gridblindmetadata_get_data_class(pnanovdb_buf_t buf, pnanovdb_gridblindmetadata_handle_t p) { return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRIDBLINDMETADATA_OFF_DATA_CLASS)); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_gridblindmetadata_get_data_type(pnanovdb_buf_t buf, pnanovdb_gridblindmetadata_handle_t p) { return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRIDBLINDMETADATA_OFF_DATA_TYPE)); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_gridblindmetadata_get_name(pnanovdb_buf_t buf, pnanovdb_gridblindmetadata_handle_t p, pnanovdb_uint32_t index) { return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRIDBLINDMETADATA_OFF_NAME + 4u * index)); } struct pnanovdb_tree_t { pnanovdb_uint64_t node_offset_leaf; pnanovdb_uint64_t node_offset_lower; pnanovdb_uint64_t node_offset_upper; pnanovdb_uint64_t node_offset_root; pnanovdb_uint32_t node_count_leaf; pnanovdb_uint32_t node_count_lower; pnanovdb_uint32_t node_count_upper; pnanovdb_uint32_t tile_count_leaf; pnanovdb_uint32_t tile_count_lower; 
pnanovdb_uint32_t tile_count_upper; pnanovdb_uint64_t voxel_count; }; PNANOVDB_STRUCT_TYPEDEF(pnanovdb_tree_t) struct pnanovdb_tree_handle_t { pnanovdb_address_t address; }; PNANOVDB_STRUCT_TYPEDEF(pnanovdb_tree_handle_t) #define PNANOVDB_TREE_SIZE 64 #define PNANOVDB_TREE_OFF_NODE_OFFSET_LEAF 0 #define PNANOVDB_TREE_OFF_NODE_OFFSET_LOWER 8 #define PNANOVDB_TREE_OFF_NODE_OFFSET_UPPER 16 #define PNANOVDB_TREE_OFF_NODE_OFFSET_ROOT 24 #define PNANOVDB_TREE_OFF_NODE_COUNT_LEAF 32 #define PNANOVDB_TREE_OFF_NODE_COUNT_LOWER 36 #define PNANOVDB_TREE_OFF_NODE_COUNT_UPPER 40 #define PNANOVDB_TREE_OFF_TILE_COUNT_LEAF 44 #define PNANOVDB_TREE_OFF_TILE_COUNT_LOWER 48 #define PNANOVDB_TREE_OFF_TILE_COUNT_UPPER 52 #define PNANOVDB_TREE_OFF_VOXEL_COUNT 56 PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_tree_get_node_offset_leaf(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) { return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_OFFSET_LEAF)); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_tree_get_node_offset_lower(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) { return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_OFFSET_LOWER)); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_tree_get_node_offset_upper(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) { return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_OFFSET_UPPER)); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_tree_get_node_offset_root(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) { return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_OFFSET_ROOT)); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_tree_get_node_count_leaf(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) { return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_COUNT_LEAF)); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_tree_get_node_count_lower(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) { return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_COUNT_LOWER)); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_tree_get_node_count_upper(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) { return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_COUNT_UPPER)); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_tree_get_tile_count_leaf(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) { return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_TILE_COUNT_LEAF)); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_tree_get_tile_count_lower(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) { return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_TILE_COUNT_LOWER)); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_tree_get_tile_count_upper(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) { return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_TILE_COUNT_UPPER)); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_tree_get_voxel_count(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) { return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_VOXEL_COUNT)); } PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_node_offset_leaf(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint64_t node_offset_leaf) { pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_OFFSET_LEAF), node_offset_leaf); } 
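// Example usage (illustrative sketch; pnanovdb_example_tree_root_address is a
// hypothetical helper): the tree stores byte offsets relative to its own address,
// so the root node is located by adding node_offset_root to the tree address.
// This is the same lookup performed by pnanovdb_tree_get_root() further below.
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_example_tree_root_address(pnanovdb_buf_t buf, pnanovdb_tree_handle_t tree)
{
    // node_offset_root is a 64-bit byte offset from the start of the tree
    pnanovdb_uint64_t byte_offset = pnanovdb_tree_get_node_offset_root(buf, tree);
    return pnanovdb_address_offset64(tree.address, byte_offset);
}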
PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_node_offset_lower(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint64_t node_offset_lower) { pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_OFFSET_LOWER), node_offset_lower); } PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_node_offset_upper(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint64_t node_offset_upper) { pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_OFFSET_UPPER), node_offset_upper); } PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_node_offset_root(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint64_t node_offset_root) { pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_OFFSET_ROOT), node_offset_root); } PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_node_count_leaf(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint32_t node_count_leaf) { pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_COUNT_LEAF), node_count_leaf); } PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_node_count_lower(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint32_t node_count_lower) { pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_COUNT_LOWER), node_count_lower); } PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_node_count_upper(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint32_t node_count_upper) { pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_COUNT_UPPER), node_count_upper); } PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_tile_count_leaf(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint32_t tile_count_leaf) { pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_TILE_COUNT_LEAF), tile_count_leaf); } PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_tile_count_lower(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint32_t tile_count_lower) { pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_TILE_COUNT_LOWER), tile_count_lower); } PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_tile_count_upper(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint32_t tile_count_upper) { pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_TILE_COUNT_UPPER), tile_count_upper); } PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_voxel_count(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint64_t voxel_count) { pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_VOXEL_COUNT), voxel_count); } struct pnanovdb_root_t { pnanovdb_coord_t bbox_min; pnanovdb_coord_t bbox_max; pnanovdb_uint32_t table_size; pnanovdb_uint32_t pad1; // background can start here // background, min, max }; PNANOVDB_STRUCT_TYPEDEF(pnanovdb_root_t) struct pnanovdb_root_handle_t { pnanovdb_address_t address; }; PNANOVDB_STRUCT_TYPEDEF(pnanovdb_root_handle_t) #define PNANOVDB_ROOT_BASE_SIZE 28 #define PNANOVDB_ROOT_OFF_BBOX_MIN 0 #define PNANOVDB_ROOT_OFF_BBOX_MAX 12 #define PNANOVDB_ROOT_OFF_TABLE_SIZE 24 PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_root_get_bbox_min(pnanovdb_buf_t buf, pnanovdb_root_handle_t p) { return pnanovdb_read_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_OFF_BBOX_MIN)); } PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_root_get_bbox_max(pnanovdb_buf_t buf, pnanovdb_root_handle_t p) { return pnanovdb_read_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_OFF_BBOX_MAX)); 
} PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_root_get_tile_count(pnanovdb_buf_t buf, pnanovdb_root_handle_t p) { return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_OFF_TABLE_SIZE)); } PNANOVDB_FORCE_INLINE void pnanovdb_root_set_bbox_min(pnanovdb_buf_t buf, pnanovdb_root_handle_t p, PNANOVDB_IN(pnanovdb_coord_t) bbox_min) { pnanovdb_write_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_OFF_BBOX_MIN), bbox_min); } PNANOVDB_FORCE_INLINE void pnanovdb_root_set_bbox_max(pnanovdb_buf_t buf, pnanovdb_root_handle_t p, PNANOVDB_IN(pnanovdb_coord_t) bbox_max) { pnanovdb_write_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_OFF_BBOX_MAX), bbox_max); } PNANOVDB_FORCE_INLINE void pnanovdb_root_set_tile_count(pnanovdb_buf_t buf, pnanovdb_root_handle_t p, pnanovdb_uint32_t tile_count) { pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_OFF_TABLE_SIZE), tile_count); } struct pnanovdb_root_tile_t { pnanovdb_uint64_t key; pnanovdb_int64_t child; // signed byte offset from root to the child node, 0 means it is a constant tile, so use value pnanovdb_uint32_t state; pnanovdb_uint32_t pad1; // value can start here // value }; PNANOVDB_STRUCT_TYPEDEF(pnanovdb_root_tile_t) struct pnanovdb_root_tile_handle_t { pnanovdb_address_t address; }; PNANOVDB_STRUCT_TYPEDEF(pnanovdb_root_tile_handle_t) #define PNANOVDB_ROOT_TILE_BASE_SIZE 20 #define PNANOVDB_ROOT_TILE_OFF_KEY 0 #define PNANOVDB_ROOT_TILE_OFF_CHILD 8 #define PNANOVDB_ROOT_TILE_OFF_STATE 16 PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_root_tile_get_key(pnanovdb_buf_t buf, pnanovdb_root_tile_handle_t p) { return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_TILE_OFF_KEY)); } PNANOVDB_FORCE_INLINE pnanovdb_int64_t pnanovdb_root_tile_get_child(pnanovdb_buf_t buf, pnanovdb_root_tile_handle_t p) { return pnanovdb_read_int64(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_TILE_OFF_CHILD)); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_root_tile_get_state(pnanovdb_buf_t buf, pnanovdb_root_tile_handle_t p) { return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_TILE_OFF_STATE)); } PNANOVDB_FORCE_INLINE void pnanovdb_root_tile_set_key(pnanovdb_buf_t buf, pnanovdb_root_tile_handle_t p, pnanovdb_uint64_t key) { pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_TILE_OFF_KEY), key); } PNANOVDB_FORCE_INLINE void pnanovdb_root_tile_set_child(pnanovdb_buf_t buf, pnanovdb_root_tile_handle_t p, pnanovdb_int64_t child) { pnanovdb_write_int64(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_TILE_OFF_CHILD), child); } PNANOVDB_FORCE_INLINE void pnanovdb_root_tile_set_state(pnanovdb_buf_t buf, pnanovdb_root_tile_handle_t p, pnanovdb_uint32_t state) { pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_TILE_OFF_STATE), state); } struct pnanovdb_upper_t { pnanovdb_coord_t bbox_min; pnanovdb_coord_t bbox_max; pnanovdb_uint64_t flags; pnanovdb_uint32_t value_mask[1024]; pnanovdb_uint32_t child_mask[1024]; // min, max // alignas(32) pnanovdb_uint32_t table[]; }; PNANOVDB_STRUCT_TYPEDEF(pnanovdb_upper_t) struct pnanovdb_upper_handle_t { pnanovdb_address_t address; }; PNANOVDB_STRUCT_TYPEDEF(pnanovdb_upper_handle_t) #define PNANOVDB_UPPER_TABLE_COUNT 32768 #define PNANOVDB_UPPER_BASE_SIZE 8224 #define PNANOVDB_UPPER_OFF_BBOX_MIN 0 #define PNANOVDB_UPPER_OFF_BBOX_MAX 12 #define PNANOVDB_UPPER_OFF_FLAGS 24 #define PNANOVDB_UPPER_OFF_VALUE_MASK 32 #define 
PNANOVDB_UPPER_OFF_CHILD_MASK 4128 PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_upper_get_bbox_min(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p) { return pnanovdb_read_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_BBOX_MIN)); } PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_upper_get_bbox_max(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p) { return pnanovdb_read_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_BBOX_MAX)); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_upper_get_flags(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p) { return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_FLAGS)); } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_upper_get_value_mask(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p, pnanovdb_uint32_t bit_index) { pnanovdb_uint32_t value = pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_VALUE_MASK + 4u * (bit_index >> 5u))); return ((value >> (bit_index & 31u)) & 1) != 0u; } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_upper_get_child_mask(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p, pnanovdb_uint32_t bit_index) { pnanovdb_uint32_t value = pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_CHILD_MASK + 4u * (bit_index >> 5u))); return ((value >> (bit_index & 31u)) & 1) != 0u; } PNANOVDB_FORCE_INLINE void pnanovdb_upper_set_bbox_min(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p, PNANOVDB_IN(pnanovdb_coord_t) bbox_min) { pnanovdb_write_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_BBOX_MIN), bbox_min); } PNANOVDB_FORCE_INLINE void pnanovdb_upper_set_bbox_max(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p, PNANOVDB_IN(pnanovdb_coord_t) bbox_max) { pnanovdb_write_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_BBOX_MAX), bbox_max); } PNANOVDB_FORCE_INLINE void pnanovdb_upper_set_child_mask(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p, pnanovdb_uint32_t bit_index, pnanovdb_bool_t value) { pnanovdb_address_t addr = pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_CHILD_MASK + 4u * (bit_index >> 5u)); pnanovdb_uint32_t valueMask = pnanovdb_read_uint32(buf, addr); if (!value) { valueMask &= ~(1u << (bit_index & 31u)); } if (value) valueMask |= (1u << (bit_index & 31u)); pnanovdb_write_uint32(buf, addr, valueMask); } struct pnanovdb_lower_t { pnanovdb_coord_t bbox_min; pnanovdb_coord_t bbox_max; pnanovdb_uint64_t flags; pnanovdb_uint32_t value_mask[128]; pnanovdb_uint32_t child_mask[128]; // min, max // alignas(32) pnanovdb_uint32_t table[]; }; PNANOVDB_STRUCT_TYPEDEF(pnanovdb_lower_t) struct pnanovdb_lower_handle_t { pnanovdb_address_t address; }; PNANOVDB_STRUCT_TYPEDEF(pnanovdb_lower_handle_t) #define PNANOVDB_LOWER_TABLE_COUNT 4096 #define PNANOVDB_LOWER_BASE_SIZE 1056 #define PNANOVDB_LOWER_OFF_BBOX_MIN 0 #define PNANOVDB_LOWER_OFF_BBOX_MAX 12 #define PNANOVDB_LOWER_OFF_FLAGS 24 #define PNANOVDB_LOWER_OFF_VALUE_MASK 32 #define PNANOVDB_LOWER_OFF_CHILD_MASK 544 PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_lower_get_bbox_min(pnanovdb_buf_t buf, pnanovdb_lower_handle_t p) { return pnanovdb_read_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_BBOX_MIN)); } PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_lower_get_bbox_max(pnanovdb_buf_t buf, pnanovdb_lower_handle_t p) { return pnanovdb_read_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_BBOX_MAX)); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_lower_get_flags(pnanovdb_buf_t buf, pnanovdb_lower_handle_t 
p) { return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_FLAGS)); } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_lower_get_value_mask(pnanovdb_buf_t buf, pnanovdb_lower_handle_t p, pnanovdb_uint32_t bit_index) { pnanovdb_uint32_t value = pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_VALUE_MASK + 4u * (bit_index >> 5u))); return ((value >> (bit_index & 31u)) & 1) != 0u; } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_lower_get_child_mask(pnanovdb_buf_t buf, pnanovdb_lower_handle_t p, pnanovdb_uint32_t bit_index) { pnanovdb_uint32_t value = pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_CHILD_MASK + 4u * (bit_index >> 5u))); return ((value >> (bit_index & 31u)) & 1) != 0u; } PNANOVDB_FORCE_INLINE void pnanovdb_lower_set_bbox_min(pnanovdb_buf_t buf, pnanovdb_lower_handle_t p, PNANOVDB_IN(pnanovdb_coord_t) bbox_min) { pnanovdb_write_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_BBOX_MIN), bbox_min); } PNANOVDB_FORCE_INLINE void pnanovdb_lower_set_bbox_max(pnanovdb_buf_t buf, pnanovdb_lower_handle_t p, PNANOVDB_IN(pnanovdb_coord_t) bbox_max) { pnanovdb_write_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_BBOX_MAX), bbox_max); } PNANOVDB_FORCE_INLINE void pnanovdb_lower_set_child_mask(pnanovdb_buf_t buf, pnanovdb_lower_handle_t p, pnanovdb_uint32_t bit_index, pnanovdb_bool_t value) { pnanovdb_address_t addr = pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_CHILD_MASK + 4u * (bit_index >> 5u)); pnanovdb_uint32_t valueMask = pnanovdb_read_uint32(buf, addr); if (!value) { valueMask &= ~(1u << (bit_index & 31u)); } if (value) valueMask |= (1u << (bit_index & 31u)); pnanovdb_write_uint32(buf, addr, valueMask); } struct pnanovdb_leaf_t { pnanovdb_coord_t bbox_min; pnanovdb_uint32_t bbox_dif_and_flags; pnanovdb_uint32_t value_mask[16]; // min, max // alignas(32) pnanovdb_uint32_t values[]; }; PNANOVDB_STRUCT_TYPEDEF(pnanovdb_leaf_t) struct pnanovdb_leaf_handle_t { pnanovdb_address_t address; }; PNANOVDB_STRUCT_TYPEDEF(pnanovdb_leaf_handle_t) #define PNANOVDB_LEAF_TABLE_COUNT 512 #define PNANOVDB_LEAF_BASE_SIZE 80 #define PNANOVDB_LEAF_OFF_BBOX_MIN 0 #define PNANOVDB_LEAF_OFF_BBOX_DIF_AND_FLAGS 12 #define PNANOVDB_LEAF_OFF_VALUE_MASK 16 #define PNANOVDB_LEAF_TABLE_NEG_OFF_BBOX_DIF_AND_FLAGS 84 #define PNANOVDB_LEAF_TABLE_NEG_OFF_MINIMUM 16 #define PNANOVDB_LEAF_TABLE_NEG_OFF_QUANTUM 12 PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_leaf_get_bbox_min(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t p) { return pnanovdb_read_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_LEAF_OFF_BBOX_MIN)); } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_leaf_get_bbox_dif_and_flags(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t p) { return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_LEAF_OFF_BBOX_DIF_AND_FLAGS)); } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_leaf_get_value_mask(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t p, pnanovdb_uint32_t bit_index) { pnanovdb_uint32_t value = pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_LEAF_OFF_VALUE_MASK + 4u * (bit_index >> 5u))); return ((value >> (bit_index & 31u)) & 1) != 0u; } PNANOVDB_FORCE_INLINE void pnanovdb_leaf_set_bbox_min(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t p, PNANOVDB_IN(pnanovdb_coord_t) bbox_min) { pnanovdb_write_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_LEAF_OFF_BBOX_MIN), bbox_min); } PNANOVDB_FORCE_INLINE void 
pnanovdb_leaf_set_bbox_dif_and_flags(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t p, pnanovdb_uint32_t bbox_dif_and_flags) { pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_LEAF_OFF_BBOX_DIF_AND_FLAGS), bbox_dif_and_flags); } struct pnanovdb_grid_type_constants_t { pnanovdb_uint32_t root_off_background; pnanovdb_uint32_t root_off_min; pnanovdb_uint32_t root_off_max; pnanovdb_uint32_t root_off_ave; pnanovdb_uint32_t root_off_stddev; pnanovdb_uint32_t root_size; pnanovdb_uint32_t value_stride_bits; pnanovdb_uint32_t table_stride; pnanovdb_uint32_t root_tile_off_value; pnanovdb_uint32_t root_tile_size; pnanovdb_uint32_t upper_off_min; pnanovdb_uint32_t upper_off_max; pnanovdb_uint32_t upper_off_ave; pnanovdb_uint32_t upper_off_stddev; pnanovdb_uint32_t upper_off_table; pnanovdb_uint32_t upper_size; pnanovdb_uint32_t lower_off_min; pnanovdb_uint32_t lower_off_max; pnanovdb_uint32_t lower_off_ave; pnanovdb_uint32_t lower_off_stddev; pnanovdb_uint32_t lower_off_table; pnanovdb_uint32_t lower_size; pnanovdb_uint32_t leaf_off_min; pnanovdb_uint32_t leaf_off_max; pnanovdb_uint32_t leaf_off_ave; pnanovdb_uint32_t leaf_off_stddev; pnanovdb_uint32_t leaf_off_table; pnanovdb_uint32_t leaf_size; }; PNANOVDB_STRUCT_TYPEDEF(pnanovdb_grid_type_constants_t) // The following table with offsets will nedd to be updates as new GridTypes are added in NanoVDB.h PNANOVDB_STATIC_CONST pnanovdb_grid_type_constants_t pnanovdb_grid_type_constants[PNANOVDB_GRID_TYPE_END] = { {28, 28, 28, 28, 28, 32, 0, 8, 20, 32, 8224, 8224, 8224, 8224, 8224, 270368, 1056, 1056, 1056, 1056, 1056, 33824, 80, 80, 80, 80, 96, 96}, {28, 32, 36, 40, 44, 64, 32, 8, 20, 32, 8224, 8228, 8232, 8236, 8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 80, 84, 88, 92, 96, 2144}, {32, 40, 48, 56, 64, 96, 64, 8, 24, 32, 8224, 8232, 8240, 8248, 8256, 270400, 1056, 1064, 1072, 1080, 1088, 33856, 80, 88, 96, 104, 128, 4224}, {28, 30, 32, 36, 40, 64, 16, 8, 20, 32, 8224, 8226, 8228, 8232, 8256, 270400, 1056, 1058, 1060, 1064, 1088, 33856, 80, 82, 84, 88, 96, 1120}, {28, 32, 36, 40, 44, 64, 32, 8, 20, 32, 8224, 8228, 8232, 8236, 8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 80, 84, 88, 92, 96, 2144}, {32, 40, 48, 56, 64, 96, 64, 8, 24, 32, 8224, 8232, 8240, 8248, 8256, 270400, 1056, 1064, 1072, 1080, 1088, 33856, 80, 88, 96, 104, 128, 4224}, {28, 40, 52, 64, 68, 96, 96, 16, 20, 32, 8224, 8236, 8248, 8252, 8256, 532544, 1056, 1068, 1080, 1084, 1088, 66624, 80, 92, 104, 108, 128, 6272}, {32, 56, 80, 104, 112, 128, 192, 24, 24, 64, 8224, 8248, 8272, 8280, 8288, 794720, 1056, 1080, 1104, 1112, 1120, 99424, 80, 104, 128, 136, 160, 12448}, {28, 29, 30, 31, 32, 64, 0, 8, 20, 32, 8224, 8225, 8226, 8227, 8256, 270400, 1056, 1057, 1058, 1059, 1088, 33856, 80, 80, 80, 80, 96, 96}, {28, 30, 32, 36, 40, 64, 16, 8, 20, 32, 8224, 8226, 8228, 8232, 8256, 270400, 1056, 1058, 1060, 1064, 1088, 33856, 80, 82, 84, 88, 96, 1120}, {28, 32, 36, 40, 44, 64, 32, 8, 20, 32, 8224, 8228, 8232, 8236, 8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 80, 84, 88, 92, 96, 2144}, {28, 29, 30, 31, 32, 64, 1, 8, 20, 32, 8224, 8225, 8226, 8227, 8256, 270400, 1056, 1057, 1058, 1059, 1088, 33856, 80, 80, 80, 80, 96, 160}, {28, 32, 36, 40, 44, 64, 32, 8, 20, 32, 8224, 8228, 8232, 8236, 8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 80, 84, 88, 92, 96, 2144}, {28, 32, 36, 40, 44, 64, 0, 8, 20, 32, 8224, 8228, 8232, 8236, 8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 88, 90, 92, 94, 96, 352}, {28, 32, 36, 40, 44, 64, 0, 8, 20, 32, 8224, 8228, 8232, 8236, 
8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 88, 90, 92, 94, 96, 608}, {28, 32, 36, 40, 44, 64, 0, 8, 20, 32, 8224, 8228, 8232, 8236, 8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 88, 90, 92, 94, 96, 1120}, {28, 32, 36, 40, 44, 64, 0, 8, 20, 32, 8224, 8228, 8232, 8236, 8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 88, 90, 92, 94, 96, 96}, {28, 44, 60, 76, 80, 96, 128, 16, 20, 64, 8224, 8240, 8256, 8260, 8288, 532576, 1056, 1072, 1088, 1092, 1120, 66656, 80, 96, 112, 116, 128, 8320}, {32, 64, 96, 128, 136, 160, 256, 32, 24, 64, 8224, 8256, 8288, 8296, 8320, 1056896, 1056, 1088, 1120, 1128, 1152, 132224, 80, 112, 144, 152, 160, 16544}, {32, 40, 48, 56, 64, 96, 0, 8, 24, 32, 8224, 8232, 8240, 8248, 8256, 270400, 1056, 1064, 1072, 1080, 1088, 33856, 80, 80, 80, 80, 80, 96}, {32, 40, 48, 56, 64, 96, 0, 8, 24, 32, 8224, 8232, 8240, 8248, 8256, 270400, 1056, 1064, 1072, 1080, 1088, 33856, 80, 80, 80, 80, 80, 96}, {32, 40, 48, 56, 64, 96, 0, 8, 24, 32, 8224, 8232, 8240, 8248, 8256, 270400, 1056, 1064, 1072, 1080, 1088, 33856, 80, 80, 80, 80, 80, 160}, {32, 40, 48, 56, 64, 96, 0, 8, 24, 32, 8224, 8232, 8240, 8248, 8256, 270400, 1056, 1064, 1072, 1080, 1088, 33856, 80, 80, 80, 80, 80, 160}, {32, 40, 48, 56, 64, 96, 16, 8, 24, 32, 8224, 8232, 8240, 8248, 8256, 270400, 1056, 1064, 1072, 1080, 1088, 33856, 80, 88, 96, 96, 96, 1120}, {28, 31, 34, 40, 44, 64, 24, 8, 20, 32, 8224, 8227, 8232, 8236, 8256, 270400, 1056, 1059, 1064, 1068, 1088, 33856, 80, 83, 88, 92, 96, 1632}, {28, 34, 40, 48, 52, 64, 48, 8, 20, 32, 8224, 8230, 8236, 8240, 8256, 270400, 1056, 1062, 1068, 1072, 1088, 33856, 80, 86, 92, 96, 128, 3200}, {28, 29, 30, 32, 36, 64, 8, 8, 20, 32, 8224, 8225, 8228, 8232, 8256, 270400, 1056, 1057, 1060, 1064, 1088, 33856, 80, 81, 84, 88, 96, 608}, }; // ------------------------------------------------ Basic Lookup ----------------------------------------------------------- PNANOVDB_FORCE_INLINE pnanovdb_gridblindmetadata_handle_t pnanovdb_grid_get_gridblindmetadata(pnanovdb_buf_t buf, pnanovdb_grid_handle_t grid, pnanovdb_uint32_t index) { pnanovdb_gridblindmetadata_handle_t meta = { grid.address }; pnanovdb_uint64_t byte_offset = pnanovdb_grid_get_blind_metadata_offset(buf, grid); meta.address = pnanovdb_address_offset64(meta.address, byte_offset); meta.address = pnanovdb_address_offset_product(meta.address, PNANOVDB_GRIDBLINDMETADATA_SIZE, index); return meta; } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_grid_get_gridblindmetadata_value_address(pnanovdb_buf_t buf, pnanovdb_grid_handle_t grid, pnanovdb_uint32_t index) { pnanovdb_gridblindmetadata_handle_t meta = pnanovdb_grid_get_gridblindmetadata(buf, grid, index); pnanovdb_int64_t byte_offset = pnanovdb_gridblindmetadata_get_data_offset(buf, meta); pnanovdb_address_t address = pnanovdb_address_offset64(meta.address, pnanovdb_int64_as_uint64(byte_offset)); return address; } PNANOVDB_FORCE_INLINE pnanovdb_tree_handle_t pnanovdb_grid_get_tree(pnanovdb_buf_t buf, pnanovdb_grid_handle_t grid) { pnanovdb_tree_handle_t tree = { grid.address }; tree.address = pnanovdb_address_offset(tree.address, PNANOVDB_GRID_SIZE); return tree; } PNANOVDB_FORCE_INLINE pnanovdb_root_handle_t pnanovdb_tree_get_root(pnanovdb_buf_t buf, pnanovdb_tree_handle_t tree) { pnanovdb_root_handle_t root = { tree.address }; pnanovdb_uint64_t byte_offset = pnanovdb_tree_get_node_offset_root(buf, tree); root.address = pnanovdb_address_offset64(root.address, byte_offset); return root; } PNANOVDB_FORCE_INLINE pnanovdb_root_tile_handle_t 
pnanovdb_root_get_tile(pnanovdb_grid_type_t grid_type, pnanovdb_root_handle_t root, pnanovdb_uint32_t n) { pnanovdb_root_tile_handle_t tile = { root.address }; tile.address = pnanovdb_address_offset(tile.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_size)); tile.address = pnanovdb_address_offset_product(tile.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_tile_size), n); return tile; } PNANOVDB_FORCE_INLINE pnanovdb_root_tile_handle_t pnanovdb_root_get_tile_zero(pnanovdb_grid_type_t grid_type, pnanovdb_root_handle_t root) { pnanovdb_root_tile_handle_t tile = { root.address }; tile.address = pnanovdb_address_offset(tile.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_size)); return tile; } PNANOVDB_FORCE_INLINE pnanovdb_upper_handle_t pnanovdb_root_get_child(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, pnanovdb_root_tile_handle_t tile) { pnanovdb_upper_handle_t upper = { root.address }; upper.address = pnanovdb_address_offset64(upper.address, pnanovdb_int64_as_uint64(pnanovdb_root_tile_get_child(buf, tile))); return upper; } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_coord_to_key(PNANOVDB_IN(pnanovdb_coord_t) ijk) { #if defined(PNANOVDB_NATIVE_64) pnanovdb_uint64_t iu = pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).x) >> 12u; pnanovdb_uint64_t ju = pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).y) >> 12u; pnanovdb_uint64_t ku = pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).z) >> 12u; return (ku) | (ju << 21u) | (iu << 42u); #else pnanovdb_uint32_t iu = pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).x) >> 12u; pnanovdb_uint32_t ju = pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).y) >> 12u; pnanovdb_uint32_t ku = pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).z) >> 12u; pnanovdb_uint32_t key_x = ku | (ju << 21); pnanovdb_uint32_t key_y = (iu << 10) | (ju >> 11); return pnanovdb_uint32_as_uint64(key_x, key_y); #endif } PNANOVDB_FORCE_INLINE pnanovdb_root_tile_handle_t pnanovdb_root_find_tile(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk) { pnanovdb_uint32_t tile_count = pnanovdb_uint32_as_int32(pnanovdb_root_get_tile_count(buf, root)); pnanovdb_root_tile_handle_t tile = pnanovdb_root_get_tile_zero(grid_type, root); pnanovdb_uint64_t key = pnanovdb_coord_to_key(ijk); for (pnanovdb_uint32_t i = 0u; i < tile_count; i++) { if (pnanovdb_uint64_is_equal(key, pnanovdb_root_tile_get_key(buf, tile))) { return tile; } tile.address = pnanovdb_address_offset(tile.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_tile_size)); } pnanovdb_root_tile_handle_t null_handle = { pnanovdb_address_null() }; return null_handle; } // ----------------------------- Leaf Node --------------------------------------- PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_leaf_coord_to_offset(PNANOVDB_IN(pnanovdb_coord_t) ijk) { return (((PNANOVDB_DEREF(ijk).x & 7) >> 0) << (2 * 3)) + (((PNANOVDB_DEREF(ijk).y & 7) >> 0) << (3)) + ((PNANOVDB_DEREF(ijk).z & 7) >> 0); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_leaf_get_min_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t node) { pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, leaf_off_min); return pnanovdb_address_offset(node.address, byte_offset); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_leaf_get_max_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t node) { pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, leaf_off_max); return 
pnanovdb_address_offset(node.address, byte_offset); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_leaf_get_ave_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t node) { pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, leaf_off_ave); return pnanovdb_address_offset(node.address, byte_offset); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_leaf_get_stddev_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t node) { pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, leaf_off_stddev); return pnanovdb_address_offset(node.address, byte_offset); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_leaf_get_table_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t node, pnanovdb_uint32_t n) { pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, leaf_off_table) + ((PNANOVDB_GRID_TYPE_GET(grid_type, value_stride_bits) * n) >> 3u); return pnanovdb_address_offset(node.address, byte_offset); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_leaf_get_value_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, PNANOVDB_IN(pnanovdb_coord_t) ijk) { pnanovdb_uint32_t n = pnanovdb_leaf_coord_to_offset(ijk); return pnanovdb_leaf_get_table_address(grid_type, buf, leaf, n); } // ----------------------------- Leaf FP Types Specialization --------------------------------------- PNANOVDB_FORCE_INLINE float pnanovdb_leaf_fp_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk, pnanovdb_uint32_t value_log_bits) { // value_log_bits // 2 3 4 pnanovdb_uint32_t value_bits = 1u << value_log_bits; // 4 8 16 pnanovdb_uint32_t value_mask = (1u << value_bits) - 1u; // 0xF 0xFF 0xFFFF pnanovdb_uint32_t values_per_word_bits = 5u - value_log_bits; // 3 2 1 pnanovdb_uint32_t values_per_word_mask = (1u << values_per_word_bits) - 1u; // 7 3 1 pnanovdb_uint32_t n = pnanovdb_leaf_coord_to_offset(ijk); float minimum = pnanovdb_read_float(buf, pnanovdb_address_offset_neg(address, PNANOVDB_LEAF_TABLE_NEG_OFF_MINIMUM)); float quantum = pnanovdb_read_float(buf, pnanovdb_address_offset_neg(address, PNANOVDB_LEAF_TABLE_NEG_OFF_QUANTUM)); pnanovdb_uint32_t raw = pnanovdb_read_uint32(buf, pnanovdb_address_offset(address, ((n >> values_per_word_bits) << 2u))); pnanovdb_uint32_t value_compressed = (raw >> ((n & values_per_word_mask) << value_log_bits)) & value_mask; return pnanovdb_uint32_to_float(value_compressed) * quantum + minimum; } PNANOVDB_FORCE_INLINE float pnanovdb_leaf_fp4_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk) { return pnanovdb_leaf_fp_read_float(buf, address, ijk, 2u); } PNANOVDB_FORCE_INLINE float pnanovdb_leaf_fp8_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk) { return pnanovdb_leaf_fp_read_float(buf, address, ijk, 3u); } PNANOVDB_FORCE_INLINE float pnanovdb_leaf_fp16_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk) { return pnanovdb_leaf_fp_read_float(buf, address, ijk, 4u); } PNANOVDB_FORCE_INLINE float pnanovdb_leaf_fpn_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk) { pnanovdb_uint32_t bbox_dif_and_flags = pnanovdb_read_uint32(buf, pnanovdb_address_offset_neg(address, PNANOVDB_LEAF_TABLE_NEG_OFF_BBOX_DIF_AND_FLAGS)); pnanovdb_uint32_t flags = bbox_dif_and_flags >> 24u; pnanovdb_uint32_t value_log_bits = flags >> 5; 
// b = 0, 1, 2, 3, 4 corresponding to 1, 2, 4, 8, 16 bits return pnanovdb_leaf_fp_read_float(buf, address, ijk, value_log_bits); } // ----------------------------- Leaf Index Specialization --------------------------------------- PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_leaf_index_has_stats(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf) { return (pnanovdb_leaf_get_bbox_dif_and_flags(buf, leaf) & (1u << 28u)) != 0u; } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_index_get_min_index(pnanovdb_buf_t buf, pnanovdb_address_t min_address) { return pnanovdb_uint64_offset(pnanovdb_read_uint64(buf, min_address), 512u); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_index_get_max_index(pnanovdb_buf_t buf, pnanovdb_address_t max_address) { return pnanovdb_uint64_offset(pnanovdb_read_uint64(buf, max_address), 513u); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_index_get_ave_index(pnanovdb_buf_t buf, pnanovdb_address_t ave_address) { return pnanovdb_uint64_offset(pnanovdb_read_uint64(buf, ave_address), 514u); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_index_get_dev_index(pnanovdb_buf_t buf, pnanovdb_address_t dev_address) { return pnanovdb_uint64_offset(pnanovdb_read_uint64(buf, dev_address), 515u); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_index_get_value_index(pnanovdb_buf_t buf, pnanovdb_address_t value_address, PNANOVDB_IN(pnanovdb_coord_t) ijk) { pnanovdb_uint32_t n = pnanovdb_leaf_coord_to_offset(ijk); pnanovdb_uint64_t offset = pnanovdb_read_uint64(buf, value_address); return pnanovdb_uint64_offset(offset, n); } // ----------------------------- Leaf IndexMask Specialization --------------------------------------- PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_leaf_indexmask_has_stats(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf) { return pnanovdb_leaf_index_has_stats(buf, leaf); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_indexmask_get_min_index(pnanovdb_buf_t buf, pnanovdb_address_t min_address) { return pnanovdb_leaf_index_get_min_index(buf, min_address); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_indexmask_get_max_index(pnanovdb_buf_t buf, pnanovdb_address_t max_address) { return pnanovdb_leaf_index_get_max_index(buf, max_address); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_indexmask_get_ave_index(pnanovdb_buf_t buf, pnanovdb_address_t ave_address) { return pnanovdb_leaf_index_get_ave_index(buf, ave_address); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_indexmask_get_dev_index(pnanovdb_buf_t buf, pnanovdb_address_t dev_address) { return pnanovdb_leaf_index_get_dev_index(buf, dev_address); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_indexmask_get_value_index(pnanovdb_buf_t buf, pnanovdb_address_t value_address, PNANOVDB_IN(pnanovdb_coord_t) ijk) { return pnanovdb_leaf_index_get_value_index(buf, value_address, ijk); } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_leaf_indexmask_get_mask_bit(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, pnanovdb_uint32_t n) { pnanovdb_uint32_t word_idx = n >> 5; pnanovdb_uint32_t bit_idx = n & 31; pnanovdb_uint32_t val_mask = pnanovdb_read_uint32(buf, pnanovdb_address_offset(leaf.address, 96u + 4u * word_idx)); return (val_mask & (1u << bit_idx)) != 0u; } PNANOVDB_FORCE_INLINE void pnanovdb_leaf_indexmask_set_mask_bit(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, pnanovdb_uint32_t n, pnanovdb_bool_t v) { pnanovdb_uint32_t word_idx = n >> 5; pnanovdb_uint32_t bit_idx = n & 31; pnanovdb_uint32_t val_mask = pnanovdb_read_uint32(buf, 
pnanovdb_address_offset(leaf.address, 96u + 4u * word_idx)); if (v) { val_mask = val_mask | (1u << bit_idx); } else { val_mask = val_mask & ~(1u << bit_idx); } pnanovdb_write_uint32(buf, pnanovdb_address_offset(leaf.address, 96u + 4u * word_idx), val_mask); } // ----------------------------- Leaf OnIndex Specialization --------------------------------------- PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_leaf_onindex_get_value_count(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf) { pnanovdb_uint64_t val_mask = pnanovdb_read_uint64(buf, pnanovdb_address_offset(leaf.address, PNANOVDB_LEAF_OFF_VALUE_MASK + 8u * 7u)); pnanovdb_uint64_t prefix_sum = pnanovdb_read_uint64( buf, pnanovdb_address_offset(leaf.address, PNANOVDB_GRID_TYPE_GET(PNANOVDB_GRID_TYPE_ONINDEX, leaf_off_table) + 8u)); return pnanovdb_uint64_countbits(val_mask) + (pnanovdb_uint64_to_uint32_lsr(prefix_sum, 54u) & 511u); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindex_get_last_offset(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf) { return pnanovdb_uint64_offset( pnanovdb_read_uint64(buf, pnanovdb_address_offset(leaf.address, PNANOVDB_GRID_TYPE_GET(PNANOVDB_GRID_TYPE_ONINDEX, leaf_off_table))), pnanovdb_leaf_onindex_get_value_count(buf, leaf) - 1u); } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_leaf_onindex_has_stats(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf) { return (pnanovdb_leaf_get_bbox_dif_and_flags(buf, leaf) & (1u << 28u)) != 0u; } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindex_get_min_index(pnanovdb_buf_t buf, pnanovdb_address_t min_address) { pnanovdb_leaf_handle_t leaf = { pnanovdb_address_offset_neg(min_address, PNANOVDB_GRID_TYPE_GET(PNANOVDB_GRID_TYPE_ONINDEX, leaf_off_table)) }; pnanovdb_uint64_t idx = pnanovdb_uint32_as_uint64_low(0u); if (pnanovdb_leaf_onindex_has_stats(buf, leaf)) { idx = pnanovdb_uint64_offset(pnanovdb_leaf_onindex_get_last_offset(buf, leaf), 1u); } return idx; } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindex_get_max_index(pnanovdb_buf_t buf, pnanovdb_address_t max_address) { pnanovdb_leaf_handle_t leaf = { pnanovdb_address_offset_neg(max_address, PNANOVDB_GRID_TYPE_GET(PNANOVDB_GRID_TYPE_ONINDEX, leaf_off_table)) }; pnanovdb_uint64_t idx = pnanovdb_uint32_as_uint64_low(0u); if (pnanovdb_leaf_onindex_has_stats(buf, leaf)) { idx = pnanovdb_uint64_offset(pnanovdb_leaf_onindex_get_last_offset(buf, leaf), 2u); } return idx; } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindex_get_ave_index(pnanovdb_buf_t buf, pnanovdb_address_t ave_address) { pnanovdb_leaf_handle_t leaf = { pnanovdb_address_offset_neg(ave_address, PNANOVDB_GRID_TYPE_GET(PNANOVDB_GRID_TYPE_ONINDEX, leaf_off_table)) }; pnanovdb_uint64_t idx = pnanovdb_uint32_as_uint64_low(0u); if (pnanovdb_leaf_onindex_has_stats(buf, leaf)) { idx = pnanovdb_uint64_offset(pnanovdb_leaf_onindex_get_last_offset(buf, leaf), 3u); } return idx; } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindex_get_dev_index(pnanovdb_buf_t buf, pnanovdb_address_t dev_address) { pnanovdb_leaf_handle_t leaf = { pnanovdb_address_offset_neg(dev_address, PNANOVDB_GRID_TYPE_GET(PNANOVDB_GRID_TYPE_ONINDEX, leaf_off_table)) }; pnanovdb_uint64_t idx = pnanovdb_uint32_as_uint64_low(0u); if (pnanovdb_leaf_onindex_has_stats(buf, leaf)) { idx = pnanovdb_uint64_offset(pnanovdb_leaf_onindex_get_last_offset(buf, leaf), 4u); } return idx; } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindex_get_value_index(pnanovdb_buf_t buf, pnanovdb_address_t value_address, PNANOVDB_IN(pnanovdb_coord_t) ijk) { 
pnanovdb_uint32_t n = pnanovdb_leaf_coord_to_offset(ijk); pnanovdb_leaf_handle_t leaf = { pnanovdb_address_offset_neg(value_address, PNANOVDB_GRID_TYPE_GET(PNANOVDB_GRID_TYPE_ONINDEX, leaf_off_table)) }; pnanovdb_uint32_t word_idx = n >> 6u; pnanovdb_uint32_t bit_idx = n & 63u; pnanovdb_uint64_t val_mask = pnanovdb_read_uint64(buf, pnanovdb_address_offset(leaf.address, PNANOVDB_LEAF_OFF_VALUE_MASK + 8u * word_idx)); pnanovdb_uint64_t mask = pnanovdb_uint64_bit_mask(bit_idx); pnanovdb_uint64_t value_index = pnanovdb_uint32_as_uint64_low(0u); if (pnanovdb_uint64_any_bit(pnanovdb_uint64_and(val_mask, mask))) { pnanovdb_uint32_t sum = 0u; sum += pnanovdb_uint64_countbits(pnanovdb_uint64_and(val_mask, pnanovdb_uint64_dec(mask))); if (word_idx > 0u) { pnanovdb_uint64_t prefix_sum = pnanovdb_read_uint64(buf, pnanovdb_address_offset(value_address, 8u)); sum += pnanovdb_uint64_to_uint32_lsr(prefix_sum, 9u * (word_idx - 1u)) & 511u; } pnanovdb_uint64_t offset = pnanovdb_read_uint64(buf, value_address); value_index = pnanovdb_uint64_offset(offset, sum); } return value_index; } // ----------------------------- Leaf OnIndexMask Specialization --------------------------------------- PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_leaf_onindexmask_get_value_count(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf) { return pnanovdb_leaf_onindex_get_value_count(buf, leaf); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindexmask_get_last_offset(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf) { return pnanovdb_leaf_onindex_get_last_offset(buf, leaf); } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_leaf_onindexmask_has_stats(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf) { return pnanovdb_leaf_onindex_has_stats(buf, leaf); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindexmask_get_min_index(pnanovdb_buf_t buf, pnanovdb_address_t min_address) { return pnanovdb_leaf_onindex_get_min_index(buf, min_address); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindexmask_get_max_index(pnanovdb_buf_t buf, pnanovdb_address_t max_address) { return pnanovdb_leaf_onindex_get_max_index(buf, max_address); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindexmask_get_ave_index(pnanovdb_buf_t buf, pnanovdb_address_t ave_address) { return pnanovdb_leaf_onindex_get_ave_index(buf, ave_address); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindexmask_get_dev_index(pnanovdb_buf_t buf, pnanovdb_address_t dev_address) { return pnanovdb_leaf_onindex_get_dev_index(buf, dev_address); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindexmask_get_value_index(pnanovdb_buf_t buf, pnanovdb_address_t value_address, PNANOVDB_IN(pnanovdb_coord_t) ijk) { return pnanovdb_leaf_onindex_get_value_index(buf, value_address, ijk); } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_leaf_onindexmask_get_mask_bit(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, pnanovdb_uint32_t n) { pnanovdb_uint32_t word_idx = n >> 5; pnanovdb_uint32_t bit_idx = n & 31; pnanovdb_uint32_t val_mask = pnanovdb_read_uint32(buf, pnanovdb_address_offset(leaf.address, 96u + 4u * word_idx)); return (val_mask & (1u << bit_idx)) != 0u; } PNANOVDB_FORCE_INLINE void pnanovdb_leaf_onindexmask_set_mask_bit(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, pnanovdb_uint32_t n, pnanovdb_bool_t v) { pnanovdb_uint32_t word_idx = n >> 5; pnanovdb_uint32_t bit_idx = n & 31; pnanovdb_uint32_t val_mask = pnanovdb_read_uint32(buf, pnanovdb_address_offset(leaf.address, 96u + 4u * word_idx)); if (v) { val_mask = val_mask | (1u << 
bit_idx); } else { val_mask = val_mask & ~(1u << bit_idx); } pnanovdb_write_uint32(buf, pnanovdb_address_offset(leaf.address, 96u + 4u * word_idx), val_mask); } // ----------------------------- Leaf PointIndex Specialization --------------------------------------- PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_pointindex_get_offset(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf) { return pnanovdb_read_uint64(buf, pnanovdb_leaf_get_min_address(PNANOVDB_GRID_TYPE_POINTINDEX, buf, leaf)); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_pointindex_get_point_count(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf) { return pnanovdb_read_uint64(buf, pnanovdb_leaf_get_max_address(PNANOVDB_GRID_TYPE_POINTINDEX, buf, leaf)); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_pointindex_get_first(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, pnanovdb_uint32_t i) { return pnanovdb_uint64_offset(pnanovdb_leaf_pointindex_get_offset(buf, leaf), (i == 0u ? 0u : pnanovdb_read_uint16(buf, pnanovdb_leaf_get_table_address(PNANOVDB_GRID_TYPE_POINTINDEX, buf, leaf, i - 1u)))); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_pointindex_get_last(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, pnanovdb_uint32_t i) { return pnanovdb_uint64_offset(pnanovdb_leaf_pointindex_get_offset(buf, leaf), pnanovdb_read_uint16(buf, pnanovdb_leaf_get_table_address(PNANOVDB_GRID_TYPE_POINTINDEX, buf, leaf, i))); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_pointindex_get_value(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, pnanovdb_uint32_t i) { return pnanovdb_uint32_as_uint64_low(pnanovdb_read_uint16(buf, pnanovdb_leaf_get_table_address(PNANOVDB_GRID_TYPE_POINTINDEX, buf, leaf, i))); } PNANOVDB_FORCE_INLINE void pnanovdb_leaf_pointindex_set_value_only(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, pnanovdb_uint32_t i, pnanovdb_uint32_t value) { pnanovdb_address_t addr = pnanovdb_leaf_get_table_address(PNANOVDB_GRID_TYPE_POINTINDEX, buf, leaf, i); pnanovdb_uint32_t raw32 = pnanovdb_read_uint32(buf, pnanovdb_address_mask_inv(addr, 3u)); if ((i & 1) == 0u) { raw32 = (raw32 & 0xFFFF0000) | (value & 0x0000FFFF); } else { raw32 = (raw32 & 0x0000FFFF) | (value << 16u); } pnanovdb_write_uint32(buf, addr, raw32); } PNANOVDB_FORCE_INLINE void pnanovdb_leaf_pointindex_set_on(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, pnanovdb_uint32_t i) { pnanovdb_uint32_t word_idx = i >> 5; pnanovdb_uint32_t bit_idx = i & 31; pnanovdb_address_t addr = pnanovdb_address_offset(leaf.address, PNANOVDB_LEAF_OFF_VALUE_MASK + 4u * word_idx); pnanovdb_uint32_t val_mask = pnanovdb_read_uint32(buf, addr); val_mask = val_mask | (1u << bit_idx); pnanovdb_write_uint32(buf, addr, val_mask); } PNANOVDB_FORCE_INLINE void pnanovdb_leaf_pointindex_set_value(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, pnanovdb_uint32_t i, pnanovdb_uint32_t value) { pnanovdb_leaf_pointindex_set_on(buf, leaf, i); pnanovdb_leaf_pointindex_set_value_only(buf, leaf, i, value); } // ------------------------------------------------ Lower Node ----------------------------------------------------------- PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_lower_coord_to_offset(PNANOVDB_IN(pnanovdb_coord_t) ijk) { return (((PNANOVDB_DEREF(ijk).x & 127) >> 3) << (2 * 4)) + (((PNANOVDB_DEREF(ijk).y & 127) >> 3) << (4)) + ((PNANOVDB_DEREF(ijk).z & 127) >> 3); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_min_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t node) { pnanovdb_uint32_t byte_offset = 
PNANOVDB_GRID_TYPE_GET(grid_type, lower_off_min); return pnanovdb_address_offset(node.address, byte_offset); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_max_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t node) { pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, lower_off_max); return pnanovdb_address_offset(node.address, byte_offset); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_ave_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t node) { pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, lower_off_ave); return pnanovdb_address_offset(node.address, byte_offset); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_stddev_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t node) { pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, lower_off_stddev); return pnanovdb_address_offset(node.address, byte_offset); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_table_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t node, pnanovdb_uint32_t n) { pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, lower_off_table) + PNANOVDB_GRID_TYPE_GET(grid_type, table_stride) * n; return pnanovdb_address_offset(node.address, byte_offset); } PNANOVDB_FORCE_INLINE pnanovdb_int64_t pnanovdb_lower_get_table_child(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t node, pnanovdb_uint32_t n) { pnanovdb_address_t table_address = pnanovdb_lower_get_table_address(grid_type, buf, node, n); return pnanovdb_read_int64(buf, table_address); } PNANOVDB_FORCE_INLINE pnanovdb_leaf_handle_t pnanovdb_lower_get_child(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t lower, pnanovdb_uint32_t n) { pnanovdb_leaf_handle_t leaf = { lower.address }; leaf.address = pnanovdb_address_offset64(leaf.address, pnanovdb_int64_as_uint64(pnanovdb_lower_get_table_child(grid_type, buf, lower, n))); return leaf; } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_value_address_and_level(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t lower, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_uint32_t) level) { pnanovdb_uint32_t n = pnanovdb_lower_coord_to_offset(ijk); pnanovdb_address_t value_address; if (pnanovdb_lower_get_child_mask(buf, lower, n)) { pnanovdb_leaf_handle_t child = pnanovdb_lower_get_child(grid_type, buf, lower, n); value_address = pnanovdb_leaf_get_value_address(grid_type, buf, child, ijk); PNANOVDB_DEREF(level) = 0u; } else { value_address = pnanovdb_lower_get_table_address(grid_type, buf, lower, n); PNANOVDB_DEREF(level) = 1u; } return value_address; } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_value_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t lower, PNANOVDB_IN(pnanovdb_coord_t) ijk) { pnanovdb_uint32_t level; return pnanovdb_lower_get_value_address_and_level(grid_type, buf, lower, ijk, PNANOVDB_REF(level)); } // ------------------------------------------------ Upper Node ----------------------------------------------------------- PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_upper_coord_to_offset(PNANOVDB_IN(pnanovdb_coord_t) ijk) { return (((PNANOVDB_DEREF(ijk).x & 4095) >> 7) << (2 * 5)) + (((PNANOVDB_DEREF(ijk).y & 4095) >> 7) << (5)) + ((PNANOVDB_DEREF(ijk).z & 4095) >> 7); } PNANOVDB_FORCE_INLINE pnanovdb_address_t 
pnanovdb_upper_get_min_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t node) { pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, upper_off_min); return pnanovdb_address_offset(node.address, byte_offset); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_max_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t node) { pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, upper_off_max); return pnanovdb_address_offset(node.address, byte_offset); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_ave_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t node) { pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, upper_off_ave); return pnanovdb_address_offset(node.address, byte_offset); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_stddev_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t node) { pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, upper_off_stddev); return pnanovdb_address_offset(node.address, byte_offset); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_table_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t node, pnanovdb_uint32_t n) { pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, upper_off_table) + PNANOVDB_GRID_TYPE_GET(grid_type, table_stride) * n; return pnanovdb_address_offset(node.address, byte_offset); } PNANOVDB_FORCE_INLINE pnanovdb_int64_t pnanovdb_upper_get_table_child(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t node, pnanovdb_uint32_t n) { pnanovdb_address_t bufAddress = pnanovdb_upper_get_table_address(grid_type, buf, node, n); return pnanovdb_read_int64(buf, bufAddress); } PNANOVDB_FORCE_INLINE pnanovdb_lower_handle_t pnanovdb_upper_get_child(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t upper, pnanovdb_uint32_t n) { pnanovdb_lower_handle_t lower = { upper.address }; lower.address = pnanovdb_address_offset64(lower.address, pnanovdb_int64_as_uint64(pnanovdb_upper_get_table_child(grid_type, buf, upper, n))); return lower; } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_value_address_and_level(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t upper, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_uint32_t) level) { pnanovdb_uint32_t n = pnanovdb_upper_coord_to_offset(ijk); pnanovdb_address_t value_address; if (pnanovdb_upper_get_child_mask(buf, upper, n)) { pnanovdb_lower_handle_t child = pnanovdb_upper_get_child(grid_type, buf, upper, n); value_address = pnanovdb_lower_get_value_address_and_level(grid_type, buf, child, ijk, level); } else { value_address = pnanovdb_upper_get_table_address(grid_type, buf, upper, n); PNANOVDB_DEREF(level) = 2u; } return value_address; } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_value_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t upper, PNANOVDB_IN(pnanovdb_coord_t) ijk) { pnanovdb_uint32_t level; return pnanovdb_upper_get_value_address_and_level(grid_type, buf, upper, ijk, PNANOVDB_REF(level)); } PNANOVDB_FORCE_INLINE void pnanovdb_upper_set_table_child(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t node, pnanovdb_uint32_t n, pnanovdb_int64_t child) { pnanovdb_address_t bufAddress = pnanovdb_upper_get_table_address(grid_type, buf, node, n); 
pnanovdb_write_int64(buf, bufAddress, child); } // ------------------------------------------------ Root ----------------------------------------------------------- PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_min_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root) { pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, root_off_min); return pnanovdb_address_offset(root.address, byte_offset); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_max_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root) { pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, root_off_max); return pnanovdb_address_offset(root.address, byte_offset); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_ave_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root) { pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, root_off_ave); return pnanovdb_address_offset(root.address, byte_offset); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_stddev_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root) { pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, root_off_stddev); return pnanovdb_address_offset(root.address, byte_offset); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_tile_get_value_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_tile_handle_t root_tile) { pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, root_tile_off_value); return pnanovdb_address_offset(root_tile.address, byte_offset); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_value_address_and_level(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_uint32_t) level) { pnanovdb_root_tile_handle_t tile = pnanovdb_root_find_tile(grid_type, buf, root, ijk); pnanovdb_address_t ret; if (pnanovdb_address_is_null(tile.address)) { ret = pnanovdb_address_offset(root.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_off_background)); PNANOVDB_DEREF(level) = 4u; } else if (pnanovdb_int64_is_zero(pnanovdb_root_tile_get_child(buf, tile))) { ret = pnanovdb_address_offset(tile.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_tile_off_value)); PNANOVDB_DEREF(level) = 3u; } else { pnanovdb_upper_handle_t child = pnanovdb_root_get_child(grid_type, buf, root, tile); ret = pnanovdb_upper_get_value_address_and_level(grid_type, buf, child, ijk, level); } return ret; } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_value_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk) { pnanovdb_uint32_t level; return pnanovdb_root_get_value_address_and_level(grid_type, buf, root, ijk, PNANOVDB_REF(level)); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_value_address_bit(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_uint32_t) bit_index) { pnanovdb_uint32_t level; pnanovdb_address_t address = pnanovdb_root_get_value_address_and_level(grid_type, buf, root, ijk, PNANOVDB_REF(level)); PNANOVDB_DEREF(bit_index) = level == 0u ? 
pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).x & 7) : 0u; return address; } PNANOVDB_FORCE_INLINE float pnanovdb_root_fp4_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk, pnanovdb_uint32_t level) { float ret; if (level == 0) { ret = pnanovdb_leaf_fp4_read_float(buf, address, ijk); } else { ret = pnanovdb_read_float(buf, address); } return ret; } PNANOVDB_FORCE_INLINE float pnanovdb_root_fp8_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk, pnanovdb_uint32_t level) { float ret; if (level == 0) { ret = pnanovdb_leaf_fp8_read_float(buf, address, ijk); } else { ret = pnanovdb_read_float(buf, address); } return ret; } PNANOVDB_FORCE_INLINE float pnanovdb_root_fp16_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk, pnanovdb_uint32_t level) { float ret; if (level == 0) { ret = pnanovdb_leaf_fp16_read_float(buf, address, ijk); } else { ret = pnanovdb_read_float(buf, address); } return ret; } PNANOVDB_FORCE_INLINE float pnanovdb_root_fpn_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk, pnanovdb_uint32_t level) { float ret; if (level == 0) { ret = pnanovdb_leaf_fpn_read_float(buf, address, ijk); } else { ret = pnanovdb_read_float(buf, address); } return ret; } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_root_index_get_value_index(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk, pnanovdb_uint32_t level) { pnanovdb_uint64_t ret; if (level == 0) { ret = pnanovdb_leaf_index_get_value_index(buf, address, ijk); } else { ret = pnanovdb_read_uint64(buf, address); } return ret; } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_root_onindex_get_value_index(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk, pnanovdb_uint32_t level) { pnanovdb_uint64_t ret; if (level == 0) { ret = pnanovdb_leaf_onindex_get_value_index(buf, address, ijk); } else { ret = pnanovdb_read_uint64(buf, address); } return ret; } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_root_pointindex_get_point_range( pnanovdb_buf_t buf, pnanovdb_address_t value_address, PNANOVDB_IN(pnanovdb_coord_t) ijk, pnanovdb_uint32_t level, PNANOVDB_INOUT(pnanovdb_uint64_t)range_begin, PNANOVDB_INOUT(pnanovdb_uint64_t)range_end ) { pnanovdb_uint32_t local_range_begin = 0u; pnanovdb_uint32_t local_range_end = 0u; pnanovdb_uint64_t offset = pnanovdb_uint32_as_uint64_low(0u); if (level == 0) { pnanovdb_uint32_t n = pnanovdb_leaf_coord_to_offset(ijk); // recover leaf address pnanovdb_leaf_handle_t leaf = { pnanovdb_address_offset_neg(value_address, PNANOVDB_GRID_TYPE_GET(PNANOVDB_GRID_TYPE_POINTINDEX, leaf_off_table) + 2u * n) }; if (n > 0u) { local_range_begin = pnanovdb_read_uint16(buf, pnanovdb_address_offset_neg(value_address, 2u)); } local_range_end = pnanovdb_read_uint16(buf, value_address); offset = pnanovdb_leaf_pointindex_get_offset(buf, leaf); } PNANOVDB_DEREF(range_begin) = pnanovdb_uint64_offset(offset, local_range_begin); PNANOVDB_DEREF(range_end) = pnanovdb_uint64_offset(offset, local_range_end); return pnanovdb_uint32_as_uint64_low(local_range_end - local_range_begin); } PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_root_pointindex_get_point_address_range( pnanovdb_buf_t buf, pnanovdb_grid_type_t value_type, pnanovdb_address_t value_address, pnanovdb_address_t blindmetadata_value_address, PNANOVDB_IN(pnanovdb_coord_t) ijk, pnanovdb_uint32_t level, PNANOVDB_INOUT(pnanovdb_address_t)address_begin, 
PNANOVDB_INOUT(pnanovdb_address_t)address_end ) { pnanovdb_uint64_t range_begin; pnanovdb_uint64_t range_end; pnanovdb_uint64_t range_size = pnanovdb_root_pointindex_get_point_range(buf, value_address, ijk, level, PNANOVDB_REF(range_begin), PNANOVDB_REF(range_end)); pnanovdb_uint32_t stride = 12u; // vec3f if (value_type == PNANOVDB_GRID_TYPE_VEC3U8) { stride = 3u; } else if (value_type == PNANOVDB_GRID_TYPE_VEC3U16) { stride = 6u; } PNANOVDB_DEREF(address_begin) = pnanovdb_address_offset64_product(blindmetadata_value_address, range_begin, stride); PNANOVDB_DEREF(address_end) = pnanovdb_address_offset64_product(blindmetadata_value_address, range_end, stride); return range_size; } // ------------------------------------------------ ReadAccessor ----------------------------------------------------------- struct pnanovdb_readaccessor_t { pnanovdb_coord_t key; pnanovdb_leaf_handle_t leaf; pnanovdb_lower_handle_t lower; pnanovdb_upper_handle_t upper; pnanovdb_root_handle_t root; }; PNANOVDB_STRUCT_TYPEDEF(pnanovdb_readaccessor_t) PNANOVDB_FORCE_INLINE void pnanovdb_readaccessor_init(PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, pnanovdb_root_handle_t root) { PNANOVDB_DEREF(acc).key.x = 0x7FFFFFFF; PNANOVDB_DEREF(acc).key.y = 0x7FFFFFFF; PNANOVDB_DEREF(acc).key.z = 0x7FFFFFFF; PNANOVDB_DEREF(acc).leaf.address = pnanovdb_address_null(); PNANOVDB_DEREF(acc).lower.address = pnanovdb_address_null(); PNANOVDB_DEREF(acc).upper.address = pnanovdb_address_null(); PNANOVDB_DEREF(acc).root = root; } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_readaccessor_iscached0(PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, int dirty) { if (pnanovdb_address_is_null(PNANOVDB_DEREF(acc).leaf.address)) { return PNANOVDB_FALSE; } if ((dirty & ~((1u << 3) - 1u)) != 0) { PNANOVDB_DEREF(acc).leaf.address = pnanovdb_address_null(); return PNANOVDB_FALSE; } return PNANOVDB_TRUE; } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_readaccessor_iscached1(PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, int dirty) { if (pnanovdb_address_is_null(PNANOVDB_DEREF(acc).lower.address)) { return PNANOVDB_FALSE; } if ((dirty & ~((1u << 7) - 1u)) != 0) { PNANOVDB_DEREF(acc).lower.address = pnanovdb_address_null(); return PNANOVDB_FALSE; } return PNANOVDB_TRUE; } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_readaccessor_iscached2(PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, int dirty) { if (pnanovdb_address_is_null(PNANOVDB_DEREF(acc).upper.address)) { return PNANOVDB_FALSE; } if ((dirty & ~((1u << 12) - 1u)) != 0) { PNANOVDB_DEREF(acc).upper.address = pnanovdb_address_null(); return PNANOVDB_FALSE; } return PNANOVDB_TRUE; } PNANOVDB_FORCE_INLINE int pnanovdb_readaccessor_computedirty(PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_IN(pnanovdb_coord_t) ijk) { return (PNANOVDB_DEREF(ijk).x ^ PNANOVDB_DEREF(acc).key.x) | (PNANOVDB_DEREF(ijk).y ^ PNANOVDB_DEREF(acc).key.y) | (PNANOVDB_DEREF(ijk).z ^ PNANOVDB_DEREF(acc).key.z); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_leaf_get_value_address_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc) { pnanovdb_uint32_t n = pnanovdb_leaf_coord_to_offset(ijk); return pnanovdb_leaf_get_table_address(grid_type, buf, leaf, n); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_value_address_and_level_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t lower, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) 
acc, PNANOVDB_INOUT(pnanovdb_uint32_t) level) { pnanovdb_uint32_t n = pnanovdb_lower_coord_to_offset(ijk); pnanovdb_address_t value_address; if (pnanovdb_lower_get_child_mask(buf, lower, n)) { pnanovdb_leaf_handle_t child = pnanovdb_lower_get_child(grid_type, buf, lower, n); PNANOVDB_DEREF(acc).leaf = child; PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk); value_address = pnanovdb_leaf_get_value_address_and_cache(grid_type, buf, child, ijk, acc); PNANOVDB_DEREF(level) = 0u; } else { value_address = pnanovdb_lower_get_table_address(grid_type, buf, lower, n); PNANOVDB_DEREF(level) = 1u; } return value_address; } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_value_address_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t lower, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc) { pnanovdb_uint32_t level; return pnanovdb_lower_get_value_address_and_level_and_cache(grid_type, buf, lower, ijk, acc, PNANOVDB_REF(level)); } PNANOVDB_FORCE_INLINE void pnanovdb_lower_set_table_child(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t node, pnanovdb_uint32_t n, pnanovdb_int64_t child) { pnanovdb_address_t table_address = pnanovdb_lower_get_table_address(grid_type, buf, node, n); pnanovdb_write_int64(buf, table_address, child); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_value_address_and_level_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t upper, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_INOUT(pnanovdb_uint32_t) level) { pnanovdb_uint32_t n = pnanovdb_upper_coord_to_offset(ijk); pnanovdb_address_t value_address; if (pnanovdb_upper_get_child_mask(buf, upper, n)) { pnanovdb_lower_handle_t child = pnanovdb_upper_get_child(grid_type, buf, upper, n); PNANOVDB_DEREF(acc).lower = child; PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk); value_address = pnanovdb_lower_get_value_address_and_level_and_cache(grid_type, buf, child, ijk, acc, level); } else { value_address = pnanovdb_upper_get_table_address(grid_type, buf, upper, n); PNANOVDB_DEREF(level) = 2u; } return value_address; } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_value_address_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t upper, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc) { pnanovdb_uint32_t level; return pnanovdb_upper_get_value_address_and_level_and_cache(grid_type, buf, upper, ijk, acc, PNANOVDB_REF(level)); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_value_address_and_level_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_INOUT(pnanovdb_uint32_t) level) { pnanovdb_root_tile_handle_t tile = pnanovdb_root_find_tile(grid_type, buf, root, ijk); pnanovdb_address_t ret; if (pnanovdb_address_is_null(tile.address)) { ret = pnanovdb_address_offset(root.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_off_background)); PNANOVDB_DEREF(level) = 4u; } else if (pnanovdb_int64_is_zero(pnanovdb_root_tile_get_child(buf, tile))) { ret = pnanovdb_address_offset(tile.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_tile_off_value)); PNANOVDB_DEREF(level) = 3u; } else { pnanovdb_upper_handle_t child = pnanovdb_root_get_child(grid_type, buf, root, tile); PNANOVDB_DEREF(acc).upper = child; PNANOVDB_DEREF(acc).key = 
PNANOVDB_DEREF(ijk); ret = pnanovdb_upper_get_value_address_and_level_and_cache(grid_type, buf, child, ijk, acc, level); } return ret; } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_value_address_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc) { pnanovdb_uint32_t level; return pnanovdb_root_get_value_address_and_level_and_cache(grid_type, buf, root, ijk, acc, PNANOVDB_REF(level)); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_readaccessor_get_value_address_and_level(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_uint32_t) level) { int dirty = pnanovdb_readaccessor_computedirty(acc, ijk); pnanovdb_address_t value_address; if (pnanovdb_readaccessor_iscached0(acc, dirty)) { value_address = pnanovdb_leaf_get_value_address_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).leaf, ijk, acc); PNANOVDB_DEREF(level) = 0u; } else if (pnanovdb_readaccessor_iscached1(acc, dirty)) { value_address = pnanovdb_lower_get_value_address_and_level_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).lower, ijk, acc, level); } else if (pnanovdb_readaccessor_iscached2(acc, dirty)) { value_address = pnanovdb_upper_get_value_address_and_level_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).upper, ijk, acc, level); } else { value_address = pnanovdb_root_get_value_address_and_level_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).root, ijk, acc, level); } return value_address; } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_readaccessor_get_value_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_IN(pnanovdb_coord_t) ijk) { pnanovdb_uint32_t level; return pnanovdb_readaccessor_get_value_address_and_level(grid_type, buf, acc, ijk, PNANOVDB_REF(level)); } PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_readaccessor_get_value_address_bit(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_uint32_t) bit_index) { pnanovdb_uint32_t level; pnanovdb_address_t address = pnanovdb_readaccessor_get_value_address_and_level(grid_type, buf, acc, ijk, PNANOVDB_REF(level)); PNANOVDB_DEREF(bit_index) = level == 0u ? 
pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).x & 7) : 0u; return address; } // ------------------------------------------------ ReadAccessor GetDim ----------------------------------------------------------- PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_leaf_get_dim_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc) { return 1u; } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_lower_get_dim_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t lower, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc) { pnanovdb_uint32_t n = pnanovdb_lower_coord_to_offset(ijk); pnanovdb_uint32_t ret; if (pnanovdb_lower_get_child_mask(buf, lower, n)) { pnanovdb_leaf_handle_t child = pnanovdb_lower_get_child(grid_type, buf, lower, n); PNANOVDB_DEREF(acc).leaf = child; PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk); ret = pnanovdb_leaf_get_dim_and_cache(grid_type, buf, child, ijk, acc); } else { ret = (1u << (3u)); // node 0 dim } return ret; } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_upper_get_dim_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t upper, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc) { pnanovdb_uint32_t n = pnanovdb_upper_coord_to_offset(ijk); pnanovdb_uint32_t ret; if (pnanovdb_upper_get_child_mask(buf, upper, n)) { pnanovdb_lower_handle_t child = pnanovdb_upper_get_child(grid_type, buf, upper, n); PNANOVDB_DEREF(acc).lower = child; PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk); ret = pnanovdb_lower_get_dim_and_cache(grid_type, buf, child, ijk, acc); } else { ret = (1u << (4u + 3u)); // node 1 dim } return ret; } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_root_get_dim_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc) { pnanovdb_root_tile_handle_t tile = pnanovdb_root_find_tile(grid_type, buf, root, ijk); pnanovdb_uint32_t ret; if (pnanovdb_address_is_null(tile.address)) { ret = 1u << (5u + 4u + 3u); // background, node 2 dim } else if (pnanovdb_int64_is_zero(pnanovdb_root_tile_get_child(buf, tile))) { ret = 1u << (5u + 4u + 3u); // tile value, node 2 dim } else { pnanovdb_upper_handle_t child = pnanovdb_root_get_child(grid_type, buf, root, tile); PNANOVDB_DEREF(acc).upper = child; PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk); ret = pnanovdb_upper_get_dim_and_cache(grid_type, buf, child, ijk, acc); } return ret; } PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_readaccessor_get_dim(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_IN(pnanovdb_coord_t) ijk) { int dirty = pnanovdb_readaccessor_computedirty(acc, ijk); pnanovdb_uint32_t dim; if (pnanovdb_readaccessor_iscached0(acc, dirty)) { dim = pnanovdb_leaf_get_dim_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).leaf, ijk, acc); } else if (pnanovdb_readaccessor_iscached1(acc, dirty)) { dim = pnanovdb_lower_get_dim_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).lower, ijk, acc); } else if (pnanovdb_readaccessor_iscached2(acc, dirty)) { dim = pnanovdb_upper_get_dim_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).upper, ijk, acc); } else { dim = pnanovdb_root_get_dim_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).root, ijk, acc); } return dim; } // ------------------------------------------------ ReadAccessor 
IsActive ----------------------------------------------------------- PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_leaf_is_active_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc) { pnanovdb_uint32_t n = pnanovdb_leaf_coord_to_offset(ijk); return pnanovdb_leaf_get_value_mask(buf, leaf, n); } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_lower_is_active_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t lower, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc) { pnanovdb_uint32_t n = pnanovdb_lower_coord_to_offset(ijk); pnanovdb_bool_t is_active; if (pnanovdb_lower_get_child_mask(buf, lower, n)) { pnanovdb_leaf_handle_t child = pnanovdb_lower_get_child(grid_type, buf, lower, n); PNANOVDB_DEREF(acc).leaf = child; PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk); is_active = pnanovdb_leaf_is_active_and_cache(grid_type, buf, child, ijk, acc); } else { is_active = pnanovdb_lower_get_value_mask(buf, lower, n); } return is_active; } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_upper_is_active_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t upper, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc) { pnanovdb_uint32_t n = pnanovdb_upper_coord_to_offset(ijk); pnanovdb_bool_t is_active; if (pnanovdb_upper_get_child_mask(buf, upper, n)) { pnanovdb_lower_handle_t child = pnanovdb_upper_get_child(grid_type, buf, upper, n); PNANOVDB_DEREF(acc).lower = child; PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk); is_active = pnanovdb_lower_is_active_and_cache(grid_type, buf, child, ijk, acc); } else { is_active = pnanovdb_upper_get_value_mask(buf, upper, n); } return is_active; } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_root_is_active_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc) { pnanovdb_root_tile_handle_t tile = pnanovdb_root_find_tile(grid_type, buf, root, ijk); pnanovdb_bool_t is_active; if (pnanovdb_address_is_null(tile.address)) { is_active = PNANOVDB_FALSE; // background } else if (pnanovdb_int64_is_zero(pnanovdb_root_tile_get_child(buf, tile))) { pnanovdb_uint32_t state = pnanovdb_root_tile_get_state(buf, tile); is_active = state != 0u; // tile value } else { pnanovdb_upper_handle_t child = pnanovdb_root_get_child(grid_type, buf, root, tile); PNANOVDB_DEREF(acc).upper = child; PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk); is_active = pnanovdb_upper_is_active_and_cache(grid_type, buf, child, ijk, acc); } return is_active; } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_readaccessor_is_active(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_IN(pnanovdb_coord_t) ijk) { int dirty = pnanovdb_readaccessor_computedirty(acc, ijk); pnanovdb_bool_t is_active; if (pnanovdb_readaccessor_iscached0(acc, dirty)) { is_active = pnanovdb_leaf_is_active_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).leaf, ijk, acc); } else if (pnanovdb_readaccessor_iscached1(acc, dirty)) { is_active = pnanovdb_lower_is_active_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).lower, ijk, acc); } else if (pnanovdb_readaccessor_iscached2(acc, dirty)) { is_active = pnanovdb_upper_is_active_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).upper, ijk, acc); } else { is_active = 
pnanovdb_root_is_active_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).root, ijk, acc); } return is_active; } // ------------------------------------------------ Map Transforms ----------------------------------------------------------- PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_map_apply(pnanovdb_buf_t buf, pnanovdb_map_handle_t map, PNANOVDB_IN(pnanovdb_vec3_t) src) { pnanovdb_vec3_t dst; float sx = PNANOVDB_DEREF(src).x; float sy = PNANOVDB_DEREF(src).y; float sz = PNANOVDB_DEREF(src).z; dst.x = sx * pnanovdb_map_get_matf(buf, map, 0) + sy * pnanovdb_map_get_matf(buf, map, 1) + sz * pnanovdb_map_get_matf(buf, map, 2) + pnanovdb_map_get_vecf(buf, map, 0); dst.y = sx * pnanovdb_map_get_matf(buf, map, 3) + sy * pnanovdb_map_get_matf(buf, map, 4) + sz * pnanovdb_map_get_matf(buf, map, 5) + pnanovdb_map_get_vecf(buf, map, 1); dst.z = sx * pnanovdb_map_get_matf(buf, map, 6) + sy * pnanovdb_map_get_matf(buf, map, 7) + sz * pnanovdb_map_get_matf(buf, map, 8) + pnanovdb_map_get_vecf(buf, map, 2); return dst; } PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_map_apply_inverse(pnanovdb_buf_t buf, pnanovdb_map_handle_t map, PNANOVDB_IN(pnanovdb_vec3_t) src) { pnanovdb_vec3_t dst; float sx = PNANOVDB_DEREF(src).x - pnanovdb_map_get_vecf(buf, map, 0); float sy = PNANOVDB_DEREF(src).y - pnanovdb_map_get_vecf(buf, map, 1); float sz = PNANOVDB_DEREF(src).z - pnanovdb_map_get_vecf(buf, map, 2); dst.x = sx * pnanovdb_map_get_invmatf(buf, map, 0) + sy * pnanovdb_map_get_invmatf(buf, map, 1) + sz * pnanovdb_map_get_invmatf(buf, map, 2); dst.y = sx * pnanovdb_map_get_invmatf(buf, map, 3) + sy * pnanovdb_map_get_invmatf(buf, map, 4) + sz * pnanovdb_map_get_invmatf(buf, map, 5); dst.z = sx * pnanovdb_map_get_invmatf(buf, map, 6) + sy * pnanovdb_map_get_invmatf(buf, map, 7) + sz * pnanovdb_map_get_invmatf(buf, map, 8); return dst; } PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_map_apply_jacobi(pnanovdb_buf_t buf, pnanovdb_map_handle_t map, PNANOVDB_IN(pnanovdb_vec3_t) src) { pnanovdb_vec3_t dst; float sx = PNANOVDB_DEREF(src).x; float sy = PNANOVDB_DEREF(src).y; float sz = PNANOVDB_DEREF(src).z; dst.x = sx * pnanovdb_map_get_matf(buf, map, 0) + sy * pnanovdb_map_get_matf(buf, map, 1) + sz * pnanovdb_map_get_matf(buf, map, 2); dst.y = sx * pnanovdb_map_get_matf(buf, map, 3) + sy * pnanovdb_map_get_matf(buf, map, 4) + sz * pnanovdb_map_get_matf(buf, map, 5); dst.z = sx * pnanovdb_map_get_matf(buf, map, 6) + sy * pnanovdb_map_get_matf(buf, map, 7) + sz * pnanovdb_map_get_matf(buf, map, 8); return dst; } PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_map_apply_inverse_jacobi(pnanovdb_buf_t buf, pnanovdb_map_handle_t map, PNANOVDB_IN(pnanovdb_vec3_t) src) { pnanovdb_vec3_t dst; float sx = PNANOVDB_DEREF(src).x; float sy = PNANOVDB_DEREF(src).y; float sz = PNANOVDB_DEREF(src).z; dst.x = sx * pnanovdb_map_get_invmatf(buf, map, 0) + sy * pnanovdb_map_get_invmatf(buf, map, 1) + sz * pnanovdb_map_get_invmatf(buf, map, 2); dst.y = sx * pnanovdb_map_get_invmatf(buf, map, 3) + sy * pnanovdb_map_get_invmatf(buf, map, 4) + sz * pnanovdb_map_get_invmatf(buf, map, 5); dst.z = sx * pnanovdb_map_get_invmatf(buf, map, 6) + sy * pnanovdb_map_get_invmatf(buf, map, 7) + sz * pnanovdb_map_get_invmatf(buf, map, 8); return dst; } PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_grid_world_to_indexf(pnanovdb_buf_t buf, pnanovdb_grid_handle_t grid, PNANOVDB_IN(pnanovdb_vec3_t) src) { pnanovdb_map_handle_t map = pnanovdb_grid_get_map(buf, grid); return pnanovdb_map_apply_inverse(buf, map, src); } PNANOVDB_FORCE_INLINE pnanovdb_vec3_t 
pnanovdb_grid_index_to_worldf(pnanovdb_buf_t buf, pnanovdb_grid_handle_t grid, PNANOVDB_IN(pnanovdb_vec3_t) src) { pnanovdb_map_handle_t map = pnanovdb_grid_get_map(buf, grid); return pnanovdb_map_apply(buf, map, src); } PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_grid_world_to_index_dirf(pnanovdb_buf_t buf, pnanovdb_grid_handle_t grid, PNANOVDB_IN(pnanovdb_vec3_t) src) { pnanovdb_map_handle_t map = pnanovdb_grid_get_map(buf, grid); return pnanovdb_map_apply_inverse_jacobi(buf, map, src); } PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_grid_index_to_world_dirf(pnanovdb_buf_t buf, pnanovdb_grid_handle_t grid, PNANOVDB_IN(pnanovdb_vec3_t) src) { pnanovdb_map_handle_t map = pnanovdb_grid_get_map(buf, grid); return pnanovdb_map_apply_jacobi(buf, map, src); } // ------------------------------------------------ DitherLUT ----------------------------------------------------------- // This table was generated with /************** static constexpr inline uint32 SYSwang_inthash(uint32 key) { // From http://www.concentric.net/~Ttwang/tech/inthash.htm key += ~(key << 16); key ^= (key >> 5); key += (key << 3); key ^= (key >> 13); key += ~(key << 9); key ^= (key >> 17); return key; } static void ut_initDitherR(float *pattern, float offset, int x, int y, int z, int res, int goalres) { // These offsets are designed to maximize the difference between // dither values in nearby voxels within a given 2x2x2 cell, without // producing axis-aligned artifacts. The are organized in row-major // order. static const float theDitherOffset[] = {0,4,6,2,5,1,3,7}; static const float theScale = 0.125F; int key = (((z << res) + y) << res) + x; if (res == goalres) { pattern[key] = offset; return; } // Randomly flip (on each axis) the dithering patterns used by the // subcells. This key is xor'd with the subcell index below before // looking up in the dither offset list. key = SYSwang_inthash(key) & 7; x <<= 1; y <<= 1; z <<= 1; offset *= theScale; for (int i = 0; i < 8; i++) ut_initDitherR(pattern, offset+theDitherOffset[i ^ key]*theScale, x+(i&1), y+((i&2)>>1), z+((i&4)>>2), res+1, goalres); } // This is a compact algorithm that accomplishes essentially the same thing // as ut_initDither() above. We should eventually switch to use this and // clean the dead code. static fpreal32 * ut_initDitherRecursive(int goalres) { const int nfloat = 1 << (goalres*3); float *pattern = new float[nfloat]; ut_initDitherR(pattern, 1.0F, 0, 0, 0, 0, goalres); // This has built an even spacing from 1/nfloat to 1.0. // however, our dither pattern should be 1/(nfloat+1) to nfloat/(nfloat+1) // So we do a correction here. Note that the earlier calculations are // done with powers of 2 so are exact, so it does make sense to delay // the renormalization to this pass. 
float correctionterm = nfloat / (nfloat+1.0F); for (int i = 0; i < nfloat; i++) pattern[i] *= correctionterm; return pattern; } theDitherMatrix = ut_initDitherRecursive(3); for (int i = 0; i < 512/8; i ++) { for (int j = 0; j < 8; j ++) std::cout << theDitherMatrix[i*8+j] << "f, "; std::cout << std::endl; } **************/ PNANOVDB_STATIC_CONST float pnanovdb_dither_lut[512] = { 0.14425f, 0.643275f, 0.830409f, 0.331384f, 0.105263f, 0.604289f, 0.167641f, 0.666667f, 0.892788f, 0.393762f, 0.0818713f, 0.580897f, 0.853801f, 0.354776f, 0.916179f, 0.417154f, 0.612086f, 0.11306f, 0.79922f, 0.300195f, 0.510721f, 0.0116959f, 0.947368f, 0.448343f, 0.362573f, 0.861598f, 0.0506823f, 0.549708f, 0.261209f, 0.760234f, 0.19883f, 0.697856f, 0.140351f, 0.639376f, 0.576998f, 0.0779727f, 0.522417f, 0.0233918f, 0.460039f, 0.959064f, 0.888889f, 0.389864f, 0.327485f, 0.826511f, 0.272904f, 0.77193f, 0.709552f, 0.210526f, 0.483431f, 0.982456f, 0.296296f, 0.795322f, 0.116959f, 0.615984f, 0.0545809f, 0.553606f, 0.732943f, 0.233918f, 0.545809f, 0.0467836f, 0.865497f, 0.366472f, 0.803119f, 0.304094f, 0.518519f, 0.0194932f, 0.45614f, 0.955166f, 0.729045f, 0.230019f, 0.54191f, 0.042885f, 0.269006f, 0.768031f, 0.705653f, 0.206628f, 0.479532f, 0.978558f, 0.292398f, 0.791423f, 0.237817f, 0.736842f, 0.424951f, 0.923977f, 0.136452f, 0.635478f, 0.323587f, 0.822612f, 0.986355f, 0.487329f, 0.674464f, 0.175439f, 0.88499f, 0.385965f, 0.573099f, 0.0740741f, 0.51462f, 0.0155945f, 0.202729f, 0.701754f, 0.148148f, 0.647174f, 0.834308f, 0.335283f, 0.265107f, 0.764133f, 0.951267f, 0.452242f, 0.896686f, 0.397661f, 0.08577f, 0.584795f, 0.8577f, 0.358674f, 0.920078f, 0.421053f, 0.740741f, 0.241715f, 0.678363f, 0.179337f, 0.109162f, 0.608187f, 0.17154f, 0.670565f, 0.491228f, 0.990253f, 0.42885f, 0.927875f, 0.0662768f, 0.565302f, 0.62768f, 0.128655f, 0.183236f, 0.682261f, 0.744639f, 0.245614f, 0.814815f, 0.315789f, 0.378168f, 0.877193f, 0.931774f, 0.432749f, 0.495127f, 0.994152f, 0.0350877f, 0.534113f, 0.97076f, 0.471735f, 0.214425f, 0.71345f, 0.526316f, 0.0272904f, 0.783626f, 0.2846f, 0.222222f, 0.721248f, 0.962963f, 0.463938f, 0.276803f, 0.775828f, 0.966862f, 0.467836f, 0.405458f, 0.904483f, 0.0701754f, 0.569201f, 0.881092f, 0.382066f, 0.218324f, 0.717349f, 0.654971f, 0.155945f, 0.818713f, 0.319688f, 0.132554f, 0.631579f, 0.0623782f, 0.561404f, 0.748538f, 0.249513f, 0.912281f, 0.413255f, 0.974659f, 0.475634f, 0.810916f, 0.311891f, 0.499025f, 0.998051f, 0.163743f, 0.662768f, 0.226121f, 0.725146f, 0.690058f, 0.191033f, 0.00389864f, 0.502924f, 0.557505f, 0.0584795f, 0.120858f, 0.619883f, 0.440546f, 0.939571f, 0.752437f, 0.253411f, 0.307992f, 0.807018f, 0.869396f, 0.37037f, 0.658869f, 0.159844f, 0.346979f, 0.846004f, 0.588694f, 0.0896686f, 0.152047f, 0.651072f, 0.409357f, 0.908382f, 0.596491f, 0.0974659f, 0.339181f, 0.838207f, 0.900585f, 0.401559f, 0.34308f, 0.842105f, 0.779727f, 0.280702f, 0.693957f, 0.194932f, 0.25731f, 0.756335f, 0.592593f, 0.0935673f, 0.0311891f, 0.530214f, 0.444444f, 0.94347f, 0.506823f, 0.00779727f, 0.68616f, 0.187135f, 0.124756f, 0.623782f, 0.288499f, 0.787524f, 0.350877f, 0.849903f, 0.436647f, 0.935673f, 0.873294f, 0.374269f, 0.538012f, 0.0389864f, 0.60039f, 0.101365f, 0.57115f, 0.0721248f, 0.758285f, 0.259259f, 0.719298f, 0.220273f, 0.532164f, 0.0331384f, 0.321637f, 0.820663f, 0.00974659f, 0.508772f, 0.469786f, 0.968811f, 0.282651f, 0.781676f, 0.539961f, 0.0409357f, 0.727096f, 0.22807f, 0.500975f, 0.00194932f, 0.563353f, 0.0643275f, 0.290448f, 0.789474f, 0.477583f, 0.976608f, 0.251462f, 0.750487f, 0.31384f, 0.812865f, 
0.94152f, 0.442495f, 0.879142f, 0.380117f, 0.37232f, 0.871345f, 0.309942f, 0.808967f, 0.192982f, 0.692008f, 0.130604f, 0.62963f, 0.621832f, 0.122807f, 0.559454f, 0.0604289f, 0.660819f, 0.161793f, 0.723197f, 0.224172f, 0.403509f, 0.902534f, 0.840156f, 0.341131f, 0.411306f, 0.910331f, 0.473684f, 0.97271f, 0.653021f, 0.153996f, 0.0916179f, 0.590643f, 0.196881f, 0.695906f, 0.384016f, 0.883041f, 0.0955166f, 0.594542f, 0.157895f, 0.65692f, 0.945419f, 0.446394f, 0.633528f, 0.134503f, 0.844055f, 0.345029f, 0.906433f, 0.407407f, 0.165692f, 0.664717f, 0.103314f, 0.602339f, 0.126706f, 0.625731f, 0.189084f, 0.688109f, 0.91423f, 0.415205f, 0.851852f, 0.352827f, 0.875244f, 0.376218f, 0.937622f, 0.438596f, 0.317739f, 0.816764f, 0.255361f, 0.754386f, 0.996101f, 0.497076f, 0.933723f, 0.434698f, 0.567251f, 0.0682261f, 0.504873f, 0.00584795f, 0.247563f, 0.746589f, 0.185185f, 0.684211f, 0.037037f, 0.536062f, 0.0994152f, 0.598441f, 0.777778f, 0.278752f, 0.465887f, 0.964912f, 0.785575f, 0.28655f, 0.847953f, 0.348928f, 0.0292398f, 0.528265f, 0.7154f, 0.216374f, 0.39961f, 0.898636f, 0.961014f, 0.461988f, 0.0487329f, 0.547758f, 0.111111f, 0.610136f, 0.649123f, 0.150097f, 0.212476f, 0.711501f, 0.797271f, 0.298246f, 0.859649f, 0.360624f, 0.118908f, 0.617934f, 0.0565302f, 0.555556f, 0.329435f, 0.82846f, 0.516569f, 0.0175439f, 0.867446f, 0.368421f, 0.805068f, 0.306043f, 0.578947f, 0.079922f, 0.267057f, 0.766082f, 0.270955f, 0.76998f, 0.707602f, 0.208577f, 0.668616f, 0.169591f, 0.606238f, 0.107212f, 0.520468f, 0.0214425f, 0.45809f, 0.957115f, 0.419103f, 0.918129f, 0.356725f, 0.855751f, 0.988304f, 0.489279f, 0.426901f, 0.925926f, 0.450292f, 0.949318f, 0.512671f, 0.0136452f, 0.239766f, 0.738791f, 0.676413f, 0.177388f, 0.699805f, 0.20078f, 0.263158f, 0.762183f, 0.773879f, 0.274854f, 0.337232f, 0.836257f, 0.672515f, 0.173489f, 0.734893f, 0.235867f, 0.0253411f, 0.524366f, 0.586745f, 0.0877193f, 0.423002f, 0.922027f, 0.48538f, 0.984405f, 0.74269f, 0.243665f, 0.680312f, 0.181287f, 0.953216f, 0.454191f, 0.1423f, 0.641326f, 0.493177f, 0.992203f, 0.430799f, 0.929825f, 0.204678f, 0.703704f, 0.890838f, 0.391813f, 0.894737f, 0.395712f, 0.0838207f, 0.582846f, 0.0448343f, 0.54386f, 0.231969f, 0.730994f, 0.146199f, 0.645224f, 0.832359f, 0.333333f, 0.793372f, 0.294347f, 0.980507f, 0.481481f, 0.364522f, 0.863548f, 0.80117f, 0.302144f, 0.824561f, 0.325536f, 0.138402f, 0.637427f, 0.614035f, 0.11501f, 0.0526316f, 0.551657f, 0.0760234f, 0.575049f, 0.88694f, 0.387914f, }; PNANOVDB_FORCE_INLINE float pnanovdb_dither_lookup(pnanovdb_bool_t enabled, int offset) { return enabled ? 
pnanovdb_dither_lut[offset & 511] : 0.5f; } // ------------------------------------------------ HDDA ----------------------------------------------------------- #ifdef PNANOVDB_HDDA // Comment out to disable this explicit round-off check #define PNANOVDB_ENFORCE_FORWARD_STEPPING #define PNANOVDB_HDDA_FLOAT_MAX 1e38f struct pnanovdb_hdda_t { pnanovdb_int32_t dim; float tmin; float tmax; pnanovdb_coord_t voxel; pnanovdb_coord_t step; pnanovdb_vec3_t delta; pnanovdb_vec3_t next; }; PNANOVDB_STRUCT_TYPEDEF(pnanovdb_hdda_t) PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_hdda_pos_to_ijk(PNANOVDB_IN(pnanovdb_vec3_t) pos) { pnanovdb_coord_t voxel; voxel.x = pnanovdb_float_to_int32(pnanovdb_floor(PNANOVDB_DEREF(pos).x)); voxel.y = pnanovdb_float_to_int32(pnanovdb_floor(PNANOVDB_DEREF(pos).y)); voxel.z = pnanovdb_float_to_int32(pnanovdb_floor(PNANOVDB_DEREF(pos).z)); return voxel; } PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_hdda_pos_to_voxel(PNANOVDB_IN(pnanovdb_vec3_t) pos, int dim) { pnanovdb_coord_t voxel; voxel.x = pnanovdb_float_to_int32(pnanovdb_floor(PNANOVDB_DEREF(pos).x)) & (~(dim - 1)); voxel.y = pnanovdb_float_to_int32(pnanovdb_floor(PNANOVDB_DEREF(pos).y)) & (~(dim - 1)); voxel.z = pnanovdb_float_to_int32(pnanovdb_floor(PNANOVDB_DEREF(pos).z)) & (~(dim - 1)); return voxel; } PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_hdda_ray_start(PNANOVDB_IN(pnanovdb_vec3_t) origin, float tmin, PNANOVDB_IN(pnanovdb_vec3_t) direction) { pnanovdb_vec3_t pos = pnanovdb_vec3_add( pnanovdb_vec3_mul(PNANOVDB_DEREF(direction), pnanovdb_vec3_uniform(tmin)), PNANOVDB_DEREF(origin) ); return pos; } PNANOVDB_FORCE_INLINE void pnanovdb_hdda_init(PNANOVDB_INOUT(pnanovdb_hdda_t) hdda, PNANOVDB_IN(pnanovdb_vec3_t) origin, float tmin, PNANOVDB_IN(pnanovdb_vec3_t) direction, float tmax, int dim) { PNANOVDB_DEREF(hdda).dim = dim; PNANOVDB_DEREF(hdda).tmin = tmin; PNANOVDB_DEREF(hdda).tmax = tmax; pnanovdb_vec3_t pos = pnanovdb_hdda_ray_start(origin, tmin, direction); pnanovdb_vec3_t dir_inv = pnanovdb_vec3_div(pnanovdb_vec3_uniform(1.f), PNANOVDB_DEREF(direction)); PNANOVDB_DEREF(hdda).voxel = pnanovdb_hdda_pos_to_voxel(PNANOVDB_REF(pos), dim); // x if (PNANOVDB_DEREF(direction).x == 0.f) { PNANOVDB_DEREF(hdda).next.x = PNANOVDB_HDDA_FLOAT_MAX; PNANOVDB_DEREF(hdda).step.x = 0; PNANOVDB_DEREF(hdda).delta.x = 0.f; } else if (dir_inv.x > 0.f) { PNANOVDB_DEREF(hdda).step.x = 1; PNANOVDB_DEREF(hdda).next.x = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.x + dim - pos.x) * dir_inv.x; PNANOVDB_DEREF(hdda).delta.x = dir_inv.x; } else { PNANOVDB_DEREF(hdda).step.x = -1; PNANOVDB_DEREF(hdda).next.x = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.x - pos.x) * dir_inv.x; PNANOVDB_DEREF(hdda).delta.x = -dir_inv.x; } // y if (PNANOVDB_DEREF(direction).y == 0.f) { PNANOVDB_DEREF(hdda).next.y = PNANOVDB_HDDA_FLOAT_MAX; PNANOVDB_DEREF(hdda).step.y = 0; PNANOVDB_DEREF(hdda).delta.y = 0.f; } else if (dir_inv.y > 0.f) { PNANOVDB_DEREF(hdda).step.y = 1; PNANOVDB_DEREF(hdda).next.y = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.y + dim - pos.y) * dir_inv.y; PNANOVDB_DEREF(hdda).delta.y = dir_inv.y; } else { PNANOVDB_DEREF(hdda).step.y = -1; PNANOVDB_DEREF(hdda).next.y = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.y - pos.y) * dir_inv.y; PNANOVDB_DEREF(hdda).delta.y = -dir_inv.y; } // z if (PNANOVDB_DEREF(direction).z == 0.f) { PNANOVDB_DEREF(hdda).next.z = PNANOVDB_HDDA_FLOAT_MAX; PNANOVDB_DEREF(hdda).step.z = 0; PNANOVDB_DEREF(hdda).delta.z = 0.f; } else if (dir_inv.z > 0.f) { 
PNANOVDB_DEREF(hdda).step.z = 1; PNANOVDB_DEREF(hdda).next.z = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.z + dim - pos.z) * dir_inv.z; PNANOVDB_DEREF(hdda).delta.z = dir_inv.z; } else { PNANOVDB_DEREF(hdda).step.z = -1; PNANOVDB_DEREF(hdda).next.z = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.z - pos.z) * dir_inv.z; PNANOVDB_DEREF(hdda).delta.z = -dir_inv.z; } } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_hdda_update(PNANOVDB_INOUT(pnanovdb_hdda_t) hdda, PNANOVDB_IN(pnanovdb_vec3_t) origin, PNANOVDB_IN(pnanovdb_vec3_t) direction, int dim) { if (PNANOVDB_DEREF(hdda).dim == dim) { return PNANOVDB_FALSE; } PNANOVDB_DEREF(hdda).dim = dim; pnanovdb_vec3_t pos = pnanovdb_vec3_add( pnanovdb_vec3_mul(PNANOVDB_DEREF(direction), pnanovdb_vec3_uniform(PNANOVDB_DEREF(hdda).tmin)), PNANOVDB_DEREF(origin) ); pnanovdb_vec3_t dir_inv = pnanovdb_vec3_div(pnanovdb_vec3_uniform(1.f), PNANOVDB_DEREF(direction)); PNANOVDB_DEREF(hdda).voxel = pnanovdb_hdda_pos_to_voxel(PNANOVDB_REF(pos), dim); if (PNANOVDB_DEREF(hdda).step.x != 0) { PNANOVDB_DEREF(hdda).next.x = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.x - pos.x) * dir_inv.x; if (PNANOVDB_DEREF(hdda).step.x > 0) { PNANOVDB_DEREF(hdda).next.x += dim * dir_inv.x; } } if (PNANOVDB_DEREF(hdda).step.y != 0) { PNANOVDB_DEREF(hdda).next.y = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.y - pos.y) * dir_inv.y; if (PNANOVDB_DEREF(hdda).step.y > 0) { PNANOVDB_DEREF(hdda).next.y += dim * dir_inv.y; } } if (PNANOVDB_DEREF(hdda).step.z != 0) { PNANOVDB_DEREF(hdda).next.z = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.z - pos.z) * dir_inv.z; if (PNANOVDB_DEREF(hdda).step.z > 0) { PNANOVDB_DEREF(hdda).next.z += dim * dir_inv.z; } } return PNANOVDB_TRUE; } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_hdda_step(PNANOVDB_INOUT(pnanovdb_hdda_t) hdda) { pnanovdb_bool_t ret; if (PNANOVDB_DEREF(hdda).next.x < PNANOVDB_DEREF(hdda).next.y && PNANOVDB_DEREF(hdda).next.x < PNANOVDB_DEREF(hdda).next.z) { #ifdef PNANOVDB_ENFORCE_FORWARD_STEPPING if (PNANOVDB_DEREF(hdda).next.x <= PNANOVDB_DEREF(hdda).tmin) { PNANOVDB_DEREF(hdda).next.x += PNANOVDB_DEREF(hdda).tmin - 0.999999f * PNANOVDB_DEREF(hdda).next.x + 1.0e-6f; } #endif PNANOVDB_DEREF(hdda).tmin = PNANOVDB_DEREF(hdda).next.x; PNANOVDB_DEREF(hdda).next.x += PNANOVDB_DEREF(hdda).dim * PNANOVDB_DEREF(hdda).delta.x; PNANOVDB_DEREF(hdda).voxel.x += PNANOVDB_DEREF(hdda).dim * PNANOVDB_DEREF(hdda).step.x; ret = PNANOVDB_DEREF(hdda).tmin <= PNANOVDB_DEREF(hdda).tmax; } else if (PNANOVDB_DEREF(hdda).next.y < PNANOVDB_DEREF(hdda).next.z) { #ifdef PNANOVDB_ENFORCE_FORWARD_STEPPING if (PNANOVDB_DEREF(hdda).next.y <= PNANOVDB_DEREF(hdda).tmin) { PNANOVDB_DEREF(hdda).next.y += PNANOVDB_DEREF(hdda).tmin - 0.999999f * PNANOVDB_DEREF(hdda).next.y + 1.0e-6f; } #endif PNANOVDB_DEREF(hdda).tmin = PNANOVDB_DEREF(hdda).next.y; PNANOVDB_DEREF(hdda).next.y += PNANOVDB_DEREF(hdda).dim * PNANOVDB_DEREF(hdda).delta.y; PNANOVDB_DEREF(hdda).voxel.y += PNANOVDB_DEREF(hdda).dim * PNANOVDB_DEREF(hdda).step.y; ret = PNANOVDB_DEREF(hdda).tmin <= PNANOVDB_DEREF(hdda).tmax; } else { #ifdef PNANOVDB_ENFORCE_FORWARD_STEPPING if (PNANOVDB_DEREF(hdda).next.z <= PNANOVDB_DEREF(hdda).tmin) { PNANOVDB_DEREF(hdda).next.z += PNANOVDB_DEREF(hdda).tmin - 0.999999f * PNANOVDB_DEREF(hdda).next.z + 1.0e-6f; } #endif PNANOVDB_DEREF(hdda).tmin = PNANOVDB_DEREF(hdda).next.z; PNANOVDB_DEREF(hdda).next.z += PNANOVDB_DEREF(hdda).dim * PNANOVDB_DEREF(hdda).delta.z; PNANOVDB_DEREF(hdda).voxel.z += PNANOVDB_DEREF(hdda).dim * 
PNANOVDB_DEREF(hdda).step.z; ret = PNANOVDB_DEREF(hdda).tmin <= PNANOVDB_DEREF(hdda).tmax; } return ret; } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_hdda_ray_clip( PNANOVDB_IN(pnanovdb_vec3_t) bbox_min, PNANOVDB_IN(pnanovdb_vec3_t) bbox_max, PNANOVDB_IN(pnanovdb_vec3_t) origin, PNANOVDB_INOUT(float) tmin, PNANOVDB_IN(pnanovdb_vec3_t) direction, PNANOVDB_INOUT(float) tmax ) { pnanovdb_vec3_t dir_inv = pnanovdb_vec3_div(pnanovdb_vec3_uniform(1.f), PNANOVDB_DEREF(direction)); pnanovdb_vec3_t t0 = pnanovdb_vec3_mul(pnanovdb_vec3_sub(PNANOVDB_DEREF(bbox_min), PNANOVDB_DEREF(origin)), dir_inv); pnanovdb_vec3_t t1 = pnanovdb_vec3_mul(pnanovdb_vec3_sub(PNANOVDB_DEREF(bbox_max), PNANOVDB_DEREF(origin)), dir_inv); pnanovdb_vec3_t tmin3 = pnanovdb_vec3_min(t0, t1); pnanovdb_vec3_t tmax3 = pnanovdb_vec3_max(t0, t1); float tnear = pnanovdb_max(tmin3.x, pnanovdb_max(tmin3.y, tmin3.z)); float tfar = pnanovdb_min(tmax3.x, pnanovdb_min(tmax3.y, tmax3.z)); pnanovdb_bool_t hit = tnear <= tfar; PNANOVDB_DEREF(tmin) = pnanovdb_max(PNANOVDB_DEREF(tmin), tnear); PNANOVDB_DEREF(tmax) = pnanovdb_min(PNANOVDB_DEREF(tmax), tfar); return hit; } PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_hdda_zero_crossing( pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_IN(pnanovdb_vec3_t) origin, float tmin, PNANOVDB_IN(pnanovdb_vec3_t) direction, float tmax, PNANOVDB_INOUT(float) thit, PNANOVDB_INOUT(float) v ) { pnanovdb_coord_t bbox_min = pnanovdb_root_get_bbox_min(buf, PNANOVDB_DEREF(acc).root); pnanovdb_coord_t bbox_max = pnanovdb_root_get_bbox_max(buf, PNANOVDB_DEREF(acc).root); pnanovdb_vec3_t bbox_minf = pnanovdb_coord_to_vec3(bbox_min); pnanovdb_vec3_t bbox_maxf = pnanovdb_coord_to_vec3(pnanovdb_coord_add(bbox_max, pnanovdb_coord_uniform(1))); pnanovdb_bool_t hit = pnanovdb_hdda_ray_clip(PNANOVDB_REF(bbox_minf), PNANOVDB_REF(bbox_maxf), origin, PNANOVDB_REF(tmin), direction, PNANOVDB_REF(tmax)); if (!hit || tmax > 1.0e20f) { return PNANOVDB_FALSE; } pnanovdb_vec3_t pos = pnanovdb_hdda_ray_start(origin, tmin, direction); pnanovdb_coord_t ijk = pnanovdb_hdda_pos_to_ijk(PNANOVDB_REF(pos)); pnanovdb_address_t address = pnanovdb_readaccessor_get_value_address(PNANOVDB_GRID_TYPE_FLOAT, buf, acc, PNANOVDB_REF(ijk)); float v0 = pnanovdb_read_float(buf, address); pnanovdb_int32_t dim = pnanovdb_uint32_as_int32(pnanovdb_readaccessor_get_dim(PNANOVDB_GRID_TYPE_FLOAT, buf, acc, PNANOVDB_REF(ijk))); pnanovdb_hdda_t hdda; pnanovdb_hdda_init(PNANOVDB_REF(hdda), origin, tmin, direction, tmax, dim); while (pnanovdb_hdda_step(PNANOVDB_REF(hdda))) { pnanovdb_vec3_t pos_start = pnanovdb_hdda_ray_start(origin, hdda.tmin + 1.0001f, direction); ijk = pnanovdb_hdda_pos_to_ijk(PNANOVDB_REF(pos_start)); dim = pnanovdb_uint32_as_int32(pnanovdb_readaccessor_get_dim(PNANOVDB_GRID_TYPE_FLOAT, buf, acc, PNANOVDB_REF(ijk))); pnanovdb_hdda_update(PNANOVDB_REF(hdda), origin, direction, dim); if (hdda.dim > 1 || !pnanovdb_readaccessor_is_active(grid_type, buf, acc, PNANOVDB_REF(ijk))) { continue; } while (pnanovdb_hdda_step(PNANOVDB_REF(hdda)) && pnanovdb_readaccessor_is_active(grid_type, buf, acc, PNANOVDB_REF(hdda.voxel))) { ijk = hdda.voxel; pnanovdb_address_t address = pnanovdb_readaccessor_get_value_address(PNANOVDB_GRID_TYPE_FLOAT, buf, acc, PNANOVDB_REF(ijk)); PNANOVDB_DEREF(v) = pnanovdb_read_float(buf, address); if (PNANOVDB_DEREF(v) * v0 < 0.f) { PNANOVDB_DEREF(thit) = hdda.tmin; return PNANOVDB_TRUE; } } } return PNANOVDB_FALSE; } #endif #endif // end of 
NANOVDB_PNANOVDB_H_HAS_BEEN_INCLUDED
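// ----------------------------------------------------------------------------
// Editorial usage sketch (not part of the original header): a minimal example
// of how the read-accessor machinery defined above is typically combined with
// the grid/tree/root handles from earlier in this header to sample a float
// grid at a world-space position. It assumes the buffer already contains a
// valid float NanoVDB grid starting at offset zero; the helper name
// pnanovdb_example_sample_world is illustrative only.
PNANOVDB_FORCE_INLINE float pnanovdb_example_sample_world(pnanovdb_buf_t buf, pnanovdb_vec3_t world_pos)
{
    pnanovdb_grid_handle_t grid = { pnanovdb_address_null() }; // grid 0 starts at the beginning of the buffer
    pnanovdb_tree_handle_t tree = pnanovdb_grid_get_tree(buf, grid);
    pnanovdb_root_handle_t root = pnanovdb_tree_get_root(buf, tree);

    // world space -> index space via the grid's map, then round down to a voxel coordinate
    pnanovdb_vec3_t index_pos = pnanovdb_grid_world_to_indexf(buf, grid, PNANOVDB_REF(world_pos));
    pnanovdb_coord_t ijk;
    ijk.x = pnanovdb_float_to_int32(pnanovdb_floor(index_pos.x));
    ijk.y = pnanovdb_float_to_int32(pnanovdb_floor(index_pos.y));
    ijk.z = pnanovdb_float_to_int32(pnanovdb_floor(index_pos.z));

    // cached top-down lookup: root -> upper -> lower -> leaf, reusing the previously
    // visited path when the key matches; inactive voxels resolve to the enclosing
    // tile or background value address, which pnanovdb_read_float reads the same way
    pnanovdb_readaccessor_t acc;
    pnanovdb_readaccessor_init(PNANOVDB_REF(acc), root);
    pnanovdb_address_t address = pnanovdb_readaccessor_get_value_address(PNANOVDB_GRID_TYPE_FLOAT, buf, PNANOVDB_REF(acc), PNANOVDB_REF(ijk));
    return pnanovdb_read_float(buf, address);
}
// ----------------------------------------------------------------------------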
165,441
C
47.788558
297
0.701948
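Editorial aside on the HDDA section near the end of the PNanoVDB.h listing above: the sketch below shows one common way the ray-marching helpers are driven. It is a hedged illustration, not part of the header; it assumes PNANOVDB_HDDA was defined before including the header, that the buffer holds a float (for example signed-distance) grid at offset zero, and that the ray origin and direction have already been transformed to index space with pnanovdb_grid_world_to_indexf / pnanovdb_grid_world_to_index_dirf. The wrapper name pnanovdb_example_raymarch is illustrative only.

// Returns PNANOVDB_TRUE and the index-space hit distance when the ray crosses
// the zero level set of the float grid stored in buf.
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_example_raymarch(
    pnanovdb_buf_t buf,
    pnanovdb_vec3_t origin,     // index-space ray origin
    pnanovdb_vec3_t direction,  // index-space ray direction (non-zero components preferred)
    float tmin, float tmax,
    PNANOVDB_INOUT(float) t_hit)
{
    pnanovdb_grid_handle_t grid = { pnanovdb_address_null() };
    pnanovdb_tree_handle_t tree = pnanovdb_grid_get_tree(buf, grid);
    pnanovdb_root_handle_t root = pnanovdb_tree_get_root(buf, tree);

    // the accessor caches the leaf/lower/upper nodes visited by the march
    pnanovdb_readaccessor_t acc;
    pnanovdb_readaccessor_init(PNANOVDB_REF(acc), root);

    float v = 0.f; // receives the grid value just past the zero crossing
    return pnanovdb_hdda_zero_crossing(
        PNANOVDB_GRID_TYPE_FLOAT, buf, PNANOVDB_REF(acc),
        PNANOVDB_REF(origin), tmin, PNANOVDB_REF(direction), tmax,
        t_hit, PNANOVDB_REF(v));
}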
NVIDIA/warp/warp/native/nanovdb/GridHandle.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /*! \file nanovdb/GridHandle.h \author Ken Museth \date January 8, 2020 \brief Defines GridHandle, which manages a host, and possibly a device, memory buffer containing one or more NanoVDB grids. */ #ifndef NANOVDB_GRID_HANDLE_H_HAS_BEEN_INCLUDED #define NANOVDB_GRID_HANDLE_H_HAS_BEEN_INCLUDED #include <fstream> // for std::ifstream #include <iostream> // for std::cerr/cout #include <vector> #include <initializer_list> #include <nanovdb/NanoVDB.h>// for toGridType #include <nanovdb/HostBuffer.h> namespace nanovdb { // --------------------------> GridHandle <------------------------------------ struct GridHandleMetaData {uint64_t offset, size; GridType gridType;}; /// @brief This class serves to manage a buffer containing one or more NanoVDB Grids. /// /// @note It is important to note that this class does NOT depend on OpenVDB. template<typename BufferT = HostBuffer> class GridHandle { std::vector<GridHandleMetaData> mMetaData; BufferT mBuffer; template <typename T> static T* no_const(const T* ptr) { return const_cast<T*>(ptr); } public: using BufferType = BufferT; /// @brief Move constructor from a host buffer /// @param buffer buffer containing one or more NanoGrids that will be moved into this GridHandle /// @throw Will throw and error with the buffer does not contain a valid NanoGrid! template<typename T = BufferT, typename util::enable_if<BufferTraits<T>::hasDeviceDual, int>::type = 0> GridHandle(T&& buffer); /// @brief Move constructor from a dual host-device buffer /// @param buffer buffer containing one or more NanoGrids that will be moved into this GridHandle /// @throw Will throw and error with the buffer does not contain a valid NanoGrid! template<typename T = BufferT, typename util::disable_if<BufferTraits<T>::hasDeviceDual, int>::type = 0> GridHandle(T&& buffer); /// @brief Constructs an empty GridHandle GridHandle() = default; /// @brief Disallow copy-construction GridHandle(const GridHandle&) = delete; /// @brief Move copy-constructor GridHandle(GridHandle&& other) noexcept { mBuffer = std::move(other.mBuffer); mMetaData = std::move(other.mMetaData); } /// @brief clear this GridHandle to an empty handle void reset() { mBuffer.clear(); mMetaData.clear(); } /// @brief Disallow copy assignment operation GridHandle& operator=(const GridHandle&) = delete; /// @brief Move copy assignment operation GridHandle& operator=(GridHandle&& other) noexcept { mBuffer = std::move(other.mBuffer); mMetaData = std::move(other.mMetaData); return *this; } /// @brief Performs a deep copy of the GridHandle, possibly templated on a different buffer type /// @tparam OtherBufferT Buffer type of the deep copy /// @param buffer optional buffer used for allocation /// @return A new handle of the specified buffer type that contains a deep copy of the current handle template <typename OtherBufferT = HostBuffer> GridHandle<OtherBufferT> copy(const OtherBufferT& buffer = OtherBufferT()) const; /// @brief Return a reference to the buffer BufferT& buffer() { return mBuffer; } /// @brief Return a const reference to the buffer const BufferT& buffer() const { return mBuffer; } /// @brief Returns a non-const pointer to the data. /// @warning Note that the return pointer can be NULL if the GridHandle was not initialized void* data() { return mBuffer.data(); } /// @brief Returns a const pointer to the data. 
/// @warning Note that the return pointer can be NULL if the GridHandle was not initialized const void* data() const { return mBuffer.data(); } template<typename U = BufferT> typename util::enable_if<BufferTraits<U>::hasDeviceDual, const void*>::type deviceData() const { return mBuffer.deviceData(); } template<typename U = BufferT> typename util::enable_if<BufferTraits<U>::hasDeviceDual, void*>::type deviceData() { return mBuffer.deviceData(); } /// @brief Returns the size in bytes of the raw memory buffer managed by this GridHandle. uint64_t size() const { return mBuffer.size(); } //@{ /// @brief Return true if this handle is empty, i.e. has no allocated memory bool empty() const { return this->size() == 0; } bool isEmpty() const { return this->size() == 0; } //@} /// @brief Return true if this handle contains any grids operator bool() const { return !this->empty(); } /// @brief Returns a const host pointer to the @a n'th NanoVDB grid encoded in this GridHandle. /// @tparam ValueT Value type of the grid pointer to be returned /// @param n Index of the (host) grid pointer to be returned /// @warning Note that the return pointer can be NULL if the GridHandle has no host grid, @a n is invalid /// or if the template parameter does not match the specified grid! template<typename ValueT> const NanoGrid<ValueT>* grid(uint32_t n = 0) const; /// @brief Returns a host pointer to the @a n'th NanoVDB grid encoded in this GridHandle. /// @tparam ValueT Value type of the grid pointer to be returned /// @param n Index of the (host) grid pointer to be returned /// @warning Note that the return pointer can be NULL if the GridHandle has no host grid, @a n is invalid /// or if the template parameter does not match the specified grid! template<typename ValueT> NanoGrid<ValueT>* grid(uint32_t n = 0) {return const_cast<NanoGrid<ValueT>*>(static_cast<const GridHandle*>(this)->template grid<ValueT>(n));} /// @brief Return a const pointer to the @a n'th grid encoded in this GridHandle on the device, e.g. GPU /// @tparam ValueT Value type of the grid pointer to be returned /// @param n Index of the (device) grid pointer to be returned /// @warning Note that the return pointer can be NULL if the GridHandle has no device grid, @a n is invalid, /// or if the template parameter does not match the specified grid. template<typename ValueT, typename U = BufferT> typename util::enable_if<BufferTraits<U>::hasDeviceDual, const NanoGrid<ValueT>*>::type deviceGrid(uint32_t n=0) const; /// @brief Return a pointer to the @a n'th grid encoded in this GridHandle on the device, e.g. GPU /// @tparam ValueT Value type of the grid pointer to be returned /// @param n Index of the grid pointer to be returned /// @param verbose if non-zero error messages will be printed in case something failed /// @warning Note that the return pointer can be NULL if the GridHandle was not initialized, @a n is invalid, /// or if the template parameter does not match the specified grid. template<typename ValueT, typename U = BufferT> typename util::enable_if<BufferTraits<U>::hasDeviceDual, NanoGrid<ValueT>*>::type deviceGrid(uint32_t n=0){return const_cast<NanoGrid<ValueT>*>(static_cast<const GridHandle*>(this)->template deviceGrid<ValueT>(n));} /// @brief Upload the grid to the device, e.g.
from CPU to GPU /// @note This method is only available if the buffer supports devices template<typename U = BufferT> typename util::enable_if<BufferTraits<U>::hasDeviceDual, void>::type deviceUpload(void* stream = nullptr, bool sync = true) { mBuffer.deviceUpload(stream, sync); } /// @brief Download the grid to from the device, e.g. from GPU to CPU /// @note This method is only available if the buffer supports devices template<typename U = BufferT> typename util::enable_if<BufferTraits<U>::hasDeviceDual, void>::type deviceDownload(void* stream = nullptr, bool sync = true) { mBuffer.deviceDownload(stream, sync); } /// @brief Check if the buffer is this handle has any padding, i.e. if the buffer is larger than the combined size of all its grids /// @return true is the combined size of all grid is smaller than the buffer size bool isPadded() const {return mMetaData.empty() ? false : mMetaData.back().offset + mMetaData.back().size != mBuffer.size();} /// @brief Return the total number of grids contained in this buffer uint32_t gridCount() const {return static_cast<uint32_t>(mMetaData.size());} /// @brief Return the grid size of the @a n'th grid in this GridHandle /// @param n index of the grid (assumed to be less than gridCount()) /// @return Return the byte size of the specified grid uint64_t gridSize(uint32_t n = 0) const {return mMetaData[n].size; } /// @brief Return the GridType of the @a n'th grid in this GridHandle /// @param n index of the grid (assumed to be less than gridCount()) /// @return Return the GridType of the specified grid GridType gridType(uint32_t n = 0) const {return mMetaData[n].gridType; } /// @brief Access to the GridData of the n'th grid in the current handle /// @param n zero-based ID of the grid /// @return Const pointer to the n'th GridData in the current handle const GridData* gridData(uint32_t n = 0) const; /// @brief Returns a const point to the @a n'th grid meta data /// @param n zero-based ID of the grid /// @warning Note that the return pointer can be NULL if the GridHandle was not initialized const GridMetaData* gridMetaData(uint32_t n = 0) const; /// @brief Write a specific grid in this buffer to an output stream /// @param os output stream that the buffer will be written to /// @param n zero-based index of the grid to be written to stream void write(std::ostream& os, uint32_t n) const { if (const GridData* data = this->gridData(n)) { os.write((const char*)data, data->mGridSize); } else { throw std::runtime_error("GridHandle does not contain a #" + std::to_string(n) + " grid"); } } /// @brief Write the entire grid buffer to an output stream /// @param os output stream that the buffer will be written to void write(std::ostream& os) const { for (uint32_t n=0; n<this->gridCount(); ++n) this->write(os, n); } /// @brief Write this entire grid buffer to a file /// @param fileName string name of the output file void write(const std::string &fileName) const { std::ofstream os(fileName, std::ios::out | std::ios::binary | std::ios::trunc); if (!os.is_open()) throw std::ios_base::failure("Unable to open file named \"" + fileName + "\" for output"); this->write(os); } /// @brief Write a specific grid to file /// @param fileName string name of the output file /// @param n zero-based index of the grid to be written to file void write(const std::string &fileName, uint32_t n) const { std::ofstream os(fileName, std::ios::out | std::ios::binary | std::ios::trunc); if (!os.is_open()) throw std::ios_base::failure("Unable to open file named \"" + fileName + "\" for output"); 
this->write(os, n); } /// @brief Read an entire raw grid buffer from an input stream /// @param is input stream containing a raw grid buffer /// @param pool optional pool from which to allocate the new grid buffer /// @throw Will throw a std::logic_error if the stream does not contain a valid raw grid void read(std::istream& is, const BufferT& pool = BufferT()); /// @brief Read a specific grid from an input stream containing a raw grid buffer /// @param is input stream containing a raw grid buffer /// @param n zero-based index of the grid to be read /// @param pool optional pool from which to allocate the new grid buffer /// @throw Will throw a std::logic_error if the stream does not contain a valid raw grid void read(std::istream& is, uint32_t n, const BufferT& pool = BufferT()); /// @brief Read a specific grid from an input stream containing a raw grid buffer /// @param is input stream containing a raw grid buffer /// @param gridName string name of the grid to be read /// @param pool optional pool from which to allocate the new grid buffer /// @throw Will throw a std::logic_error if the stream does not contain a valid raw grid with the specified name void read(std::istream& is, const std::string &gridName, const BufferT& pool = BufferT()); /// @brief Read a raw grid buffer from a file /// @param fileName string name of the input file containing a raw grid buffer /// @param pool optional pool from which to allocate the new grid buffer void read(const std::string &fileName, const BufferT& pool = BufferT()) { std::ifstream is(fileName, std::ios::in | std::ios::binary); if (!is.is_open()) throw std::ios_base::failure("Unable to open file named \"" + fileName + "\" for input"); this->read(is, pool); } /// @brief Read a specific grid from a file containing a raw grid buffer /// @param fileName string name of the input file containing a raw grid buffer /// @param n zero-based index of the grid to be read /// @param pool optional pool from which to allocate the new grid buffer /// @throw Will throw a std::ios_base::failure if the file does not exist and a /// std::logic_error if the file does not contain a valid raw grid void read(const std::string &fileName, uint32_t n, const BufferT& pool = BufferT()) { std::ifstream is(fileName, std::ios::in | std::ios::binary); if (!is.is_open()) throw std::ios_base::failure("Unable to open file named \"" + fileName + "\" for input"); this->read(is, n, pool); } /// @brief Read a specific grid from a file containing a raw grid buffer /// @param fileName string name of the input file containing a raw grid buffer /// @param gridName string name of the grid to be read /// @param pool optional pool from which to allocate the new grid buffer /// @throw Will throw a std::ios_base::failure if the file does not exist and a /// std::logic_error if the file does not contain a valid raw grid with the specified name void read(const std::string &fileName, const std::string &gridName, const BufferT& pool = BufferT()) { std::ifstream is(fileName, std::ios::in | std::ios::binary); if (!is.is_open()) throw std::ios_base::failure("Unable to open file named \"" + fileName + "\" for input"); this->read(is, gridName, pool); } }; // GridHandle // --------------------------> Implementation of private methods in GridHandle <------------------------------------ template<typename BufferT> inline const GridData* GridHandle<BufferT>::gridData(uint32_t n) const { const void *data = this->data(); if (data == nullptr || n >= mMetaData.size()) return nullptr; return
util::PtrAdd<GridData>(data, mMetaData[n].offset); }// const GridData* GridHandle<BufferT>::gridData(uint32_t n) const template<typename BufferT> inline const GridMetaData* GridHandle<BufferT>::gridMetaData(uint32_t n) const { const auto *data = this->data(); if (data == nullptr || n >= mMetaData.size()) return nullptr; return util::PtrAdd<GridMetaData>(data, mMetaData[n].offset); }// const GridMetaData* GridHandle<BufferT>::gridMetaData(uint32_t n) const inline __hostdev__ void cpyGridHandleMeta(const GridData *data, GridHandleMetaData *meta) { uint64_t offset = 0; for (auto *p=meta, *q=p+data->mGridCount; p!=q; ++p) { *p = {offset, data->mGridSize, data->mGridType}; offset += p->size; data = util::PtrAdd<GridData>(data, p->size); } }// void cpyGridHandleMeta(const GridData *data, GridHandleMetaData *meta) template<typename BufferT> template<typename T, typename util::disable_if<BufferTraits<T>::hasDeviceDual, int>::type> GridHandle<BufferT>::GridHandle(T&& buffer) { static_assert(util::is_same<T,BufferT>::value, "Expected U==BufferT"); mBuffer = std::move(buffer); if (auto *data = reinterpret_cast<const GridData*>(mBuffer.data())) { if (!data->isValid()) throw std::runtime_error("GridHandle was constructed with an invalid host buffer"); mMetaData.resize(data->mGridCount); cpyGridHandleMeta(data, mMetaData.data()); } }// GridHandle<BufferT>::GridHandle(T&& buffer) template<typename BufferT> template <typename OtherBufferT> inline GridHandle<OtherBufferT> GridHandle<BufferT>::copy(const OtherBufferT& other) const { if (mBuffer.isEmpty()) return GridHandle<OtherBufferT>();// return an empty handle auto buffer = OtherBufferT::create(mBuffer.size(), &other); std::memcpy(buffer.data(), mBuffer.data(), mBuffer.size());// deep copy of buffer return GridHandle<OtherBufferT>(std::move(buffer)); }// GridHandle<OtherBufferT> GridHandle<BufferT>::copy(const OtherBufferT& other) const template<typename BufferT> template<typename ValueT> inline const NanoGrid<ValueT>* GridHandle<BufferT>::grid(uint32_t n) const { const void *data = mBuffer.data(); if (data == nullptr || n >= mMetaData.size() || mMetaData[n].gridType != toGridType<ValueT>()) return nullptr; return util::PtrAdd<NanoGrid<ValueT>>(data, mMetaData[n].offset); }// const NanoGrid<ValueT>* GridHandle<BufferT>::grid(uint32_t n) const template<typename BufferT> template<typename ValueT, typename U> inline typename util::enable_if<BufferTraits<U>::hasDeviceDual, const NanoGrid<ValueT>*>::type GridHandle<BufferT>::deviceGrid(uint32_t n) const { const void *data = mBuffer.deviceData(); if (data == nullptr || n >= mMetaData.size() || mMetaData[n].gridType != toGridType<ValueT>()) return nullptr; return util::PtrAdd<NanoGrid<ValueT>>(data, mMetaData[n].offset); }// GridHandle<BufferT>::deviceGrid(uint32_t n) cons } // namespace nanovdb #if defined(__CUDACC__) #include <nanovdb/cuda/GridHandle.cuh> #endif// defined(__CUDACC__) #endif // NANOVDB_GRID_HANDLE_H_HAS_BEEN_INCLUDED
18,007
C
48.06812
146
0.680013
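GridHandle is mostly a thin owner of a raw grid buffer, so a short host-side walkthrough helps make the accessors above concrete. The sketch below is not part of GridHandle.h: listFloatGrids is a hypothetical helper, and the handle is assumed to have been populated elsewhere (for example by GridHandle::read() on a raw grid buffer). Only members declared above plus NanoGrid::gridName() are used; grid<T>() returning nullptr on a type mismatch is what makes the per-grid cast safe.

#include <cstdio>
#include <nanovdb/GridHandle.h> // assumed include path

// Print basic info for every grid in the handle and fetch a typed pointer
// to the ones that actually store float values.
inline void listFloatGrids(const nanovdb::GridHandle<nanovdb::HostBuffer>& handle)
{
    for (uint32_t n = 0; n < handle.gridCount(); ++n) {
        std::printf("grid %u: %llu bytes\n", n, (unsigned long long)handle.gridSize(n));
        // grid<T>() returns nullptr unless the stored GridType matches T,
        // so this is safe even for buffers that mix grid types.
        if (const nanovdb::NanoGrid<float>* grid = handle.grid<float>(n)) {
            std::printf("  float grid \"%s\"\n", grid->gridName());
        }
    }
}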
NVIDIA/warp/warp/native/nanovdb/math/Math.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /*! \file Math.h \author Ken Museth \date January 8, 2020 \brief Math functions and classes */ #ifndef NANOVDB_MATH_MATH_H_HAS_BEEN_INCLUDED #define NANOVDB_MATH_MATH_H_HAS_BEEN_INCLUDED #include <nanovdb/util/Util.h>// for __hostdev__ and lots of other utility functions namespace nanovdb {// ================================================================= namespace math {// ============================================================= // ----------------------------> Various math functions <------------------------------------- //@{ /// @brief Pi constant taken from Boost to match old behaviour template<typename T> inline __hostdev__ constexpr T pi() { return 3.141592653589793238462643383279502884e+00; } template<> inline __hostdev__ constexpr float pi() { return 3.141592653589793238462643383279502884e+00F; } template<> inline __hostdev__ constexpr double pi() { return 3.141592653589793238462643383279502884e+00; } template<> inline __hostdev__ constexpr long double pi() { return 3.141592653589793238462643383279502884e+00L; } //@} //@{ /// Tolerance for floating-point comparison template<typename T> struct Tolerance; template<> struct Tolerance<float> { __hostdev__ static float value() { return 1e-8f; } }; template<> struct Tolerance<double> { __hostdev__ static double value() { return 1e-15; } }; //@} //@{ /// Delta for small floating-point offsets template<typename T> struct Delta; template<> struct Delta<float> { __hostdev__ static float value() { return 1e-5f; } }; template<> struct Delta<double> { __hostdev__ static double value() { return 1e-9; } }; //@} //@{ /// Maximum floating-point values template<typename T> struct Maximum; #if defined(__CUDA_ARCH__) || defined(__HIP__) template<> struct Maximum<int> { __hostdev__ static int value() { return 2147483647; } }; template<> struct Maximum<uint32_t> { __hostdev__ static uint32_t value() { return 4294967295u; } }; template<> struct Maximum<float> { __hostdev__ static float value() { return 1e+38f; } }; template<> struct Maximum<double> { __hostdev__ static double value() { return 1e+308; } }; #else template<typename T> struct Maximum { static T value() { return std::numeric_limits<T>::max(); } }; #endif //@} template<typename Type> __hostdev__ inline bool isApproxZero(const Type& x) { return !(x > Tolerance<Type>::value()) && !(x < -Tolerance<Type>::value()); } template<typename Type> __hostdev__ inline Type Min(Type a, Type b) { return (a < b) ? a : b; } __hostdev__ inline int32_t Min(int32_t a, int32_t b) { return int32_t(fminf(float(a), float(b))); } __hostdev__ inline uint32_t Min(uint32_t a, uint32_t b) { return uint32_t(fminf(float(a), float(b))); } __hostdev__ inline float Min(float a, float b) { return fminf(a, b); } __hostdev__ inline double Min(double a, double b) { return fmin(a, b); } template<typename Type> __hostdev__ inline Type Max(Type a, Type b) { return (a > b) ? 
a : b; } __hostdev__ inline int32_t Max(int32_t a, int32_t b) { return int32_t(fmaxf(float(a), float(b))); } __hostdev__ inline uint32_t Max(uint32_t a, uint32_t b) { return uint32_t(fmaxf(float(a), float(b))); } __hostdev__ inline float Max(float a, float b) { return fmaxf(a, b); } __hostdev__ inline double Max(double a, double b) { return fmax(a, b); } __hostdev__ inline float Clamp(float x, float a, float b) { return Max(Min(x, b), a); } __hostdev__ inline double Clamp(double x, double a, double b) { return Max(Min(x, b), a); } __hostdev__ inline float Fract(float x) { return x - floorf(x); } __hostdev__ inline double Fract(double x) { return x - floor(x); } __hostdev__ inline int32_t Floor(float x) { return int32_t(floorf(x)); } __hostdev__ inline int32_t Floor(double x) { return int32_t(floor(x)); } __hostdev__ inline int32_t Ceil(float x) { return int32_t(ceilf(x)); } __hostdev__ inline int32_t Ceil(double x) { return int32_t(ceil(x)); } template<typename T> __hostdev__ inline T Pow2(T x) { return x * x; } template<typename T> __hostdev__ inline T Pow3(T x) { return x * x * x; } template<typename T> __hostdev__ inline T Pow4(T x) { return Pow2(x * x); } template<typename T> __hostdev__ inline T Abs(T x) { return x < 0 ? -x : x; } template<> __hostdev__ inline float Abs(float x) { return fabsf(x); } template<> __hostdev__ inline double Abs(double x) { return fabs(x); } template<> __hostdev__ inline int Abs(int x) { return abs(x); } template<typename CoordT, typename RealT, template<typename> class Vec3T> __hostdev__ inline CoordT Round(const Vec3T<RealT>& xyz); template<typename CoordT, template<typename> class Vec3T> __hostdev__ inline CoordT Round(const Vec3T<float>& xyz) { return CoordT(int32_t(rintf(xyz[0])), int32_t(rintf(xyz[1])), int32_t(rintf(xyz[2]))); //return CoordT(int32_t(roundf(xyz[0])), int32_t(roundf(xyz[1])), int32_t(roundf(xyz[2])) ); //return CoordT(int32_t(floorf(xyz[0] + 0.5f)), int32_t(floorf(xyz[1] + 0.5f)), int32_t(floorf(xyz[2] + 0.5f))); } template<typename CoordT, template<typename> class Vec3T> __hostdev__ inline CoordT Round(const Vec3T<double>& xyz) { return CoordT(int32_t(floor(xyz[0] + 0.5)), int32_t(floor(xyz[1] + 0.5)), int32_t(floor(xyz[2] + 0.5))); } template<typename CoordT, typename RealT, template<typename> class Vec3T> __hostdev__ inline CoordT RoundDown(const Vec3T<RealT>& xyz) { return CoordT(Floor(xyz[0]), Floor(xyz[1]), Floor(xyz[2])); } //@{ /// Return the square root of a floating-point value. __hostdev__ inline float Sqrt(float x) { return sqrtf(x); } __hostdev__ inline double Sqrt(double x) { return sqrt(x); } //@} /// Return the sign of the given value as an integer (either -1, 0 or 1). template<typename T> __hostdev__ inline T Sign(const T& x) { return ((T(0) < x) ? T(1) : T(0)) - ((x < T(0)) ? 
T(1) : T(0)); } template<typename Vec3T> __hostdev__ inline int MinIndex(const Vec3T& v) { #if 0 static const int hashTable[8] = {2, 1, 9, 1, 2, 9, 0, 0}; //9 are dummy values const int hashKey = ((v[0] < v[1]) << 2) + ((v[0] < v[2]) << 1) + (v[1] < v[2]); // ?*4+?*2+?*1 return hashTable[hashKey]; #else if (v[0] < v[1] && v[0] < v[2]) return 0; if (v[1] < v[2]) return 1; else return 2; #endif } template<typename Vec3T> __hostdev__ inline int MaxIndex(const Vec3T& v) { #if 0 static const int hashTable[8] = {2, 1, 9, 1, 2, 9, 0, 0}; //9 are dummy values const int hashKey = ((v[0] > v[1]) << 2) + ((v[0] > v[2]) << 1) + (v[1] > v[2]); // ?*4+?*2+?*1 return hashTable[hashKey]; #else if (v[0] > v[1] && v[0] > v[2]) return 0; if (v[1] > v[2]) return 1; else return 2; #endif } /// @brief round up byteSize to the nearest wordSize, e.g. to align to machine word: AlignUp<sizeof(size_t)(n) /// /// @details both wordSize and byteSize are in byte units template<uint64_t wordSize> __hostdev__ inline uint64_t AlignUp(uint64_t byteCount) { const uint64_t r = byteCount % wordSize; return r ? byteCount - r + wordSize : byteCount; } // ------------------------------> Coord <-------------------------------------- // forward declaration so we can define Coord::asVec3s and Coord::asVec3d template<typename> class Vec3; /// @brief Signed (i, j, k) 32-bit integer coordinate class, similar to openvdb::math::Coord class Coord { int32_t mVec[3]; // private member data - three signed index coordinates public: using ValueType = int32_t; using IndexType = uint32_t; /// @brief Initialize all coordinates to zero. __hostdev__ Coord() : mVec{0, 0, 0} { } /// @brief Initializes all coordinates to the given signed integer. __hostdev__ explicit Coord(ValueType n) : mVec{n, n, n} { } /// @brief Initializes coordinate to the given signed integers. __hostdev__ Coord(ValueType i, ValueType j, ValueType k) : mVec{i, j, k} { } __hostdev__ Coord(ValueType* ptr) : mVec{ptr[0], ptr[1], ptr[2]} { } __hostdev__ int32_t x() const { return mVec[0]; } __hostdev__ int32_t y() const { return mVec[1]; } __hostdev__ int32_t z() const { return mVec[2]; } __hostdev__ int32_t& x() { return mVec[0]; } __hostdev__ int32_t& y() { return mVec[1]; } __hostdev__ int32_t& z() { return mVec[2]; } __hostdev__ static Coord max() { return Coord(int32_t((1u << 31) - 1)); } __hostdev__ static Coord min() { return Coord(-int32_t((1u << 31) - 1) - 1); } __hostdev__ static size_t memUsage() { return sizeof(Coord); } /// @brief Return a const reference to the given Coord component. /// @warning The argument is assumed to be 0, 1, or 2. __hostdev__ const ValueType& operator[](IndexType i) const { return mVec[i]; } /// @brief Return a non-const reference to the given Coord component. /// @warning The argument is assumed to be 0, 1, or 2. __hostdev__ ValueType& operator[](IndexType i) { return mVec[i]; } /// @brief Assignment operator that works with openvdb::Coord template<typename CoordT> __hostdev__ Coord& operator=(const CoordT& other) { static_assert(sizeof(Coord) == sizeof(CoordT), "Mis-matched sizeof"); mVec[0] = other[0]; mVec[1] = other[1]; mVec[2] = other[2]; return *this; } /// @brief Return a new instance with coordinates masked by the given unsigned integer. __hostdev__ Coord operator&(IndexType n) const { return Coord(mVec[0] & n, mVec[1] & n, mVec[2] & n); } // @brief Return a new instance with coordinates left-shifted by the given unsigned integer. 
__hostdev__ Coord operator<<(IndexType n) const { return Coord(mVec[0] << n, mVec[1] << n, mVec[2] << n); } // @brief Return a new instance with coordinates right-shifted by the given unsigned integer. __hostdev__ Coord operator>>(IndexType n) const { return Coord(mVec[0] >> n, mVec[1] >> n, mVec[2] >> n); } /// @brief Return true if this Coord is lexicographically less than the given Coord. __hostdev__ bool operator<(const Coord& rhs) const { return mVec[0] < rhs[0] ? true : mVec[0] > rhs[0] ? false : mVec[1] < rhs[1] ? true : mVec[1] > rhs[1] ? false : mVec[2] < rhs[2] ? true : false; } /// @brief Return true if this Coord is lexicographically less or equal to the given Coord. __hostdev__ bool operator<=(const Coord& rhs) const { return mVec[0] < rhs[0] ? true : mVec[0] > rhs[0] ? false : mVec[1] < rhs[1] ? true : mVec[1] > rhs[1] ? false : mVec[2] <=rhs[2] ? true : false; } // @brief Return true if this Coord is lexicographically greater than the given Coord. __hostdev__ bool operator>(const Coord& rhs) const { return mVec[0] > rhs[0] ? true : mVec[0] < rhs[0] ? false : mVec[1] > rhs[1] ? true : mVec[1] < rhs[1] ? false : mVec[2] > rhs[2] ? true : false; } // @brief Return true if this Coord is lexicographically greater or equal to the given Coord. __hostdev__ bool operator>=(const Coord& rhs) const { return mVec[0] > rhs[0] ? true : mVec[0] < rhs[0] ? false : mVec[1] > rhs[1] ? true : mVec[1] < rhs[1] ? false : mVec[2] >=rhs[2] ? true : false; } // @brief Return true if the Coord components are identical. __hostdev__ bool operator==(const Coord& rhs) const { return mVec[0] == rhs[0] && mVec[1] == rhs[1] && mVec[2] == rhs[2]; } __hostdev__ bool operator!=(const Coord& rhs) const { return mVec[0] != rhs[0] || mVec[1] != rhs[1] || mVec[2] != rhs[2]; } __hostdev__ Coord& operator&=(int n) { mVec[0] &= n; mVec[1] &= n; mVec[2] &= n; return *this; } __hostdev__ Coord& operator<<=(uint32_t n) { mVec[0] <<= n; mVec[1] <<= n; mVec[2] <<= n; return *this; } __hostdev__ Coord& operator>>=(uint32_t n) { mVec[0] >>= n; mVec[1] >>= n; mVec[2] >>= n; return *this; } __hostdev__ Coord& operator+=(int n) { mVec[0] += n; mVec[1] += n; mVec[2] += n; return *this; } __hostdev__ Coord operator+(const Coord& rhs) const { return Coord(mVec[0] + rhs[0], mVec[1] + rhs[1], mVec[2] + rhs[2]); } __hostdev__ Coord operator-(const Coord& rhs) const { return Coord(mVec[0] - rhs[0], mVec[1] - rhs[1], mVec[2] - rhs[2]); } __hostdev__ Coord operator-() const { return Coord(-mVec[0], -mVec[1], -mVec[2]); } __hostdev__ Coord& operator+=(const Coord& rhs) { mVec[0] += rhs[0]; mVec[1] += rhs[1]; mVec[2] += rhs[2]; return *this; } __hostdev__ Coord& operator-=(const Coord& rhs) { mVec[0] -= rhs[0]; mVec[1] -= rhs[1]; mVec[2] -= rhs[2]; return *this; } /// @brief Perform a component-wise minimum with the other Coord. __hostdev__ Coord& minComponent(const Coord& other) { if (other[0] < mVec[0]) mVec[0] = other[0]; if (other[1] < mVec[1]) mVec[1] = other[1]; if (other[2] < mVec[2]) mVec[2] = other[2]; return *this; } /// @brief Perform a component-wise maximum with the other Coord. __hostdev__ Coord& maxComponent(const Coord& other) { if (other[0] > mVec[0]) mVec[0] = other[0]; if (other[1] > mVec[1]) mVec[1] = other[1]; if (other[2] > mVec[2]) mVec[2] = other[2]; return *this; } #if defined(__CUDACC__) // the following functions only run on the GPU! 
__device__ inline Coord& minComponentAtomic(const Coord& other) { atomicMin(&mVec[0], other[0]); atomicMin(&mVec[1], other[1]); atomicMin(&mVec[2], other[2]); return *this; } __device__ inline Coord& maxComponentAtomic(const Coord& other) { atomicMax(&mVec[0], other[0]); atomicMax(&mVec[1], other[1]); atomicMax(&mVec[2], other[2]); return *this; } #endif __hostdev__ Coord offsetBy(ValueType dx, ValueType dy, ValueType dz) const { return Coord(mVec[0] + dx, mVec[1] + dy, mVec[2] + dz); } __hostdev__ Coord offsetBy(ValueType n) const { return this->offsetBy(n, n, n); } /// Return true if any of the components of @a a are smaller than the /// corresponding components of @a b. __hostdev__ static inline bool lessThan(const Coord& a, const Coord& b) { return (a[0] < b[0] || a[1] < b[1] || a[2] < b[2]); } /// @brief Return the largest integer coordinates that are not greater /// than @a xyz (node centered conversion). template<typename Vec3T> __hostdev__ static Coord Floor(const Vec3T& xyz) { return Coord(math::Floor(xyz[0]), math::Floor(xyz[1]), math::Floor(xyz[2])); } /// @brief Return a hash key derived from the existing coordinates. /// @details The hash function is originally taken from the SIGGRAPH paper: /// "VDB: High-resolution sparse volumes with dynamic topology" /// and the prime numbers are modified based on the ACM Transactions on Graphics paper: /// "Real-time 3D reconstruction at scale using voxel hashing" (the second number had a typo!) template<int Log2N = 3 + 4 + 5> __hostdev__ uint32_t hash() const { return ((1 << Log2N) - 1) & (mVec[0] * 73856093 ^ mVec[1] * 19349669 ^ mVec[2] * 83492791); } /// @brief Return the octant of this Coord //__hostdev__ size_t octant() const { return (uint32_t(mVec[0])>>31) | ((uint32_t(mVec[1])>>31)<<1) | ((uint32_t(mVec[2])>>31)<<2); } __hostdev__ uint8_t octant() const { return (uint8_t(bool(mVec[0] & (1u << 31)))) | (uint8_t(bool(mVec[1] & (1u << 31))) << 1) | (uint8_t(bool(mVec[2] & (1u << 31))) << 2); } /// @brief Return a single precision floating-point vector of this coordinate __hostdev__ inline Vec3<float> asVec3s() const; /// @brief Return a double precision floating-point vector of this coordinate __hostdev__ inline Vec3<double> asVec3d() const; // returns a copy of itself, so it mimics the behaviour of Vec3<T>::round() __hostdev__ inline Coord round() const { return *this; } }; // Coord class // ----------------------------> Vec3 <-------------------------------------- /// @brief A simple vector class with three components, similar to openvdb::math::Vec3 template<typename T> class Vec3 { T mVec[3]; public: static const int SIZE = 3; static const int size = 3; // in openvdb::math::Tuple using ValueType = T; Vec3() = default; __hostdev__ explicit Vec3(T x) : mVec{x, x, x} { } __hostdev__ Vec3(T x, T y, T z) : mVec{x, y, z} { } template<template<class> class Vec3T, class T2> __hostdev__ Vec3(const Vec3T<T2>& v) : mVec{T(v[0]), T(v[1]), T(v[2])} { static_assert(Vec3T<T2>::size == size, "expected Vec3T::size==3!"); } template<typename T2> __hostdev__ explicit Vec3(const Vec3<T2>& v) : mVec{T(v[0]), T(v[1]), T(v[2])} { } __hostdev__ explicit Vec3(const Coord& ijk) : mVec{T(ijk[0]), T(ijk[1]), T(ijk[2])} { } __hostdev__ bool operator==(const Vec3& rhs) const { return mVec[0] == rhs[0] && mVec[1] == rhs[1] && mVec[2] == rhs[2]; } __hostdev__ bool operator!=(const Vec3& rhs) const { return mVec[0] != rhs[0] || mVec[1] != rhs[1] || mVec[2] != rhs[2]; } template<template<class> class Vec3T, class T2> __hostdev__ Vec3& operator=(const Vec3T<T2>& 
rhs) { static_assert(Vec3T<T2>::size == size, "expected Vec3T::size==3!"); mVec[0] = rhs[0]; mVec[1] = rhs[1]; mVec[2] = rhs[2]; return *this; } __hostdev__ const T& operator[](int i) const { return mVec[i]; } __hostdev__ T& operator[](int i) { return mVec[i]; } template<typename Vec3T> __hostdev__ T dot(const Vec3T& v) const { return mVec[0] * v[0] + mVec[1] * v[1] + mVec[2] * v[2]; } template<typename Vec3T> __hostdev__ Vec3 cross(const Vec3T& v) const { return Vec3(mVec[1] * v[2] - mVec[2] * v[1], mVec[2] * v[0] - mVec[0] * v[2], mVec[0] * v[1] - mVec[1] * v[0]); } __hostdev__ T lengthSqr() const { return mVec[0] * mVec[0] + mVec[1] * mVec[1] + mVec[2] * mVec[2]; // 5 flops } __hostdev__ T length() const { return Sqrt(this->lengthSqr()); } __hostdev__ Vec3 operator-() const { return Vec3(-mVec[0], -mVec[1], -mVec[2]); } __hostdev__ Vec3 operator*(const Vec3& v) const { return Vec3(mVec[0] * v[0], mVec[1] * v[1], mVec[2] * v[2]); } __hostdev__ Vec3 operator/(const Vec3& v) const { return Vec3(mVec[0] / v[0], mVec[1] / v[1], mVec[2] / v[2]); } __hostdev__ Vec3 operator+(const Vec3& v) const { return Vec3(mVec[0] + v[0], mVec[1] + v[1], mVec[2] + v[2]); } __hostdev__ Vec3 operator-(const Vec3& v) const { return Vec3(mVec[0] - v[0], mVec[1] - v[1], mVec[2] - v[2]); } __hostdev__ Vec3 operator+(const Coord& ijk) const { return Vec3(mVec[0] + ijk[0], mVec[1] + ijk[1], mVec[2] + ijk[2]); } __hostdev__ Vec3 operator-(const Coord& ijk) const { return Vec3(mVec[0] - ijk[0], mVec[1] - ijk[1], mVec[2] - ijk[2]); } __hostdev__ Vec3 operator*(const T& s) const { return Vec3(s * mVec[0], s * mVec[1], s * mVec[2]); } __hostdev__ Vec3 operator/(const T& s) const { return (T(1) / s) * (*this); } __hostdev__ Vec3& operator+=(const Vec3& v) { mVec[0] += v[0]; mVec[1] += v[1]; mVec[2] += v[2]; return *this; } __hostdev__ Vec3& operator+=(const Coord& ijk) { mVec[0] += T(ijk[0]); mVec[1] += T(ijk[1]); mVec[2] += T(ijk[2]); return *this; } __hostdev__ Vec3& operator-=(const Vec3& v) { mVec[0] -= v[0]; mVec[1] -= v[1]; mVec[2] -= v[2]; return *this; } __hostdev__ Vec3& operator-=(const Coord& ijk) { mVec[0] -= T(ijk[0]); mVec[1] -= T(ijk[1]); mVec[2] -= T(ijk[2]); return *this; } __hostdev__ Vec3& operator*=(const T& s) { mVec[0] *= s; mVec[1] *= s; mVec[2] *= s; return *this; } __hostdev__ Vec3& operator/=(const T& s) { return (*this) *= T(1) / s; } __hostdev__ Vec3& normalize() { return (*this) /= this->length(); } /// @brief Perform a component-wise minimum with the other Coord. __hostdev__ Vec3& minComponent(const Vec3& other) { if (other[0] < mVec[0]) mVec[0] = other[0]; if (other[1] < mVec[1]) mVec[1] = other[1]; if (other[2] < mVec[2]) mVec[2] = other[2]; return *this; } /// @brief Perform a component-wise maximum with the other Coord. __hostdev__ Vec3& maxComponent(const Vec3& other) { if (other[0] > mVec[0]) mVec[0] = other[0]; if (other[1] > mVec[1]) mVec[1] = other[1]; if (other[2] > mVec[2]) mVec[2] = other[2]; return *this; } /// @brief Return the smallest vector component __hostdev__ ValueType min() const { return mVec[0] < mVec[1] ? (mVec[0] < mVec[2] ? mVec[0] : mVec[2]) : (mVec[1] < mVec[2] ? mVec[1] : mVec[2]); } /// @brief Return the largest vector component __hostdev__ ValueType max() const { return mVec[0] > mVec[1] ? (mVec[0] > mVec[2] ? mVec[0] : mVec[2]) : (mVec[1] > mVec[2] ? 
mVec[1] : mVec[2]); } /// @brief Round each component if this Vec<T> up to its integer value /// @return Return an integer Coord __hostdev__ Coord floor() const { return Coord(Floor(mVec[0]), Floor(mVec[1]), Floor(mVec[2])); } /// @brief Round each component if this Vec<T> down to its integer value /// @return Return an integer Coord __hostdev__ Coord ceil() const { return Coord(Ceil(mVec[0]), Ceil(mVec[1]), Ceil(mVec[2])); } /// @brief Round each component if this Vec<T> to its closest integer value /// @return Return an integer Coord __hostdev__ Coord round() const { if (util::is_same<T, float>::value) { return Coord(Floor(mVec[0] + 0.5f), Floor(mVec[1] + 0.5f), Floor(mVec[2] + 0.5f)); } else if (util::is_same<T, int>::value) { return Coord(mVec[0], mVec[1], mVec[2]); } else { return Coord(Floor(mVec[0] + 0.5), Floor(mVec[1] + 0.5), Floor(mVec[2] + 0.5)); } } /// @brief return a non-const raw constant pointer to array of three vector components __hostdev__ T* asPointer() { return mVec; } /// @brief return a const raw constant pointer to array of three vector components __hostdev__ const T* asPointer() const { return mVec; } }; // Vec3<T> template<typename T1, typename T2> __hostdev__ inline Vec3<T2> operator*(T1 scalar, const Vec3<T2>& vec) { return Vec3<T2>(scalar * vec[0], scalar * vec[1], scalar * vec[2]); } template<typename T1, typename T2> __hostdev__ inline Vec3<T2> operator/(T1 scalar, const Vec3<T2>& vec) { return Vec3<T2>(scalar / vec[0], scalar / vec[1], scalar / vec[2]); } /// @brief Return a single precision floating-point vector of this coordinate __hostdev__ inline Vec3<float> Coord::asVec3s() const { return Vec3<float>(float(mVec[0]), float(mVec[1]), float(mVec[2])); } /// @brief Return a double precision floating-point vector of this coordinate __hostdev__ inline Vec3<double> Coord::asVec3d() const { return Vec3<double>(double(mVec[0]), double(mVec[1]), double(mVec[2])); } // ----------------------------> Vec4 <-------------------------------------- /// @brief A simple vector class with four components, similar to openvdb::math::Vec4 template<typename T> class Vec4 { T mVec[4]; public: static const int SIZE = 4; static const int size = 4; using ValueType = T; Vec4() = default; __hostdev__ explicit Vec4(T x) : mVec{x, x, x, x} { } __hostdev__ Vec4(T x, T y, T z, T w) : mVec{x, y, z, w} { } template<typename T2> __hostdev__ explicit Vec4(const Vec4<T2>& v) : mVec{T(v[0]), T(v[1]), T(v[2]), T(v[3])} { } template<template<class> class Vec4T, class T2> __hostdev__ Vec4(const Vec4T<T2>& v) : mVec{T(v[0]), T(v[1]), T(v[2]), T(v[3])} { static_assert(Vec4T<T2>::size == size, "expected Vec4T::size==4!"); } __hostdev__ bool operator==(const Vec4& rhs) const { return mVec[0] == rhs[0] && mVec[1] == rhs[1] && mVec[2] == rhs[2] && mVec[3] == rhs[3]; } __hostdev__ bool operator!=(const Vec4& rhs) const { return mVec[0] != rhs[0] || mVec[1] != rhs[1] || mVec[2] != rhs[2] || mVec[3] != rhs[3]; } template<template<class> class Vec4T, class T2> __hostdev__ Vec4& operator=(const Vec4T<T2>& rhs) { static_assert(Vec4T<T2>::size == size, "expected Vec4T::size==4!"); mVec[0] = rhs[0]; mVec[1] = rhs[1]; mVec[2] = rhs[2]; mVec[3] = rhs[3]; return *this; } __hostdev__ const T& operator[](int i) const { return mVec[i]; } __hostdev__ T& operator[](int i) { return mVec[i]; } template<typename Vec4T> __hostdev__ T dot(const Vec4T& v) const { return mVec[0] * v[0] + mVec[1] * v[1] + mVec[2] * v[2] + mVec[3] * v[3]; } __hostdev__ T lengthSqr() const { return mVec[0] * mVec[0] + mVec[1] * mVec[1] + 
mVec[2] * mVec[2] + mVec[3] * mVec[3]; // 7 flops } __hostdev__ T length() const { return Sqrt(this->lengthSqr()); } __hostdev__ Vec4 operator-() const { return Vec4(-mVec[0], -mVec[1], -mVec[2], -mVec[3]); } __hostdev__ Vec4 operator*(const Vec4& v) const { return Vec4(mVec[0] * v[0], mVec[1] * v[1], mVec[2] * v[2], mVec[3] * v[3]); } __hostdev__ Vec4 operator/(const Vec4& v) const { return Vec4(mVec[0] / v[0], mVec[1] / v[1], mVec[2] / v[2], mVec[3] / v[3]); } __hostdev__ Vec4 operator+(const Vec4& v) const { return Vec4(mVec[0] + v[0], mVec[1] + v[1], mVec[2] + v[2], mVec[3] + v[3]); } __hostdev__ Vec4 operator-(const Vec4& v) const { return Vec4(mVec[0] - v[0], mVec[1] - v[1], mVec[2] - v[2], mVec[3] - v[3]); } __hostdev__ Vec4 operator*(const T& s) const { return Vec4(s * mVec[0], s * mVec[1], s * mVec[2], s * mVec[3]); } __hostdev__ Vec4 operator/(const T& s) const { return (T(1) / s) * (*this); } __hostdev__ Vec4& operator+=(const Vec4& v) { mVec[0] += v[0]; mVec[1] += v[1]; mVec[2] += v[2]; mVec[3] += v[3]; return *this; } __hostdev__ Vec4& operator-=(const Vec4& v) { mVec[0] -= v[0]; mVec[1] -= v[1]; mVec[2] -= v[2]; mVec[3] -= v[3]; return *this; } __hostdev__ Vec4& operator*=(const T& s) { mVec[0] *= s; mVec[1] *= s; mVec[2] *= s; mVec[3] *= s; return *this; } __hostdev__ Vec4& operator/=(const T& s) { return (*this) *= T(1) / s; } __hostdev__ Vec4& normalize() { return (*this) /= this->length(); } /// @brief Perform a component-wise minimum with the other Coord. __hostdev__ Vec4& minComponent(const Vec4& other) { if (other[0] < mVec[0]) mVec[0] = other[0]; if (other[1] < mVec[1]) mVec[1] = other[1]; if (other[2] < mVec[2]) mVec[2] = other[2]; if (other[3] < mVec[3]) mVec[3] = other[3]; return *this; } /// @brief Perform a component-wise maximum with the other Coord. __hostdev__ Vec4& maxComponent(const Vec4& other) { if (other[0] > mVec[0]) mVec[0] = other[0]; if (other[1] > mVec[1]) mVec[1] = other[1]; if (other[2] > mVec[2]) mVec[2] = other[2]; if (other[3] > mVec[3]) mVec[3] = other[3]; return *this; } }; // Vec4<T> template<typename T1, typename T2> __hostdev__ inline Vec4<T2> operator*(T1 scalar, const Vec4<T2>& vec) { return Vec4<T2>(scalar * vec[0], scalar * vec[1], scalar * vec[2], scalar * vec[3]); } template<typename T1, typename T2> __hostdev__ inline Vec4<T2> operator/(T1 scalar, const Vec4<T2>& vec) { return Vec4<T2>(scalar / vec[0], scalar / vec[1], scalar / vec[2], scalar / vec[3]); } // ----------------------------> matMult <-------------------------------------- /// @brief Multiply a 3x3 matrix and a 3d vector using 32bit floating point arithmetics /// @note This corresponds to a linear mapping, e.g. scaling, rotation etc. /// @tparam Vec3T Template type of the input and output 3d vectors /// @param mat pointer to an array of floats with the 3x3 matrix /// @param xyz input vector to be multiplied by the matrix /// @return result of matrix-vector multiplication, i.e. 
mat x xyz template<typename Vec3T> __hostdev__ inline Vec3T matMult(const float* mat, const Vec3T& xyz) { return Vec3T(fmaf(static_cast<float>(xyz[0]), mat[0], fmaf(static_cast<float>(xyz[1]), mat[1], static_cast<float>(xyz[2]) * mat[2])), fmaf(static_cast<float>(xyz[0]), mat[3], fmaf(static_cast<float>(xyz[1]), mat[4], static_cast<float>(xyz[2]) * mat[5])), fmaf(static_cast<float>(xyz[0]), mat[6], fmaf(static_cast<float>(xyz[1]), mat[7], static_cast<float>(xyz[2]) * mat[8]))); // 6 fmaf + 3 mult = 9 flops } /// @brief Multiply a 3x3 matrix and a 3d vector using 64bit floating point arithmetics /// @note This corresponds to a linear mapping, e.g. scaling, rotation etc. /// @tparam Vec3T Template type of the input and output 3d vectors /// @param mat pointer to an array of floats with the 3x3 matrix /// @param xyz input vector to be multiplied by the matrix /// @return result of matrix-vector multiplication, i.e. mat x xyz template<typename Vec3T> __hostdev__ inline Vec3T matMult(const double* mat, const Vec3T& xyz) { return Vec3T(fma(static_cast<double>(xyz[0]), mat[0], fma(static_cast<double>(xyz[1]), mat[1], static_cast<double>(xyz[2]) * mat[2])), fma(static_cast<double>(xyz[0]), mat[3], fma(static_cast<double>(xyz[1]), mat[4], static_cast<double>(xyz[2]) * mat[5])), fma(static_cast<double>(xyz[0]), mat[6], fma(static_cast<double>(xyz[1]), mat[7], static_cast<double>(xyz[2]) * mat[8]))); // 6 fmaf + 3 mult = 9 flops } /// @brief Multiply a 3x3 matrix to a 3d vector and add another 3d vector using 32bit floating point arithmetics /// @note This corresponds to an affine transformation, i.e a linear mapping followed by a translation. e.g. scale/rotation and translation /// @tparam Vec3T Template type of the input and output 3d vectors /// @param mat pointer to an array of floats with the 3x3 matrix /// @param vec 3d vector to be added AFTER the matrix multiplication /// @param xyz input vector to be multiplied by the matrix and a translated by @c vec /// @return result of affine transformation, i.e. (mat x xyz) + vec template<typename Vec3T> __hostdev__ inline Vec3T matMult(const float* mat, const float* vec, const Vec3T& xyz) { return Vec3T(fmaf(static_cast<float>(xyz[0]), mat[0], fmaf(static_cast<float>(xyz[1]), mat[1], fmaf(static_cast<float>(xyz[2]), mat[2], vec[0]))), fmaf(static_cast<float>(xyz[0]), mat[3], fmaf(static_cast<float>(xyz[1]), mat[4], fmaf(static_cast<float>(xyz[2]), mat[5], vec[1]))), fmaf(static_cast<float>(xyz[0]), mat[6], fmaf(static_cast<float>(xyz[1]), mat[7], fmaf(static_cast<float>(xyz[2]), mat[8], vec[2])))); // 9 fmaf = 9 flops } /// @brief Multiply a 3x3 matrix to a 3d vector and add another 3d vector using 64bit floating point arithmetics /// @note This corresponds to an affine transformation, i.e a linear mapping followed by a translation. e.g. scale/rotation and translation /// @tparam Vec3T Template type of the input and output 3d vectors /// @param mat pointer to an array of floats with the 3x3 matrix /// @param vec 3d vector to be added AFTER the matrix multiplication /// @param xyz input vector to be multiplied by the matrix and a translated by @c vec /// @return result of affine transformation, i.e. 
(mat x xyz) + vec template<typename Vec3T> __hostdev__ inline Vec3T matMult(const double* mat, const double* vec, const Vec3T& xyz) { return Vec3T(fma(static_cast<double>(xyz[0]), mat[0], fma(static_cast<double>(xyz[1]), mat[1], fma(static_cast<double>(xyz[2]), mat[2], vec[0]))), fma(static_cast<double>(xyz[0]), mat[3], fma(static_cast<double>(xyz[1]), mat[4], fma(static_cast<double>(xyz[2]), mat[5], vec[1]))), fma(static_cast<double>(xyz[0]), mat[6], fma(static_cast<double>(xyz[1]), mat[7], fma(static_cast<double>(xyz[2]), mat[8], vec[2])))); // 9 fma = 9 flops } /// @brief Multiply the transposed of a 3x3 matrix and a 3d vector using 32bit floating point arithmetics /// @note This corresponds to an inverse linear mapping, e.g. inverse scaling, inverse rotation etc. /// @tparam Vec3T Template type of the input and output 3d vectors /// @param mat pointer to an array of floats with the 3x3 matrix /// @param xyz input vector to be multiplied by the transposed matrix /// @return result of matrix-vector multiplication, i.e. mat^T x xyz template<typename Vec3T> __hostdev__ inline Vec3T matMultT(const float* mat, const Vec3T& xyz) { return Vec3T(fmaf(static_cast<float>(xyz[0]), mat[0], fmaf(static_cast<float>(xyz[1]), mat[3], static_cast<float>(xyz[2]) * mat[6])), fmaf(static_cast<float>(xyz[0]), mat[1], fmaf(static_cast<float>(xyz[1]), mat[4], static_cast<float>(xyz[2]) * mat[7])), fmaf(static_cast<float>(xyz[0]), mat[2], fmaf(static_cast<float>(xyz[1]), mat[5], static_cast<float>(xyz[2]) * mat[8]))); // 6 fmaf + 3 mult = 9 flops } /// @brief Multiply the transposed of a 3x3 matrix and a 3d vector using 64bit floating point arithmetics /// @note This corresponds to an inverse linear mapping, e.g. inverse scaling, inverse rotation etc. /// @tparam Vec3T Template type of the input and output 3d vectors /// @param mat pointer to an array of floats with the 3x3 matrix /// @param xyz input vector to be multiplied by the transposed matrix /// @return result of matrix-vector multiplication, i.e. 
mat^T x xyz template<typename Vec3T> __hostdev__ inline Vec3T matMultT(const double* mat, const Vec3T& xyz) { return Vec3T(fma(static_cast<double>(xyz[0]), mat[0], fma(static_cast<double>(xyz[1]), mat[3], static_cast<double>(xyz[2]) * mat[6])), fma(static_cast<double>(xyz[0]), mat[1], fma(static_cast<double>(xyz[1]), mat[4], static_cast<double>(xyz[2]) * mat[7])), fma(static_cast<double>(xyz[0]), mat[2], fma(static_cast<double>(xyz[1]), mat[5], static_cast<double>(xyz[2]) * mat[8]))); // 6 fmaf + 3 mult = 9 flops } template<typename Vec3T> __hostdev__ inline Vec3T matMultT(const float* mat, const float* vec, const Vec3T& xyz) { return Vec3T(fmaf(static_cast<float>(xyz[0]), mat[0], fmaf(static_cast<float>(xyz[1]), mat[3], fmaf(static_cast<float>(xyz[2]), mat[6], vec[0]))), fmaf(static_cast<float>(xyz[0]), mat[1], fmaf(static_cast<float>(xyz[1]), mat[4], fmaf(static_cast<float>(xyz[2]), mat[7], vec[1]))), fmaf(static_cast<float>(xyz[0]), mat[2], fmaf(static_cast<float>(xyz[1]), mat[5], fmaf(static_cast<float>(xyz[2]), mat[8], vec[2])))); // 9 fmaf = 9 flops } template<typename Vec3T> __hostdev__ inline Vec3T matMultT(const double* mat, const double* vec, const Vec3T& xyz) { return Vec3T(fma(static_cast<double>(xyz[0]), mat[0], fma(static_cast<double>(xyz[1]), mat[3], fma(static_cast<double>(xyz[2]), mat[6], vec[0]))), fma(static_cast<double>(xyz[0]), mat[1], fma(static_cast<double>(xyz[1]), mat[4], fma(static_cast<double>(xyz[2]), mat[7], vec[1]))), fma(static_cast<double>(xyz[0]), mat[2], fma(static_cast<double>(xyz[1]), mat[5], fma(static_cast<double>(xyz[2]), mat[8], vec[2])))); // 9 fma = 9 flops } // ----------------------------> BBox <------------------------------------- // Base-class for static polymorphism (cannot be constructed directly) template<typename Vec3T> struct BaseBBox { Vec3T mCoord[2]; __hostdev__ bool operator==(const BaseBBox& rhs) const { return mCoord[0] == rhs.mCoord[0] && mCoord[1] == rhs.mCoord[1]; }; __hostdev__ bool operator!=(const BaseBBox& rhs) const { return mCoord[0] != rhs.mCoord[0] || mCoord[1] != rhs.mCoord[1]; }; __hostdev__ const Vec3T& operator[](int i) const { return mCoord[i]; } __hostdev__ Vec3T& operator[](int i) { return mCoord[i]; } __hostdev__ Vec3T& min() { return mCoord[0]; } __hostdev__ Vec3T& max() { return mCoord[1]; } __hostdev__ const Vec3T& min() const { return mCoord[0]; } __hostdev__ const Vec3T& max() const { return mCoord[1]; } __hostdev__ BaseBBox& translate(const Vec3T& xyz) { mCoord[0] += xyz; mCoord[1] += xyz; return *this; } /// @brief Expand this bounding box to enclose point @c xyz. __hostdev__ BaseBBox& expand(const Vec3T& xyz) { mCoord[0].minComponent(xyz); mCoord[1].maxComponent(xyz); return *this; } /// @brief Expand this bounding box to enclose the given bounding box. __hostdev__ BaseBBox& expand(const BaseBBox& bbox) { mCoord[0].minComponent(bbox[0]); mCoord[1].maxComponent(bbox[1]); return *this; } /// @brief Intersect this bounding box with the given bounding box. 
__hostdev__ BaseBBox& intersect(const BaseBBox& bbox) { mCoord[0].maxComponent(bbox[0]); mCoord[1].minComponent(bbox[1]); return *this; } //__hostdev__ BaseBBox expandBy(typename Vec3T::ValueType padding) const //{ // return BaseBBox(mCoord[0].offsetBy(-padding),mCoord[1].offsetBy(padding)); //} __hostdev__ bool isInside(const Vec3T& xyz) { if (xyz[0] < mCoord[0][0] || xyz[1] < mCoord[0][1] || xyz[2] < mCoord[0][2]) return false; if (xyz[0] > mCoord[1][0] || xyz[1] > mCoord[1][1] || xyz[2] > mCoord[1][2]) return false; return true; } protected: __hostdev__ BaseBBox() {} __hostdev__ BaseBBox(const Vec3T& min, const Vec3T& max) : mCoord{min, max} { } }; // BaseBBox template<typename Vec3T, bool = util::is_floating_point<typename Vec3T::ValueType>::value> struct BBox; /// @brief Partial template specialization for floating point coordinate types. /// /// @note Min is inclusive and max is exclusive. If min = max the dimension of /// the bounding box is zero and therefore it is also empty. template<typename Vec3T> struct BBox<Vec3T, true> : public BaseBBox<Vec3T> { using Vec3Type = Vec3T; using ValueType = typename Vec3T::ValueType; static_assert(util::is_floating_point<ValueType>::value, "Expected a floating point coordinate type"); using BaseT = BaseBBox<Vec3T>; using BaseT::mCoord; /// @brief Default construction sets BBox to an empty bbox __hostdev__ BBox() : BaseT(Vec3T( Maximum<typename Vec3T::ValueType>::value()), Vec3T(-Maximum<typename Vec3T::ValueType>::value())) { } __hostdev__ BBox(const Vec3T& min, const Vec3T& max) : BaseT(min, max) { } __hostdev__ BBox(const Coord& min, const Coord& max) : BaseT(Vec3T(ValueType(min[0]), ValueType(min[1]), ValueType(min[2])), Vec3T(ValueType(max[0] + 1), ValueType(max[1] + 1), ValueType(max[2] + 1))) { } __hostdev__ static BBox createCube(const Coord& min, typename Coord::ValueType dim) { return BBox(min, min.offsetBy(dim)); } __hostdev__ BBox(const BaseBBox<Coord>& bbox) : BBox(bbox[0], bbox[1]) { } __hostdev__ bool empty() const { return mCoord[0][0] >= mCoord[1][0] || mCoord[0][1] >= mCoord[1][1] || mCoord[0][2] >= mCoord[1][2]; } __hostdev__ operator bool() const { return mCoord[0][0] < mCoord[1][0] && mCoord[0][1] < mCoord[1][1] && mCoord[0][2] < mCoord[1][2]; } __hostdev__ Vec3T dim() const { return *this ? this->max() - this->min() : Vec3T(0); } __hostdev__ bool isInside(const Vec3T& p) const { return p[0] > mCoord[0][0] && p[1] > mCoord[0][1] && p[2] > mCoord[0][2] && p[0] < mCoord[1][0] && p[1] < mCoord[1][1] && p[2] < mCoord[1][2]; } }; // BBox<Vec3T, true> /// @brief Partial template specialization for integer coordinate types /// /// @note Both min and max are INCLUDED in the bbox so dim = max - min + 1. So, /// if min = max the bounding box contains exactly one point and dim = 1! template<typename CoordT> struct BBox<CoordT, false> : public BaseBBox<CoordT> { static_assert(util::is_same<int, typename CoordT::ValueType>::value, "Expected \"int\" coordinate type"); using BaseT = BaseBBox<CoordT>; using BaseT::mCoord; /// @brief Iterator over the domain covered by a BBox /// @details z is the fastest-moving coordinate. 
class Iterator { const BBox& mBBox; CoordT mPos; public: __hostdev__ Iterator(const BBox& b) : mBBox(b) , mPos(b.min()) { } __hostdev__ Iterator(const BBox& b, const Coord& p) : mBBox(b) , mPos(p) { } __hostdev__ Iterator& operator++() { if (mPos[2] < mBBox[1][2]) { // this is the most common case ++mPos[2];// increment z } else if (mPos[1] < mBBox[1][1]) { mPos[2] = mBBox[0][2];// reset z ++mPos[1];// increment y } else if (mPos[0] <= mBBox[1][0]) { mPos[2] = mBBox[0][2];// reset z mPos[1] = mBBox[0][1];// reset y ++mPos[0];// increment x } return *this; } __hostdev__ Iterator operator++(int) { auto tmp = *this; ++(*this); return tmp; } __hostdev__ bool operator==(const Iterator& rhs) const { NANOVDB_ASSERT(mBBox == rhs.mBBox); return mPos == rhs.mPos; } __hostdev__ bool operator!=(const Iterator& rhs) const { NANOVDB_ASSERT(mBBox == rhs.mBBox); return mPos != rhs.mPos; } __hostdev__ bool operator<(const Iterator& rhs) const { NANOVDB_ASSERT(mBBox == rhs.mBBox); return mPos < rhs.mPos; } __hostdev__ bool operator<=(const Iterator& rhs) const { NANOVDB_ASSERT(mBBox == rhs.mBBox); return mPos <= rhs.mPos; } /// @brief Return @c true if the iterator still points to a valid coordinate. __hostdev__ operator bool() const { return mPos <= mBBox[1]; } __hostdev__ const CoordT& operator*() const { return mPos; } }; // Iterator __hostdev__ Iterator begin() const { return Iterator{*this}; } __hostdev__ Iterator end() const { return Iterator{*this, CoordT(mCoord[1][0]+1, mCoord[0][1], mCoord[0][2])}; } __hostdev__ BBox() : BaseT(CoordT::max(), CoordT::min()) { } __hostdev__ BBox(const CoordT& min, const CoordT& max) : BaseT(min, max) { } template<typename SplitT> __hostdev__ BBox(BBox& other, const SplitT&) : BaseT(other.mCoord[0], other.mCoord[1]) { NANOVDB_ASSERT(this->is_divisible()); const int n = MaxIndex(this->dim()); mCoord[1][n] = (mCoord[0][n] + mCoord[1][n]) >> 1; other.mCoord[0][n] = mCoord[1][n] + 1; } __hostdev__ static BBox createCube(const CoordT& min, typename CoordT::ValueType dim) { return BBox(min, min.offsetBy(dim - 1)); } __hostdev__ static BBox createCube(typename CoordT::ValueType min, typename CoordT::ValueType max) { return BBox(CoordT(min), CoordT(max)); } __hostdev__ bool is_divisible() const { return mCoord[0][0] < mCoord[1][0] && mCoord[0][1] < mCoord[1][1] && mCoord[0][2] < mCoord[1][2]; } /// @brief Return true if this bounding box is empty, e.g. uninitialized __hostdev__ bool empty() const { return mCoord[0][0] > mCoord[1][0] || mCoord[0][1] > mCoord[1][1] || mCoord[0][2] > mCoord[1][2]; } /// @brief Convert this BBox to boolean true if it is not empty __hostdev__ operator bool() const { return mCoord[0][0] <= mCoord[1][0] && mCoord[0][1] <= mCoord[1][1] && mCoord[0][2] <= mCoord[1][2]; } __hostdev__ CoordT dim() const { return *this ? this->max() - this->min() + Coord(1) : Coord(0); } __hostdev__ uint64_t volume() const { auto d = this->dim(); return uint64_t(d[0]) * uint64_t(d[1]) * uint64_t(d[2]); } __hostdev__ bool isInside(const CoordT& p) const { return !(CoordT::lessThan(p, this->min()) || CoordT::lessThan(this->max(), p)); } /// @brief Return @c true if the given bounding box is inside this bounding box. __hostdev__ bool isInside(const BBox& b) const { return !(CoordT::lessThan(b.min(), this->min()) || CoordT::lessThan(this->max(), b.max())); } /// @brief Return @c true if the given bounding box overlaps with this bounding box. 
__hostdev__ bool hasOverlap(const BBox& b) const { return !(CoordT::lessThan(this->max(), b.min()) || CoordT::lessThan(b.max(), this->min())); } /// @warning This converts a CoordBBox into a floating-point bounding box which implies that max += 1 ! template<typename RealT = double> __hostdev__ BBox<Vec3<RealT>> asReal() const { static_assert(util::is_floating_point<RealT>::value, "CoordBBox::asReal: Expected a floating point coordinate"); return BBox<Vec3<RealT>>(Vec3<RealT>(RealT(mCoord[0][0]), RealT(mCoord[0][1]), RealT(mCoord[0][2])), Vec3<RealT>(RealT(mCoord[1][0] + 1), RealT(mCoord[1][1] + 1), RealT(mCoord[1][2] + 1))); } /// @brief Return a new instance that is expanded by the specified padding. __hostdev__ BBox expandBy(typename CoordT::ValueType padding) const { return BBox(mCoord[0].offsetBy(-padding), mCoord[1].offsetBy(padding)); } /// @brief @brief transform this coordinate bounding box by the specified map /// @param map mapping of index to world coordinates /// @return world bounding box template<typename Map> __hostdev__ auto transform(const Map& map) const { using Vec3T = Vec3<double>; const Vec3T tmp = map.applyMap(Vec3T(mCoord[0][0], mCoord[0][1], mCoord[0][2])); BBox<Vec3T> bbox(tmp, tmp);// return value bbox.expand(map.applyMap(Vec3T(mCoord[0][0], mCoord[0][1], mCoord[1][2]))); bbox.expand(map.applyMap(Vec3T(mCoord[0][0], mCoord[1][1], mCoord[0][2]))); bbox.expand(map.applyMap(Vec3T(mCoord[1][0], mCoord[0][1], mCoord[0][2]))); bbox.expand(map.applyMap(Vec3T(mCoord[1][0], mCoord[1][1], mCoord[0][2]))); bbox.expand(map.applyMap(Vec3T(mCoord[1][0], mCoord[0][1], mCoord[1][2]))); bbox.expand(map.applyMap(Vec3T(mCoord[0][0], mCoord[1][1], mCoord[1][2]))); bbox.expand(map.applyMap(Vec3T(mCoord[1][0], mCoord[1][1], mCoord[1][2]))); return bbox; } #if defined(__CUDACC__) // the following functions only run on the GPU! __device__ inline BBox& expandAtomic(const CoordT& ijk) { mCoord[0].minComponentAtomic(ijk); mCoord[1].maxComponentAtomic(ijk); return *this; } __device__ inline BBox& expandAtomic(const BBox& bbox) { mCoord[0].minComponentAtomic(bbox[0]); mCoord[1].maxComponentAtomic(bbox[1]); return *this; } __device__ inline BBox& intersectAtomic(const BBox& bbox) { mCoord[0].maxComponentAtomic(bbox[0]); mCoord[1].minComponentAtomic(bbox[1]); return *this; } #endif }; // BBox<CoordT, false> // --------------------------> Rgba8 <------------------------------------ /// @brief 8-bit red, green, blue, alpha packed into 32 bit unsigned int class Rgba8 { union { uint8_t c[4]; // 4 integer color channels of red, green, blue and alpha components. 
uint32_t packed; // 32 bit packed representation } mData; public: static const int SIZE = 4; using ValueType = uint8_t; /// @brief Default copy constructor Rgba8(const Rgba8&) = default; /// @brief Default move constructor Rgba8(Rgba8&&) = default; /// @brief Default move assignment operator /// @return non-const reference to this instance Rgba8& operator=(Rgba8&&) = default; /// @brief Default copy assignment operator /// @return non-const reference to this instance Rgba8& operator=(const Rgba8&) = default; /// @brief Default ctor initializes all channels to zero __hostdev__ Rgba8() : mData{{0, 0, 0, 0}} { static_assert(sizeof(uint32_t) == sizeof(Rgba8), "Unexpected sizeof"); } /// @brief integer r,g,b,a ctor where alpha channel defaults to opaque /// @note all values should be in the range 0u to 255u __hostdev__ Rgba8(uint8_t r, uint8_t g, uint8_t b, uint8_t a = 255u) : mData{{r, g, b, a}} { } /// @brief @brief ctor where all channels are initialized to the same value /// @note value should be in the range 0u to 255u explicit __hostdev__ Rgba8(uint8_t v) : mData{{v, v, v, v}} { } /// @brief floating-point r,g,b,a ctor where alpha channel defaults to opaque /// @note all values should be in the range 0.0f to 1.0f __hostdev__ Rgba8(float r, float g, float b, float a = 1.0f) : mData{{static_cast<uint8_t>(0.5f + r * 255.0f), // round floats to nearest integers static_cast<uint8_t>(0.5f + g * 255.0f), // double {{}} is needed due to union static_cast<uint8_t>(0.5f + b * 255.0f), static_cast<uint8_t>(0.5f + a * 255.0f)}} { } /// @brief Vec3f r,g,b ctor (alpha channel it set to 1) /// @note all values should be in the range 0.0f to 1.0f __hostdev__ Rgba8(const Vec3<float>& rgb) : Rgba8(rgb[0], rgb[1], rgb[2]) { } /// @brief Vec4f r,g,b,a ctor /// @note all values should be in the range 0.0f to 1.0f __hostdev__ Rgba8(const Vec4<float>& rgba) : Rgba8(rgba[0], rgba[1], rgba[2], rgba[3]) { } __hostdev__ bool operator< (const Rgba8& rhs) const { return mData.packed < rhs.mData.packed; } __hostdev__ bool operator==(const Rgba8& rhs) const { return mData.packed == rhs.mData.packed; } __hostdev__ float lengthSqr() const { return 0.0000153787005f * (float(mData.c[0]) * mData.c[0] + float(mData.c[1]) * mData.c[1] + float(mData.c[2]) * mData.c[2]); //1/255^2 } __hostdev__ float length() const { return sqrtf(this->lengthSqr()); } /// @brief return n'th color channel as a float in the range 0 to 1 __hostdev__ float asFloat(int n) const { return 0.003921569f*float(mData.c[n]); }// divide by 255 __hostdev__ const uint8_t& operator[](int n) const { return mData.c[n]; } __hostdev__ uint8_t& operator[](int n) { return mData.c[n]; } __hostdev__ const uint32_t& packed() const { return mData.packed; } __hostdev__ uint32_t& packed() { return mData.packed; } __hostdev__ const uint8_t& r() const { return mData.c[0]; } __hostdev__ const uint8_t& g() const { return mData.c[1]; } __hostdev__ const uint8_t& b() const { return mData.c[2]; } __hostdev__ const uint8_t& a() const { return mData.c[3]; } __hostdev__ uint8_t& r() { return mData.c[0]; } __hostdev__ uint8_t& g() { return mData.c[1]; } __hostdev__ uint8_t& b() { return mData.c[2]; } __hostdev__ uint8_t& a() { return mData.c[3]; } __hostdev__ operator Vec3<float>() const { return Vec3<float>(this->asFloat(0), this->asFloat(1), this->asFloat(2)); } __hostdev__ operator Vec4<float>() const { return Vec4<float>(this->asFloat(0), this->asFloat(1), this->asFloat(2), this->asFloat(3)); } }; // Rgba8 using Vec3d = Vec3<double>; using Vec3f = Vec3<float>; using Vec3i = 
Vec3<int32_t>; using Vec3u = Vec3<uint32_t>; using Vec3u8 = Vec3<uint8_t>; using Vec3u16 = Vec3<uint16_t>; using Vec4R = Vec4<double>; using Vec4d = Vec4<double>; using Vec4f = Vec4<float>; using Vec4i = Vec4<int>; }// namespace math =============================================================== using Rgba8 [[deprecated("Use math::Rgba8 instead.")]] = math::Rgba8; using math::Coord; using Vec3d = math::Vec3<double>; using Vec3f = math::Vec3<float>; using Vec3i = math::Vec3<int32_t>; using Vec3u = math::Vec3<uint32_t>; using Vec3u8 = math::Vec3<uint8_t>; using Vec3u16 = math::Vec3<uint16_t>; using Vec4R = math::Vec4<double>; using Vec4d = math::Vec4<double>; using Vec4f = math::Vec4<float>; using Vec4i = math::Vec4<int>; using CoordBBox = math::BBox<Coord>; using Vec3dBBox = math::BBox<Vec3d>; using BBoxR [[deprecated("Use Vec3dBBox instead.")]] = math::BBox<Vec3d>; } // namespace nanovdb =================================================================== #endif // end of NANOVDB_MATH_MATH_H_HAS_BEEN_INCLUDED
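// A minimal usage sketch (not part of the header above) of the CoordBBox and Rgba8 APIs it
// defines. It assumes the header is included as <nanovdb/math/Math.h> and is compiled as
// ordinary host C++; the boxes and color values below are purely illustrative.
#include <nanovdb/math/Math.h>
#include <cstdio>

int main()
{
    using namespace nanovdb;

    // Two integer bounding boxes; expandBy() returns a copy padded in every direction.
    CoordBBox a(Coord(0, 0, 0), Coord(7, 7, 7));
    CoordBBox b(Coord(10, 0, 0), Coord(12, 2, 2));
    const bool overlap = a.expandBy(3).hasOverlap(b); // true once 'a' is padded by 3 voxels

    // asReal() converts to a floating-point box, adding +1 to the max coordinate.
    auto realBBox = a.asReal<double>(); // spans [0,0,0] -> [8,8,8]

    // Rgba8 packs four 8-bit channels; asFloat(n) returns channel n in [0,1].
    math::Rgba8 color(0.25f, 0.5f, 1.0f); // alpha defaults to opaque
    std::printf("overlap=%d  r=%.2f  max.x=%.1f\n",
                int(overlap), color.asFloat(0), realBBox.max()[0]);
    return 0;
}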
54,481
C
36.599724
171
0.570254
NVIDIA/warp/warp/native/nanovdb/util/Util.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /*! \file nanovdb/util/Util.h \author Ken Museth \date January 8, 2020 \brief Utility functions */ #ifndef NANOVDB_UTIL_UTIL_H_HAS_BEEN_INCLUDED #define NANOVDB_UTIL_UTIL_H_HAS_BEEN_INCLUDED #ifdef __CUDACC_RTC__ typedef signed char int8_t; typedef short int16_t; typedef int int32_t; typedef long long int64_t; typedef unsigned char uint8_t; typedef unsigned int uint32_t; typedef unsigned short uint16_t; typedef unsigned long long uint64_t; #define NANOVDB_ASSERT(x) #ifndef UINT64_C #define UINT64_C(x) (x ## ULL) #endif #else // !__CUDACC_RTC__ #include <stdlib.h> // for abs in clang7 #include <stdint.h> // for types like int32_t etc #include <stddef.h> // for size_t type #include <cassert> // for assert #include <cstdio> // for stderr and snprintf #include <cmath> // for sqrt and fma #include <limits> // for numeric_limits #include <utility>// for std::move #ifdef NANOVDB_USE_IOSTREAMS #include <fstream>// for read/writeUncompressedGrids #endif// ifdef NANOVDB_USE_IOSTREAMS // All asserts can be disabled here, even for debug builds #if 1 #define NANOVDB_ASSERT(x) assert(x) #else #define NANOVDB_ASSERT(x) #endif #if defined(NANOVDB_USE_INTRINSICS) && defined(_MSC_VER) #include <intrin.h> #pragma intrinsic(_BitScanReverse) #pragma intrinsic(_BitScanForward) #pragma intrinsic(_BitScanReverse64) #pragma intrinsic(_BitScanForward64) #endif #endif // __CUDACC_RTC__ #if defined(__CUDACC__) || defined(__HIP__) // Only define __hostdev__ qualifier when using NVIDIA CUDA or HIP compilers #ifndef __hostdev__ #define __hostdev__ __host__ __device__ // Runs on the CPU and GPU, called from the CPU or the GPU #endif #else // Dummy definitions of macros only defined by CUDA and HIP compilers #ifndef __hostdev__ #define __hostdev__ // Runs on the CPU and GPU, called from the CPU or the GPU #endif #ifndef __global__ #define __global__ // Runs on the GPU, called from the CPU or the GPU #endif #ifndef __device__ #define __device__ // Runs on the GPU, called from the GPU #endif #ifndef __host__ #define __host__ // Runs on the CPU, called from the CPU #endif #endif // if defined(__CUDACC__) || defined(__HIP__) // The following macro will suppress annoying warnings when nvcc // compiles functions that call (host) intrinsics (which is perfectly valid) #if defined(_MSC_VER) && defined(__CUDACC__) #define NANOVDB_HOSTDEV_DISABLE_WARNING __pragma("hd_warning_disable") #elif defined(__GNUC__) && defined(__CUDACC__) #define NANOVDB_HOSTDEV_DISABLE_WARNING _Pragma("hd_warning_disable") #else #define NANOVDB_HOSTDEV_DISABLE_WARNING #endif // Define compiler warnings that work with all compilers //#if defined(_MSC_VER) //#define NANO_WARNING(msg) _pragma("message" #msg) //#else //#define NANO_WARNING(msg) _Pragma("message" #msg) //#endif //============================================== /// @brief Defines macros that issues warnings for deprecated header files /// @details Example: /// @code /// #include <nanovdb/util/Util.h> // for NANOVDB_DEPRECATED_HEADER /// #include <nanovdb/path/Alternative.h> /// NANOVDB_DEPRECATED_HEADER("This header file is deprecated, please use <nanovdb/path/Alternative.h> instead") /// @endcode #ifdef __GNUC__ #define NANOVDB_PRAGMA(X) _Pragma(#X) #define NANOVDB_DEPRECATED_HEADER(MSG) NANOVDB_PRAGMA(GCC warning MSG) #elif defined(_MSC_VER) #define NANOVDB_STRINGIZE_(MSG) #MSG #define NANOVDB_STRINGIZE(MSG) NANOVDB_STRINGIZE_(MSG) #define NANOVDB_DEPRECATED_HEADER(MSG) \ __pragma(message(__FILE__ "(" 
NANOVDB_STRINGIZE(__LINE__) ") : Warning: " MSG)) #endif // A portable implementation of offsetof - unfortunately it doesn't work with static_assert #define NANOVDB_OFFSETOF(CLASS, MEMBER) ((int)(size_t)((char*)&((CLASS*)0)->MEMBER - (char*)0)) namespace nanovdb {// ================================================================= namespace util {// ==================================================================== /// @brief Minimal implementation of std::declval, which converts any type @c T to //// a reference type, making it possible to use member functions in the operand /// of the decltype specifier without the need to go through constructors. /// @tparam T Template type to be converted to T&& /// @return T&& /// @warning Unlike std::declval, this version does not work when T = void! However, /// NVRTC does not like std::declval, so we provide our own implementation. template<typename T> T&& declval() noexcept; // --------------------------> string utility functions <------------------------------------ /// @brief tests if a c-string @c str is empty, that is its first value is '\0' /// @param str c-string to be tested for null termination /// @return true if str[0] = '\0' __hostdev__ inline bool empty(const char* str) { NANOVDB_ASSERT(str != nullptr); return *str == '\0'; }// util::empty /// @brief length of a c-sting, excluding '\0'. /// @param str c-string /// @return the number of characters that precede the terminating null character. __hostdev__ inline size_t strlen(const char *str) { NANOVDB_ASSERT(str != nullptr); const char *s = str; while(*s) ++s; ; return (s - str); }// util::strlen /// @brief Copy characters from @c src to @c dst. /// @param dst pointer to the destination string. /// @param src pointer to the null-terminated source string. /// @return destination string @c dst. /// @note Emulates the behaviour of std::strcpy, except this version also runs on the GPU. __hostdev__ inline char* strcpy(char *dst, const char *src) { NANOVDB_ASSERT(dst != nullptr && src != nullptr); for (char *p = dst; (*p++ = *src) != '\0'; ++src); return dst; }// util::strcpy(char*, const char*) /// @brief Copies the first num characters of @c src to @c dst. /// If the end of the source C string (which is signaled by a /// null-character) is found before @c max characters have been /// copied, @c dst is padded with zeros until a total of @c max /// characters have been written to it. /// @param dst destination string /// @param src source string /// @param max maximum number of character in destination string /// @return destination string @c dst /// @warning if strncpy(dst, src, max)[max-1]!='\0' then @c src has more /// characters than @c max and the return string needs to be /// manually null-terminated, i.e. strncpy(dst, src, max)[max-1]='\0' __hostdev__ inline char* strncpy(char *dst, const char *src, size_t max) { NANOVDB_ASSERT(dst != nullptr && src != nullptr); size_t i = 0; for (; i < max && src[i] != '\0'; ++i) dst[i] = src[i]; for (; i < max; ++i) dst[i] = '\0'; return dst; }// util::strncpy(char *dst, const char *src, size_t max) /// @brief converts a number to a string using a specific base /// @param dst destination string /// @param num signed number to be concatenated after @c dst /// @param bas base used when converting @c num to a string /// @return destination string @c dst /// @note Emulates the behaviour of itoa, except this verion also works on the GPU. 
__hostdev__ inline char* strcpy(char* dst, int num, int bas = 10) { NANOVDB_ASSERT(dst != nullptr && bas > 0); int len = 0;// length of number once converted to a string if (num == 0) dst[len++] = '0'; for (int abs = num < 0 && bas == 10 ? -num : num; abs; abs /= bas) { const int rem = abs % bas; dst[len++] = rem > 9 ? rem - 10 + 'a' : rem + '0'; } if (num < 0) dst[len++] = '-';// append '-' if negative for (char *a = dst, *b = a + len - 1; a < b; ++a, --b) {// reverse dst dst[len] = *a;// use end of string as temp *a = *b; *b = dst[len]; } dst[len] = '\0';// explicitly terminate end of string return dst; }// util::strcpy(char*, int, int) /// @brief Appends a copy of the character string pointed to by @c src to /// the end of the character string pointed to by @c dst on the device. /// @param dst pointer to the null-terminated byte string to append to. /// @param src pointer to the null-terminated byte string to copy from. /// @return pointer to the character array being appended to. /// @note Emulates the behaviour of std::strcat, except this version also runs on the GPU. __hostdev__ inline char* strcat(char *dst, const char *src) { NANOVDB_ASSERT(dst != nullptr && src != nullptr); char *p = dst; while (*p != '\0') ++p;// advance till end of dst strcpy(p, src);// append src return dst; }// util::strcat(char*, const char*) /// @brief concatenates a number after a string using a specific base /// @param dst null terminated destination string /// @param num signed number to be concatenated after @c dst /// @param bas base used when converting @c num to a string /// @return destination string @c dst __hostdev__ inline char* strcat(char* dst, int num, int bas = 10) { NANOVDB_ASSERT(dst != nullptr); char *p = dst; while (*p != '\0') ++p; strcpy(p, num, bas); return dst; }// util::strcat(char*, int, int) /// @brief Compares two null-terminated byte strings lexicographically. /// @param lhs pointer to the null-terminated byte strings to compare /// @param rhs pointer to the null-terminated byte strings to compare /// @return Negative value if @c lhs appears before @c rhs in lexicographical order. /// Zero if @c lhs and @c rhs compare equal. Positive value if @c lhs appears /// after @c rhs in lexicographical order. /// @note Emulates the behaviour of std::strcmp, except this version also runs on the GPU. __hostdev__ inline int strcmp(const char *lhs, const char *rhs) { while(*lhs != '\0' && (*lhs == *rhs)){ lhs++; rhs++; } return *(const unsigned char*)lhs - *(const unsigned char*)rhs;// zero if lhs == rhs }// util::strcmp(const char*, const char*) /// @brief Test if two null-terminated byte strings are the same /// @param lhs pointer to the null-terminated byte strings to compare /// @param rhs pointer to the null-terminated byte strings to compare /// @return true if the two c-strings are identical __hostdev__ inline bool streq(const char *lhs, const char *rhs) { return strcmp(lhs, rhs) == 0; }// util::streq namespace impl {// ======================================================= // Base-case implementation of Variadic Template function impl::sprint __hostdev__ inline char* sprint(char *dst){return dst;} // Variadic Template function impl::sprint template <typename T, typename... Types> __hostdev__ inline char* sprint(char *dst, T var1, Types... 
var2) { return impl::sprint(strcat(dst, var1), var2...); } }// namespace impl ========================================================= /// @brief prints a variable number of string and/or numbers to a destination string template <typename T, typename... Types> __hostdev__ inline char* sprint(char *dst, T var1, Types... var2) { return impl::sprint(strcpy(dst, var1), var2...); }// util::sprint // --------------------------> memzero <------------------------------------ /// @brief Zero initialization of memory /// @param dst pointer to destination /// @param byteCount number of bytes to be initialized to zero /// @return destination pointer @c dst __hostdev__ inline static void* memzero(void *dst, size_t byteCount) { NANOVDB_ASSERT(dst); const size_t wordCount = byteCount >> 3; if (wordCount << 3 == byteCount) { for (auto *d = (uint64_t*)dst, *e = d + wordCount; d != e; ++d) *d = 0ULL; } else { for (auto *d = (char*)dst, *e = d + byteCount; d != e; ++d) *d = '\0'; } return dst; }// util::memzero // --------------------------> util::is_same <------------------------------------ /// @brief C++11 implementation of std::is_same /// @note When more than two arguments are provided value = T0==T1 || T0==T2 || ... template<typename T0, typename T1, typename ...T> struct is_same { static constexpr bool value = is_same<T0, T1>::value || is_same<T0, T...>::value; }; template<typename T0, typename T1> struct is_same<T0, T1> {static constexpr bool value = false;}; template<typename T> struct is_same<T, T> {static constexpr bool value = true;}; // --------------------------> util::is_floating_point <------------------------------------ /// @brief C++11 implementation of std::is_floating_point template<typename T> struct is_floating_point {static constexpr bool value = is_same<T, float, double>::value;}; // --------------------------> util::enable_if <------------------------------------ /// @brief C++11 implementation of std::enable_if template <bool, typename T = void> struct enable_if {}; template <typename T> struct enable_if<true, T> {using type = T;}; // --------------------------> util::disable_if <------------------------------------ template<bool, typename T = void> struct disable_if {using type = T;}; template<typename T> struct disable_if<true, T> {}; // --------------------------> util::is_const <------------------------------------ template<typename T> struct is_const {static constexpr bool value = false;}; template<typename T> struct is_const<const T> {static constexpr bool value = true;}; // --------------------------> util::is_pointer <------------------------------------ /// @brief Trait used to identify template parameter that are pointers /// @tparam T Template parameter to be tested template<class T> struct is_pointer {static constexpr bool value = false;}; /// @brief Template specialization of pointers /// @tparam T Template parameter to be tested /// @note T can be both a non-const and const type template<class T> struct is_pointer<T*> {static constexpr bool value = true;}; // --------------------------> util::conditional <------------------------------------ /// @brief C++11 implementation of std::conditional template<bool, class TrueT, class FalseT> struct conditional { using type = TrueT; }; /// @brief Template specialization of conditional /// @tparam FalseT Type used when boolean is false /// @tparam TrueT Type used when boolean is true template<class TrueT, class FalseT> struct conditional<false, TrueT, FalseT> { using type = FalseT; }; // --------------------------> 
util::remove_const <------------------------------------ /// @brief Trait used to remove the const qualifier from a type. Default implementation is just a pass-through /// @tparam T Type /// @details remove_const<float>::type = float template<typename T> struct remove_const {using type = T;}; /// @brief Template specialization of the trait class used to remove the const qualifier from a type /// @tparam T Type of the const type /// @details remove_const<const float>::type = float template<typename T> struct remove_const<const T> {using type = T;}; // --------------------------> util::remove_reference <------------------------------------ /// @brief Trait used to remove reference, i.e. "&", qualifier from a type. Default implementation is just a pass-through /// @tparam T Type /// @details remove_reference<float>::type = float template <typename T> struct remove_reference {using type = T;}; /// @brief Template specialization of the trait class used to remove reference, i.e. "&", qualifier from a type /// @tparam T Type of the reference /// @details remove_reference<float&>::type = float template <typename T> struct remove_reference<T&> {using type = T;}; // --------------------------> util::remove_pointer <------------------------------------ /// @brief Trait used to remove pointer, i.e. "*", qualifier from a type. Default implementation is just a pass-through /// @tparam T Type /// @details remove_pointer<float>::type = float template <typename T> struct remove_pointer {using type = T;}; /// @brief Template specialization of the trait class used to remove pointer, i.e. "*", qualifier from a type /// @tparam T Type of the pointer /// @details remove_pointer<float*>::type = float template <typename T> struct remove_pointer<T*> {using type = T;}; // --------------------------> util::match_const <------------------------------------ /// @brief Trait used to transfer the const-ness of a reference type to another type /// @tparam T Type whose const-ness needs to match the reference type /// @tparam ReferenceT Reference type that is not const /// @details match_const<const int, float>::type = int /// match_const<int, float>::type = int template<typename T, typename ReferenceT> struct match_const {using type = typename remove_const<T>::type;}; /// @brief Template specialization used to transfer the const-ness of a reference type to another type /// @tparam T Type that will adopt the const-ness of the reference type /// @tparam ReferenceT Reference type that is const /// @details match_const<const int, const float>::type = const int /// match_const<int, const float>::type = const int template<typename T, typename ReferenceT> struct match_const<T, const ReferenceT> {using type = const typename remove_const<T>::type;}; // --------------------------> util::is_specialization <------------------------------------ /// @brief Metafunction used to determine if the first template /// parameter is a specialization of the class template /// given in the second template parameter. /// /// @details is_specialization<Vec3<float>, Vec3>::value == true; /// is_specialization<Vec3f, Vec3>::value == true; /// is_specialization<std::vector<float>, std::vector>::value == true; template<typename AnyType, template<typename...> class TemplateType> struct is_specialization {static const bool value = false;}; template<typename...
Args, template<typename...> class TemplateType> struct is_specialization<TemplateType<Args...>, TemplateType> { static const bool value = true; };// util::is_specialization // --------------------------> util::PtrDiff <------------------------------------ /// @brief Compute the distance, in bytes, between two pointers, dist = p - q /// @param p fist pointer, assumed to NOT be NULL /// @param q second pointer, assumed to NOT be NULL /// @return signed distance between pointer, p - q, addresses in units of bytes __hostdev__ inline static int64_t PtrDiff(const void* p, const void* q) { NANOVDB_ASSERT(p && q); return reinterpret_cast<const char*>(p) - reinterpret_cast<const char*>(q); }// util::PtrDiff // --------------------------> util::PtrAdd <------------------------------------ /// @brief Adds a byte offset to a non-const pointer to produce another non-const pointer /// @tparam DstT Type of the return pointer (defaults to void) /// @param p non-const input pointer, assumed to NOT be NULL /// @param offset signed byte offset /// @return a non-const pointer defined as the offset of an input pointer template<typename DstT = void> __hostdev__ inline static DstT* PtrAdd(void* p, int64_t offset) { NANOVDB_ASSERT(p); return reinterpret_cast<DstT*>(reinterpret_cast<char*>(p) + offset); }// util::PtrAdd /// @brief Adds a byte offset to a const pointer to produce another const pointer /// @tparam DstT Type of the return pointer (defaults to void) /// @param p const input pointer, assumed to NOT be NULL /// @param offset signed byte offset /// @return a const pointer defined as the offset of a const input pointer template<typename DstT = void> __hostdev__ inline static const DstT* PtrAdd(const void* p, int64_t offset) { NANOVDB_ASSERT(p); return reinterpret_cast<const DstT*>(reinterpret_cast<const char*>(p) + offset); }// util::PtrAdd // -------------------> findLowestOn <---------------------------- /// @brief Returns the index of the lowest, i.e. least significant, on bit in the specified 32 bit word /// /// @warning Assumes that at least one bit is set in the word, i.e. @a v != uint32_t(0)! NANOVDB_HOSTDEV_DISABLE_WARNING __hostdev__ inline uint32_t findLowestOn(uint32_t v) { NANOVDB_ASSERT(v); #if (defined(__CUDA_ARCH__) || defined(__HIP__)) && defined(NANOVDB_USE_INTRINSICS) return __ffs(v) - 1; // one based indexing #elif defined(_MSC_VER) && defined(NANOVDB_USE_INTRINSICS) unsigned long index; _BitScanForward(&index, v); return static_cast<uint32_t>(index); #elif (defined(__GNUC__) || defined(__clang__)) && defined(NANOVDB_USE_INTRINSICS) return static_cast<uint32_t>(__builtin_ctzl(v)); #else //NANO_WARNING("Using software implementation for findLowestOn(uint32_t v)") static const unsigned char DeBruijn[32] = { 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9}; // disable unary minus on unsigned warning #if defined(_MSC_VER) && !defined(__NVCC__) #pragma warning(push) #pragma warning(disable : 4146) #endif return DeBruijn[uint32_t((v & -v) * 0x077CB531U) >> 27]; #if defined(_MSC_VER) && !defined(__NVCC__) #pragma warning(pop) #endif #endif }// util::findLowestOn(uint32_t) /// @brief Returns the index of the lowest, i.e. least significant, on bit in the specified 64 bit word /// /// @warning Assumes that at least one bit is set in the word, i.e. @a v != uint32_t(0)! 
NANOVDB_HOSTDEV_DISABLE_WARNING __hostdev__ inline uint32_t findLowestOn(uint64_t v) { NANOVDB_ASSERT(v); #if (defined(__CUDA_ARCH__) || defined(__HIP__)) && defined(NANOVDB_USE_INTRINSICS) return __ffsll(static_cast<unsigned long long int>(v)) - 1; // one based indexing #elif defined(_MSC_VER) && defined(NANOVDB_USE_INTRINSICS) unsigned long index; _BitScanForward64(&index, v); return static_cast<uint32_t>(index); #elif (defined(__GNUC__) || defined(__clang__)) && defined(NANOVDB_USE_INTRINSICS) return static_cast<uint32_t>(__builtin_ctzll(v)); #else //NANO_WARNING("Using software implementation for util::findLowestOn(uint64_t)") static const unsigned char DeBruijn[64] = { 0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28, 62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11, 63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10, 51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12, }; // disable unary minus on unsigned warning #if defined(_MSC_VER) && !defined(__NVCC__) #pragma warning(push) #pragma warning(disable : 4146) #endif return DeBruijn[uint64_t((v & -v) * UINT64_C(0x022FDD63CC95386D)) >> 58]; #if defined(_MSC_VER) && !defined(__NVCC__) #pragma warning(pop) #endif #endif }// util::findLowestOn(uint64_t) // -------------------> findHighestOn <---------------------------- /// @brief Returns the index of the highest, i.e. most significant, on bit in the specified 32 bit word /// /// @warning Assumes that at least one bit is set in the word, i.e. @a v != uint32_t(0)! NANOVDB_HOSTDEV_DISABLE_WARNING __hostdev__ inline uint32_t findHighestOn(uint32_t v) { NANOVDB_ASSERT(v); #if (defined(__CUDA_ARCH__) || defined(__HIP__)) && defined(NANOVDB_USE_INTRINSICS) return sizeof(uint32_t) * 8 - 1 - __clz(v); // Return the number of consecutive high-order zero bits in a 32-bit integer. #elif defined(_MSC_VER) && defined(NANOVDB_USE_INTRINSICS) unsigned long index; _BitScanReverse(&index, v); return static_cast<uint32_t>(index); #elif (defined(__GNUC__) || defined(__clang__)) && defined(NANOVDB_USE_INTRINSICS) return sizeof(unsigned long) * 8 - 1 - __builtin_clzl(v); #else //NANO_WARNING("Using software implementation for util::findHighestOn(uint32_t)") static const unsigned char DeBruijn[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31}; v |= v >> 1; // first round down to one less than a power of 2 v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; return DeBruijn[uint32_t(v * 0x07C4ACDDU) >> 27]; #endif }// util::findHighestOn /// @brief Returns the index of the highest, i.e. most significant, on bit in the specified 64 bit word /// /// @warning Assumes that at least one bit is set in the word, i.e. @a v != uint32_t(0)! NANOVDB_HOSTDEV_DISABLE_WARNING __hostdev__ inline uint32_t findHighestOn(uint64_t v) { NANOVDB_ASSERT(v); #if (defined(__CUDA_ARCH__) || defined(__HIP__)) && defined(NANOVDB_USE_INTRINSICS) return sizeof(unsigned long) * 8 - 1 - __clzll(static_cast<unsigned long long int>(v)); #elif defined(_MSC_VER) && defined(NANOVDB_USE_INTRINSICS) unsigned long index; _BitScanReverse64(&index, v); return static_cast<uint32_t>(index); #elif (defined(__GNUC__) || defined(__clang__)) && defined(NANOVDB_USE_INTRINSICS) return sizeof(unsigned long) * 8 - 1 - __builtin_clzll(v); #else const uint32_t* p = reinterpret_cast<const uint32_t*>(&v); return p[1] ? 
32u + findHighestOn(p[1]) : findHighestOn(p[0]); #endif }// util::findHighestOn // ----------------------------> util::countOn <-------------------------------------- /// @return Number of bits that are on in the specified 64-bit word NANOVDB_HOSTDEV_DISABLE_WARNING __hostdev__ inline uint32_t countOn(uint64_t v) { #if (defined(__CUDA_ARCH__) || defined(__HIP__)) && defined(NANOVDB_USE_INTRINSICS) //#warning Using popcll for util::countOn return __popcll(v); // __popcnt64 intrinsic support was added in VS 2019 16.8 #elif defined(_MSC_VER) && defined(_M_X64) && (_MSC_VER >= 1928) && defined(NANOVDB_USE_INTRINSICS) //#warning Using popcnt64 for util::countOn return uint32_t(__popcnt64(v)); #elif (defined(__GNUC__) || defined(__clang__)) && defined(NANOVDB_USE_INTRINSICS) //#warning Using builtin_popcountll for util::countOn return __builtin_popcountll(v); #else // use software implementation //NANO_WARNING("Using software implementation for util::countOn") v = v - ((v >> 1) & uint64_t(0x5555555555555555)); v = (v & uint64_t(0x3333333333333333)) + ((v >> 2) & uint64_t(0x3333333333333333)); return (((v + (v >> 4)) & uint64_t(0xF0F0F0F0F0F0F0F)) * uint64_t(0x101010101010101)) >> 56; #endif }// util::countOn(uint64_t) }// namespace util ================================================================== [[deprecated("Use nanovdb::util::findLowestOn instead")]] __hostdev__ inline uint32_t FindLowestOn(uint32_t v){return util::findLowestOn(v);} [[deprecated("Use nanovdb::util::findLowestOn instead")]] __hostdev__ inline uint32_t FindLowestOn(uint64_t v){return util::findLowestOn(v);} [[deprecated("Use nanovdb::util::findHighestOn instead")]] __hostdev__ inline uint32_t FindHighestOn(uint32_t v){return util::findHighestOn(v);} [[deprecated("Use nanovdb::util::findHighestOn instead")]] __hostdev__ inline uint32_t FindHighestOn(uint64_t v){return util::findHighestOn(v);} [[deprecated("Use nanovdb::util::countOn instead")]] __hostdev__ inline uint32_t CountOn(uint64_t v){return util::countOn(v);} } // namespace nanovdb =================================================================== #endif // end of NANOVDB_UTIL_UTIL_H_HAS_BEEN_INCLUDED
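// A minimal host-side sketch (not part of the header above) exercising a few of the util::
// helpers it defines. It assumes the header is reachable as <nanovdb/util/Util.h>; the
// printed values follow directly from the implementations above.
#include <nanovdb/util/Util.h>
#include <cstdio>

int main()
{
    using namespace nanovdb;

    // sprint() concatenates strings and integers into a caller-provided buffer.
    char buf[64];
    util::sprint(buf, "count = ", 42, " items");
    std::printf("%s\n", buf); // prints: count = 42 items

    // streq()/strcmp() behave like their std:: counterparts but also run on the GPU.
    std::printf("streq: %d\n", int(util::streq("nanovdb", "nanovdb"))); // 1

    // Bit utilities: 0xFF has 8 set bits, lowest at index 0, highest at index 7.
    std::printf("countOn=%u lowest=%u highest=%u\n",
                util::countOn(uint64_t(0xFF)),
                util::findLowestOn(uint32_t(0xFF)),
                util::findHighestOn(uint32_t(0xFF)));
    return 0;
}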
27,051
C
40.112462
126
0.634838
NVIDIA/warp/warp/native/nanovdb/util/cuda/Timer.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /// @file nanovdb/util/cuda/Timer.h /// /// @author Ken Museth /// /// @brief A simple GPU timing class #ifndef NANOVDB_UTIL_CUDA_TIMER_H_HAS_BEEN_INCLUDED #define NANOVDB_UTIL_CUDA_TIMER_H_HAS_BEEN_INCLUDED #include <iostream>// for std::cerr #include <cuda.h> #include <cuda_runtime_api.h> namespace nanovdb { namespace util{ namespace cuda { class Timer { cudaStream_t mStream{0}; cudaEvent_t mStart, mStop; public: /// @brief Default constructor /// @param stream CUDA stream to be timed (defaults to stream 0) /// @note Starts the timer Timer(cudaStream_t stream = 0) : mStream(stream) { cudaEventCreate(&mStart); cudaEventCreate(&mStop); cudaEventRecord(mStart, mStream); } /// @brief Construct and start the timer /// @param msg string message to be printed when timer is started /// @param stream CUDA stream to be timed (defaults to stream 0) /// @param os output stream for the message above Timer(const std::string &msg, cudaStream_t stream = 0, std::ostream& os = std::cerr) : mStream(stream) { os << msg << " ... " << std::flush; cudaEventCreate(&mStart); cudaEventCreate(&mStop); cudaEventRecord(mStart, mStream); } /// @brief Destructor ~Timer() { cudaEventDestroy(mStart); cudaEventDestroy(mStop); } /// @brief Start the timer on the stream provided at construction void start() {cudaEventRecord(mStart, mStream);} /// @brief Start the timer /// @param msg string message to be printed when timer is started /// @param os output stream for the message above void start(const std::string &msg, std::ostream& os = std::cerr) { os << msg << " ... " << std::flush; this->start(); } /// @brief Start the timer /// @param msg string message to be printed when timer is started /// @param os output stream for the message above void start(const char* msg, std::ostream& os = std::cerr) { os << msg << " ... " << std::flush; this->start(); } /// @brief elapsed time (since start) in milliseconds /// @return elapsed time (since start) in milliseconds float elapsed() { cudaEventRecord(mStop, mStream); cudaEventSynchronize(mStop); float diff = 0.0f; cudaEventElapsedTime(&diff, mStart, mStop); return diff; } /// @brief stop the timer /// @param os output stream for the message above void stop(std::ostream& os = std::cerr) { float diff = this->elapsed(); os << "completed in " << diff << " milliseconds" << std::endl; } /// @brief stop and start the timer /// @param msg string message to be printed when timer is started /// @param os output stream for the message above /// @warning Remember to call start before restart void restart(const std::string &msg, std::ostream& os = std::cerr) { this->stop(); this->start(msg, os); } };// Timer }}// namespace util::cuda using GpuTimer [[deprecated("Use nanovdb::util::cuda::Timer instead")]] = util::cuda::Timer; } // namespace nanovdb #endif // NANOVDB_UTIL_CUDA_TIMER_H_HAS_BEEN_INCLUDED
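// A minimal usage sketch (not part of the header above), assuming it is included as
// <nanovdb/util/cuda/Timer.h> and the translation unit is built against the CUDA runtime.
// The function name and the work being timed are illustrative stand-ins.
#include <nanovdb/util/cuda/Timer.h>

void timeSomeWork(cudaStream_t stream)
{
    // The message constructor prints "launching kernels ... " and starts timing immediately.
    nanovdb::util::cuda::Timer timer("launching kernels", stream);
    // ... enqueue asynchronous work on 'stream' here ...
    timer.stop();                 // prints "completed in X milliseconds"

    timer.start("second batch");  // the same object can time another section
    // ... more asynchronous work ...
    timer.stop();
}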
3,363
C
27.752137
91
0.621469
NVIDIA/warp/warp/native/nanovdb/util/cuda/Util.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /*! \file nanovdb/util/cuda/Util.h \author Ken Museth \date December 20, 2023 \brief Cuda specific utility functions */ #ifndef NANOVDB_UTIL_CUDA_UTIL_H_HAS_BEEN_INCLUDED #define NANOVDB_UTIL_CUDA_UTIL_H_HAS_BEEN_INCLUDED #include <cuda.h> #include <cuda_runtime_api.h> #include <nanovdb/util/Util.h> // for stderr and NANOVDB_ASSERT // change 1 -> 0 to only perform asserts during debug builds #if 1 || defined(DEBUG) || defined(_DEBUG) static inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "CUDA error %u: %s (%s:%d)\n", unsigned(code), cudaGetErrorString(code), file, line); //fprintf(stderr, "CUDA Runtime Error: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } static inline void ptrAssert(const void* ptr, const char* msg, const char* file, int line, bool abort = true) { if (ptr == nullptr) { fprintf(stderr, "NULL pointer error: %s %s %d\n", msg, file, line); if (abort) exit(1); } else if (uint64_t(ptr) % 32) { fprintf(stderr, "Pointer misalignment error: %s %s %d\n", msg, file, line); if (abort) exit(1); } } #else static inline void gpuAssert(cudaError_t, const char*, int, bool = true){} static inline void ptrAssert(void*, const char*, const char*, int, bool = true){} #endif // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. #define cudaCheck(ans) \ { \ gpuAssert((ans), __FILE__, __LINE__); \ } #define checkPtr(ptr, msg) \ { \ ptrAssert((ptr), (msg), __FILE__, __LINE__); \ } #define cudaSync() \ { \ cudaCheck(cudaDeviceSynchronize()); \ } #define cudaCheckError() \ { \ cudaCheck(cudaGetLastError()); \ } namespace nanovdb {// ========================================================= namespace util{ namespace cuda {// ====================================================== //#define NANOVDB_USE_SYNC_CUDA_MALLOC // cudaMallocAsync and cudaFreeAsync were introduced in CUDA 11.2 so we introduce // custom implementations that map to cudaMalloc and cudaFree below. If NANOVDB_USE_SYNC_CUDA_MALLOC // is defined these implementations will also be defined, which is useful in virtualized environments // that slice up the GPU and share it between instances as vGPU's. GPU unified memory is usually disabled // out of security considerations. Asynchronous CUDA malloc/free depends on GPU unified memory, so it // is not possible to use cudaMallocAsync and cudaFreeAsync in such environments. 
#if (CUDART_VERSION < 11020) || defined(NANOVDB_USE_SYNC_CUDA_MALLOC) // 11.2 introduced cudaMallocAsync and cudaFreeAsync /// @brief Simple wrapper that calls cudaMalloc /// @param d_ptr Device pointer to allocated device memory /// @param size Number of bytes to allocate /// @param dummy The stream establishing the stream ordering contract and the memory pool to allocate from (ignored) /// @return Cuda error code inline cudaError_t mallocAsync(void** d_ptr, size_t size, cudaStream_t){return cudaMalloc(d_ptr, size);} /// @brief Simple wrapper that calls cudaFree /// @param d_ptr Device pointer that will be freed /// @param dummy The stream establishing the stream ordering promise (ignored) /// @return Cuda error code inline cudaError_t freeAsync(void* d_ptr, cudaStream_t){return cudaFree(d_ptr);} #else /// @brief Simple wrapper that calls cudaMallocAsync /// @param d_ptr Device pointer to allocated device memory /// @param size Number of bytes to allocate /// @param stream The stream establishing the stream ordering contract and the memory pool to allocate from /// @return Cuda error code inline cudaError_t mallocAsync(void** d_ptr, size_t size, cudaStream_t stream){return cudaMallocAsync(d_ptr, size, stream);} /// @brief Simple wrapper that calls cudaFreeAsync /// @param d_ptr Device pointer that will be freed /// @param stream The stream establishing the stream ordering promise /// @return Cuda error code inline cudaError_t freeAsync(void* d_ptr, cudaStream_t stream){return cudaFreeAsync(d_ptr, stream);} #endif /// @brief Simple (naive) implementation of a unique device pointer /// using stream ordered memory allocation and deallocation. /// @tparam T Type of the device pointer template <typename T> class unique_ptr { T *mPtr;// pointer to stream ordered memory allocation cudaStream_t mStream; public: unique_ptr(size_t count = 0, cudaStream_t stream = 0) : mPtr(nullptr), mStream(stream) { if (count>0) cudaCheck(mallocAsync((void**)&mPtr, count*sizeof(T), stream)); } unique_ptr(const unique_ptr&) = delete; unique_ptr(unique_ptr&& other) : mPtr(other.mPtr), mStream(other.mStream) { other.mPtr = nullptr; } ~unique_ptr() { if (mPtr) cudaCheck(freeAsync(mPtr, mStream)); } unique_ptr& operator=(const unique_ptr&) = delete; unique_ptr& operator=(unique_ptr&& rhs) noexcept { mPtr = rhs.mPtr; mStream = rhs.mStream; rhs.mPtr = nullptr; return *this; } void reset() { if (mPtr) { cudaCheck(freeAsync(mPtr, mStream)); mPtr = nullptr; } } T* get() const {return mPtr;} explicit operator bool() const {return mPtr != nullptr;} };// util::cuda::unique_ptr /// @brief Computes the number of blocks per grid given the problem size and number of threads per block /// @param numItems Problem size /// @param threadsPerBlock Number of threads per block (second CUDA launch parameter) /// @return number of blocks per grid (first CUDA launch parameter) /// @note CUDA launch parameters: kernel<<< blocksPerGrid, threadsPerBlock, sharedMemSize, streamID>>> inline size_t blocksPerGrid(size_t numItems, size_t threadsPerBlock) { NANOVDB_ASSERT(numItems > 0 && threadsPerBlock >= 32 && threadsPerBlock % 32 == 0); return (numItems + threadsPerBlock - 1) / threadsPerBlock; } #if defined(__CUDACC__)// the following functions only run on the GPU! /// @brief Cuda kernel that launches device lambda functions /// @param numItems Problem size template<typename Func, typename... Args> __global__ void lambdaKernel(const size_t numItems, Func func, Args... 
args) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= numItems) return; func(tid, args...); }// util::cuda::lambdaKernel #endif// __CUDACC__ }}// namespace util::cuda ============================================================ }// namespace nanovdb =============================================================== #if defined(__CUDACC__)// the following functions only run on the GPU! template<typename Func, typename... Args> [[deprecated("Use nanovdb::cuda::lambdaKernel instead")]] __global__ void cudaLambdaKernel(const size_t numItems, Func func, Args... args) { const int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= numItems) return; func(tid, args...); } #endif// __CUDACC__ #endif// NANOVDB_UTIL_CUDA_UTIL_H_HAS_BEEN_INCLUDED
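// A minimal host-side sketch (not part of the header above) of the stream-ordered allocation
// helpers it defines. It assumes the header is included as <nanovdb/util/cuda/Util.h>; the
// element count and thread count below are illustrative, and the kernel launch is only
// indicated in a comment since device code (e.g. lambdaKernel) requires nvcc.
#include <nanovdb/util/cuda/Util.h>

void scaleBuffer(cudaStream_t stream)
{
    using namespace nanovdb::util::cuda;

    const size_t n = 1 << 20;
    unique_ptr<float> d_data(n, stream);    // allocates n*sizeof(float) via mallocAsync

    // blocksPerGrid() computes the first CUDA launch parameter for a given block size.
    const size_t threads = 128;
    const size_t blocks  = blocksPerGrid(n, threads);
    (void)blocks;
    // kernel<<<blocks, threads, 0, stream>>>(n, d_data.get()); // launch site (device code not shown)

    cudaCheck(cudaStreamSynchronize(stream));
    // d_data releases its memory with freeAsync on the same stream when it goes out of scope.
}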
7,343
C
37.051813
124
0.657769
NVIDIA/warp/warp/native/nanovdb/cuda/DeviceBuffer.h
// Copyright Contributors to the OpenVDB Project // SPDX-License-Identifier: MPL-2.0 /*! \file DeviceBuffer.h \author Ken Museth \date January 8, 2020 \brief Implements a simple dual (host/device) CUDA buffer. \note This file has no device-only kernel functions, which explains why it's a .h and not .cuh file. */ #ifndef NANOVDB_CUDA_DEVICEBUFFER_H_HAS_BEEN_INCLUDED #define NANOVDB_CUDA_DEVICEBUFFER_H_HAS_BEEN_INCLUDED #include <nanovdb/HostBuffer.h>// for BufferTraits #include <nanovdb/util/cuda/Util.h>// for cudaMalloc/cudaMallocManaged/cudaFree namespace nanovdb {// ================================================================ namespace cuda {// =================================================================== // ----------------------------> DeviceBuffer <-------------------------------------- /// @brief Simple memory buffer using un-managed pinned host memory when compiled with NVCC. /// Obviously this class is making explicit used of CUDA so replace it with your own memory /// allocator if you are not using CUDA. /// @note While CUDA's pinned host memory allows for asynchronous memory copy between host and device /// it is significantly slower then cached (un-pinned) memory on the host. class DeviceBuffer { uint64_t mSize; // total number of bytes managed by this buffer (assumed to be identical for host and device) void *mCpuData, *mGpuData; // raw pointers to the host and device buffers bool mManaged; public: /// @brief Static factory method that return an instance of this buffer /// @param size byte size of buffer to be initialized /// @param dummy this argument is currently ignored but required to match the API of the HostBuffer /// @param host If true buffer is initialized only on the host/CPU, else on the device/GPU /// @param stream optional stream argument (defaults to stream NULL) /// @return An instance of this class using move semantics static DeviceBuffer create(uint64_t size, const DeviceBuffer* dummy = nullptr, bool host = true, void* stream = nullptr); /// @brief Static factory method that return an instance of this buffer that wraps externally managed memory /// @param size byte size of buffer specified by external memory /// @param cpuData pointer to externally managed host memory /// @param gpuData pointer to externally managed device memory /// @return An instance of this class using move semantics static DeviceBuffer create(uint64_t size, void* cpuData, void* gpuData); /// @brief Constructor /// @param size byte size of buffer to be initialized /// @param host If true buffer is initialized only on the host/CPU, else on the device/GPU /// @param stream optional stream argument (defaults to stream NULL) DeviceBuffer(uint64_t size = 0, bool host = true, void* stream = nullptr) : mSize(0) , mCpuData(nullptr) , mGpuData(nullptr) , mManaged(false) { if (size > 0) this->init(size, host, stream); } DeviceBuffer(uint64_t size, void* cpuData, void* gpuData) : mSize(size) , mCpuData(cpuData) , mGpuData(gpuData) , mManaged(false) { } /// @brief Disallow copy-construction DeviceBuffer(const DeviceBuffer&) = delete; /// @brief Move copy-constructor DeviceBuffer(DeviceBuffer&& other) noexcept : mSize(other.mSize) , mCpuData(other.mCpuData) , mGpuData(other.mGpuData) , mManaged(other.mManaged) { other.mSize = 0; other.mCpuData = nullptr; other.mGpuData = nullptr; other.mManaged = false; } /// @brief Disallow copy assignment operation DeviceBuffer& operator=(const DeviceBuffer&) = delete; /// @brief Move copy assignment operation DeviceBuffer& operator=(DeviceBuffer&& other) 
noexcept { this->clear(); mSize = other.mSize; mCpuData = other.mCpuData; mGpuData = other.mGpuData; mManaged = other.mManaged; other.mSize = 0; other.mCpuData = nullptr; other.mGpuData = nullptr; other.mManaged = false; return *this; } /// @brief Destructor frees memory on both the host and device ~DeviceBuffer() { this->clear(); }; /// @brief Initialize buffer /// @param size byte size of buffer to be initialized /// @param host If true buffer is initialized only on the host/CPU, else on the device/GPU /// @note All existing buffers are first cleared /// @warning size is expected to be non-zero. Use clear() to clear the buffer! void init(uint64_t size, bool host = true, void* stream = nullptr); /// @brief Returns a raw pointer to the host/CPU buffer managed by this allocator. /// @warning Note that the pointer can be NULL! void* data() const { return mCpuData; } /// @brief Returns a raw pointer to the device/GPU buffer managed by this allocator. /// @warning Note that the pointer can be NULL! void* deviceData() const { return mGpuData; } /// @brief Upload this buffer from the host to the device, i.e. CPU -> GPU. /// @param stream optional CUDA stream (defaults to CUDA stream 0) /// @param sync if false the memory copy is asynchronous /// @note If the device/GPU buffer does not exist it is first allocated /// @warning Assumes that the host/CPU buffer already exists void deviceUpload(void* stream = nullptr, bool sync = true) const; /// @brief Download this buffer from the device to the host, i.e. GPU -> CPU. /// @param stream optional CUDA stream (defaults to CUDA stream 0) /// @param sync if false the memory copy is asynchronous /// @note If the host/CPU buffer does not exist it is first allocated /// @warning Assumes that the device/GPU buffer already exists void deviceDownload(void* stream = nullptr, bool sync = true) const; /// @brief Returns the size in bytes of the raw memory buffer managed by this allocator. uint64_t size() const { return mSize; } //@{ /// @brief Returns true if this allocator is empty, i.e. has no allocated memory bool empty() const { return mSize == 0; } bool isEmpty() const { return mSize == 0; } //@} /// @brief De-allocate all memory managed by this allocator and set all pointers to NULL void clear(void* stream = nullptr); }; // DeviceBuffer class // --------------------------> Implementations below <------------------------------------ inline DeviceBuffer DeviceBuffer::create(uint64_t size, const DeviceBuffer*, bool host, void* stream) { return DeviceBuffer(size, host, stream); } inline DeviceBuffer DeviceBuffer::create(uint64_t size, void* cpuData, void* gpuData) { return DeviceBuffer(size, cpuData, gpuData); } inline void DeviceBuffer::init(uint64_t size, bool host, void* stream) { if (mSize>0) this->clear(stream); NANOVDB_ASSERT(size > 0); if (host) { cudaCheck(cudaMallocHost((void**)&mCpuData, size)); // un-managed pinned memory on the host (can be slow to access!). Always 32B aligned checkPtr(mCpuData, "cuda::DeviceBuffer::init: failed to allocate host buffer"); } else { cudaCheck(util::cuda::mallocAsync((void**)&mGpuData, size, reinterpret_cast<cudaStream_t>(stream))); // un-managed memory on the device, always 32B aligned! checkPtr(mGpuData, "cuda::DeviceBuffer::init: failed to allocate device buffer"); } mSize = size; mManaged = true; } // DeviceBuffer::init inline void DeviceBuffer::deviceUpload(void* stream, bool sync) const { if (!mManaged) throw std::runtime_error("DeviceBuffer::deviceUpload called on externally managed memory.
Replace deviceUpload call with the appropriate external copy operation."); checkPtr(mCpuData, "uninitialized cpu data"); if (mGpuData == nullptr) { cudaCheck(util::cuda::mallocAsync((void**)&mGpuData, mSize, reinterpret_cast<cudaStream_t>(stream))); // un-managed memory on the device, always 32B aligned! } checkPtr(mGpuData, "uninitialized gpu data"); cudaCheck(cudaMemcpyAsync(mGpuData, mCpuData, mSize, cudaMemcpyHostToDevice, reinterpret_cast<cudaStream_t>(stream))); if (sync) cudaCheck(cudaStreamSynchronize(reinterpret_cast<cudaStream_t>(stream))); } // DeviceBuffer::deviceUpload inline void DeviceBuffer::deviceDownload(void* stream, bool sync) const { if (!mManaged) throw std::runtime_error("DeviceBuffer::deviceDownload called on externally managed memory. Replace deviceDownload call with the appropriate external copy operation."); checkPtr(mGpuData, "uninitialized gpu data"); if (mCpuData == nullptr) { cudaCheck(cudaMallocHost((void**)&mCpuData, mSize)); // un-managed pinned memory on the host (can be slow to access!). Always 32B aligned } checkPtr(mCpuData, "uninitialized cpu data"); cudaCheck(cudaMemcpyAsync(mCpuData, mGpuData, mSize, cudaMemcpyDeviceToHost, reinterpret_cast<cudaStream_t>(stream))); if (sync) cudaCheck(cudaStreamSynchronize(reinterpret_cast<cudaStream_t>(stream))); } // DeviceBuffer::deviceDownload inline void DeviceBuffer::clear(void *stream) { if (mManaged && mGpuData) cudaCheck(util::cuda::freeAsync(mGpuData, reinterpret_cast<cudaStream_t>(stream))); if (mManaged && mCpuData) cudaCheck(cudaFreeHost(mCpuData)); mCpuData = mGpuData = nullptr; mSize = 0; mManaged = false; } // DeviceBuffer::clear }// namespace cuda using CudaDeviceBuffer [[deprecated("Use nanovdb::cuda::DeviceBuffer instead")]] = cuda::DeviceBuffer; template<> struct BufferTraits<cuda::DeviceBuffer> { static constexpr bool hasDeviceDual = true; }; }// namespace nanovdb #endif // end of NANOVDB_CUDA_DEVICEBUFFER_H_HAS_BEEN_INCLUDED
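// A minimal host-side sketch (not part of the header above) of the DeviceBuffer round trip:
// allocate pinned host memory, fill it, mirror it on the device, and copy it back. It assumes
// the header is included as <nanovdb/cuda/DeviceBuffer.h>; the buffer size is illustrative.
#include <nanovdb/cuda/DeviceBuffer.h>
#include <cstring>

void roundTrip()
{
    using nanovdb::cuda::DeviceBuffer;

    DeviceBuffer buffer = DeviceBuffer::create(1024);   // host-only (pinned) allocation
    std::memset(buffer.data(), 0, buffer.size());       // fill the host side

    buffer.deviceUpload();    // allocates the device side on demand, then copies CPU -> GPU
    // ... launch kernels that read/write buffer.deviceData() ...
    buffer.deviceDownload();  // copies GPU -> CPU (synchronous by default)

    buffer.clear();           // frees both the host and device allocations
}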
9,790
C
41.202586
187
0.675587
NVIDIA/warp/warp/native/clang/clang.cpp
/** Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved. * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #include "../native/crt.h" #include <clang/Frontend/CompilerInstance.h> #include <clang/Basic/DiagnosticOptions.h> #include <clang/Frontend/TextDiagnosticPrinter.h> #if LLVM_VERSION_MAJOR >= 18 #include <llvm/Frontend/Debug/Options.h> #else #include <llvm/Support/CodeGen.h> #endif #include <clang/CodeGen/CodeGenAction.h> #include <clang/Basic/TargetInfo.h> #include <clang/Lex/PreprocessorOptions.h> #include <llvm/Support/TargetSelect.h> #include <llvm/IR/Module.h> #include <llvm/IR/LLVMContext.h> #include <llvm/ExecutionEngine/GenericValue.h> #include <llvm/Target/TargetMachine.h> #include <llvm/MC/TargetRegistry.h> #include <llvm/PassRegistry.h> #include <llvm/InitializePasses.h> #include <llvm/IR/LegacyPassManager.h> #include <llvm/IRReader/IRReader.h> #include <llvm/Linker/Linker.h> #include <llvm/ExecutionEngine/Orc/LLJIT.h> #include <llvm/ExecutionEngine/JITEventListener.h> #include <llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h> #include <llvm/ExecutionEngine/Orc/ExecutionUtils.h> #include <llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h> #include <llvm/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.h> #include <llvm/ExecutionEngine/SectionMemoryManager.h> #include <cmath> #include <vector> #include <iostream> #include <string> #include <cstring> #if defined(_WIN64) extern "C" void __chkstk(); #elif defined(__APPLE__) extern "C" void __bzero(void*, size_t); extern "C" __double2 __sincos_stret(double); extern "C" __float2 __sincosf_stret(float); #endif extern "C" { // GDB and LLDB support debugging of JIT-compiled code by observing calls to __jit_debug_register_code() // by putting a breakpoint on it, and retrieving the debug info through __jit_debug_descriptor. // On Linux it suffices for these symbols not to be stripped out, while for Windows a .pdb has to contain // their information. LLVM defines them, but we don't want a huge .pdb with all LLVM source code's debug // info. By forward-declaring them here it suffices to compile this file with /Zi. extern struct jit_descriptor __jit_debug_descriptor; extern void __jit_debug_register_code(); } namespace wp { #if defined (_WIN32) // Windows defaults to using the COFF binary format (aka. "msvc" in the target triple). // Override it to use the ELF format to support DWARF debug info, but keep using the // Microsoft calling convention (see also https://llvm.org/docs/DebuggingJITedCode.html). static const char* target_triple = "x86_64-pc-windows-elf"; #else static const char* target_triple = LLVM_DEFAULT_TARGET_TRIPLE; #endif static void initialize_llvm() { llvm::InitializeAllTargetInfos(); llvm::InitializeAllTargets(); llvm::InitializeAllTargetMCs(); llvm::InitializeAllAsmPrinters(); } static std::unique_ptr<llvm::Module> cpp_to_llvm(const std::string& input_file, const char* cpp_src, const char* include_dir, bool debug, bool verify_fp, llvm::LLVMContext& context) { // Compilation arguments std::vector<const char*> args; args.push_back(input_file.c_str()); args.push_back("-I"); args.push_back(include_dir); args.push_back(debug ? 
"-O0" : "-O2"); args.push_back("-triple"); args.push_back(target_triple); #if defined(__x86_64__) || defined(_M_X64) args.push_back("-target-feature"); args.push_back("+f16c"); // Enables support for _Float16 #endif clang::IntrusiveRefCntPtr<clang::DiagnosticOptions> diagnostic_options = new clang::DiagnosticOptions(); std::unique_ptr<clang::TextDiagnosticPrinter> text_diagnostic_printer = std::make_unique<clang::TextDiagnosticPrinter>(llvm::errs(), &*diagnostic_options); clang::IntrusiveRefCntPtr<clang::DiagnosticIDs> diagnostic_ids; std::unique_ptr<clang::DiagnosticsEngine> diagnostic_engine = std::make_unique<clang::DiagnosticsEngine>(diagnostic_ids, &*diagnostic_options, text_diagnostic_printer.release()); clang::CompilerInstance compiler_instance; auto& compiler_invocation = compiler_instance.getInvocation(); clang::CompilerInvocation::CreateFromArgs(compiler_invocation, args, *diagnostic_engine.release()); if(debug) { #if LLVM_VERSION_MAJOR >= 18 compiler_invocation.getCodeGenOpts().setDebugInfo(llvm::codegenoptions::FullDebugInfo); #else compiler_invocation.getCodeGenOpts().setDebugInfo(clang::codegenoptions::FullDebugInfo); #endif } // Map code to a MemoryBuffer std::unique_ptr<llvm::MemoryBuffer> buffer = llvm::MemoryBuffer::getMemBufferCopy(cpp_src); compiler_invocation.getPreprocessorOpts().addRemappedFile(input_file.c_str(), buffer.get()); if(!debug) { compiler_instance.getPreprocessorOpts().addMacroDef("NDEBUG"); } if(verify_fp) { compiler_instance.getPreprocessorOpts().addMacroDef("WP_VERIFY_FP"); } compiler_instance.getLangOpts().MicrosoftExt = 1; // __forceinline / __int64 compiler_instance.getLangOpts().DeclSpecKeyword = 1; // __declspec compiler_instance.createDiagnostics(text_diagnostic_printer.get(), false); clang::EmitLLVMOnlyAction emit_llvm_only_action(&context); bool success = compiler_instance.ExecuteAction(emit_llvm_only_action); buffer.release(); return success ? std::move(emit_llvm_only_action.takeModule()) : nullptr; } static std::unique_ptr<llvm::Module> cuda_to_llvm(const std::string& input_file, const char* cpp_src, const char* include_dir, bool debug, llvm::LLVMContext& context) { // Compilation arguments std::vector<const char*> args; args.push_back(input_file.c_str()); args.push_back("-I"); args.push_back(include_dir); args.push_back(debug ? 
"-O0" : "-O2"); args.push_back("-triple"); args.push_back("nvptx64-nvidia-cuda"); args.push_back("-target-cpu"); args.push_back("sm_70"); clang::IntrusiveRefCntPtr<clang::DiagnosticOptions> diagnostic_options = new clang::DiagnosticOptions(); std::unique_ptr<clang::TextDiagnosticPrinter> text_diagnostic_printer = std::make_unique<clang::TextDiagnosticPrinter>(llvm::errs(), &*diagnostic_options); clang::IntrusiveRefCntPtr<clang::DiagnosticIDs> diagnostic_ids; std::unique_ptr<clang::DiagnosticsEngine> diagnostic_engine = std::make_unique<clang::DiagnosticsEngine>(diagnostic_ids, &*diagnostic_options, text_diagnostic_printer.release()); clang::CompilerInstance compiler_instance; auto& compiler_invocation = compiler_instance.getInvocation(); clang::CompilerInvocation::CreateFromArgs(compiler_invocation, args, *diagnostic_engine.release()); if(debug) { #if LLVM_VERSION_MAJOR >= 18 compiler_invocation.getCodeGenOpts().setDebugInfo(llvm::codegenoptions::FullDebugInfo); #else compiler_invocation.getCodeGenOpts().setDebugInfo(clang::codegenoptions::FullDebugInfo); #endif } // Map code to a MemoryBuffer std::unique_ptr<llvm::MemoryBuffer> buffer = llvm::MemoryBuffer::getMemBufferCopy(cpp_src); compiler_invocation.getPreprocessorOpts().addRemappedFile(input_file.c_str(), buffer.get()); // According to https://llvm.org/docs/CompileCudaWithLLVM.html, "Both clang and nvcc define `__CUDACC__` during CUDA compilation." // But this normally happens in the __clang_cuda_runtime_wrapper.h header, which we don't include. // The __CUDA__ and __CUDA_ARCH__ macros are internally defined by llvm-project/clang/lib/Frontend/InitPreprocessor.cpp compiler_instance.getPreprocessorOpts().addMacroDef("__CUDACC__"); if(!debug) { compiler_instance.getPreprocessorOpts().addMacroDef("NDEBUG"); } compiler_instance.getLangOpts().CUDA = 1; compiler_instance.getLangOpts().CUDAIsDevice = 1; compiler_instance.getLangOpts().CUDAAllowVariadicFunctions = 1; compiler_instance.createDiagnostics(text_diagnostic_printer.get(), false); clang::EmitLLVMOnlyAction emit_llvm_only_action(&context); bool success = compiler_instance.ExecuteAction(emit_llvm_only_action); buffer.release(); return success ? 
std::move(emit_llvm_only_action.takeModule()) : nullptr; } extern "C" { WP_API int compile_cpp(const char* cpp_src, const char *input_file, const char* include_dir, const char* output_file, bool debug, bool verify_fp) { initialize_llvm(); llvm::LLVMContext context; std::unique_ptr<llvm::Module> module = cpp_to_llvm(input_file, cpp_src, include_dir, debug, verify_fp, context); if(!module) { return -1; } std::string error; const llvm::Target* target = llvm::TargetRegistry::lookupTarget(target_triple, error); const char* CPU = "generic"; const char* features = ""; llvm::TargetOptions target_options; llvm::Reloc::Model relocation_model = llvm::Reloc::PIC_; // Position Independent Code llvm::CodeModel::Model code_model = llvm::CodeModel::Large; // Don't make assumptions about displacement sizes llvm::TargetMachine* target_machine = target->createTargetMachine(target_triple, CPU, features, target_options, relocation_model, code_model); module->setDataLayout(target_machine->createDataLayout()); std::error_code error_code; llvm::raw_fd_ostream output(output_file, error_code, llvm::sys::fs::OF_None); llvm::legacy::PassManager pass_manager; #if LLVM_VERSION_MAJOR >= 18 llvm::CodeGenFileType file_type = llvm::CodeGenFileType::ObjectFile; #else llvm::CodeGenFileType file_type = llvm::CGFT_ObjectFile; #endif target_machine->addPassesToEmitFile(pass_manager, output, nullptr, file_type); pass_manager.run(*module); output.flush(); delete target_machine; return 0; } WP_API int compile_cuda(const char* cpp_src, const char *input_file, const char* include_dir, const char* output_file, bool debug) { initialize_llvm(); llvm::LLVMContext context; std::unique_ptr<llvm::Module> module = cuda_to_llvm(input_file, cpp_src, include_dir, debug, context); if(!module) { return -1; } std::string error; const llvm::Target* target = llvm::TargetRegistry::lookupTarget("nvptx64-nvidia-cuda", error); const char* CPU = "sm_70"; const char* features = "+ptx75"; // Warp requires CUDA 11.5, which supports PTX ISA 7.5 llvm::TargetOptions target_options; llvm::Reloc::Model relocation_model = llvm::Reloc::PIC_; llvm::TargetMachine* target_machine = target->createTargetMachine("nvptx64-nvidia-cuda", CPU, features, target_options, relocation_model); module->setDataLayout(target_machine->createDataLayout()); // Link libdevice llvm::SMDiagnostic diagnostic; std::string libdevice_path = std::string(include_dir) + "/libdevice/libdevice.10.bc"; std::unique_ptr<llvm::Module> libdevice(llvm::parseIRFile(libdevice_path, diagnostic, context)); if(!libdevice) { return -1; } llvm::Linker linker(*module.get()); if(linker.linkInModule(std::move(libdevice), llvm::Linker::Flags::LinkOnlyNeeded) == true) { return -1; } std::error_code error_code; llvm::raw_fd_ostream output(output_file, error_code, llvm::sys::fs::OF_None); llvm::legacy::PassManager pass_manager; #if LLVM_VERSION_MAJOR >= 18 llvm::CodeGenFileType file_type = llvm::CodeGenFileType::AssemblyFile; #else llvm::CodeGenFileType file_type = llvm::CGFT_AssemblyFile; #endif target_machine->addPassesToEmitFile(pass_manager, output, nullptr, file_type); pass_manager.run(*module); output.flush(); delete target_machine; return 0; } // Global JIT instance static llvm::orc::LLJIT* jit = nullptr; // Load an object file into an in-memory DLL named `module_name` WP_API int load_obj(const char* object_file, const char* module_name) { if(!jit) { initialize_llvm(); auto jit_expected = llvm::orc::LLJITBuilder() .setObjectLinkingLayerCreator( [&](llvm::orc::ExecutionSession &session, const llvm::Triple 
&triple) { auto get_memory_manager = []() { return std::make_unique<llvm::SectionMemoryManager>(); }; auto obj_linking_layer = std::make_unique<llvm::orc::RTDyldObjectLinkingLayer>(session, std::move(get_memory_manager)); // Register the event listener. obj_linking_layer->registerJITEventListener(*llvm::JITEventListener::createGDBRegistrationListener()); // Make sure the debug info sections aren't stripped. obj_linking_layer->setProcessAllSections(true); return obj_linking_layer; }) .create(); if(!jit_expected) { std::cerr << "Failed to create JIT instance: " << toString(jit_expected.takeError()) << std::endl; return -1; } jit = (*jit_expected).release(); } auto dll = jit->createJITDylib(module_name); if(!dll) { std::cerr << "Failed to create JITDylib: " << toString(dll.takeError()) << std::endl; return -1; } // Define symbols for Warp's CRT functions subset { #if defined(__APPLE__) #define MANGLING_PREFIX "_" #else #define MANGLING_PREFIX "" #endif const auto flags = llvm::JITSymbolFlags::Exported | llvm::JITSymbolFlags::Absolute; #if LLVM_VERSION_MAJOR >= 18 #define SYMBOL(sym) { jit->getExecutionSession().intern(MANGLING_PREFIX #sym), { llvm::orc::ExecutorAddr::fromPtr(&::sym), flags} } #define SYMBOL_T(sym, T) { jit->getExecutionSession().intern(MANGLING_PREFIX #sym), { llvm::orc::ExecutorAddr::fromPtr(static_cast<T>(&::sym)), flags} } auto error = dll->define(llvm::orc::absoluteSymbols(llvm::orc::SymbolMap({ #else #define SYMBOL(sym) { jit->getExecutionSession().intern(MANGLING_PREFIX #sym), { llvm::pointerToJITTargetAddress(&::sym), flags} } #define SYMBOL_T(sym, T) { jit->getExecutionSession().intern(MANGLING_PREFIX #sym), { llvm::pointerToJITTargetAddress(static_cast<T>(&::sym)), flags} } auto error = dll->define(llvm::orc::absoluteSymbols({ #endif SYMBOL(printf), SYMBOL(puts), SYMBOL(putchar), SYMBOL_T(abs, int(*)(int)), SYMBOL(llabs), SYMBOL(fmodf), SYMBOL_T(fmod, double(*)(double, double)), SYMBOL(logf), SYMBOL_T(log, double(*)(double)), SYMBOL(log2f), SYMBOL_T(log2, double(*)(double)), SYMBOL(log10f), SYMBOL_T(log10, double(*)(double)), SYMBOL(expf), SYMBOL_T(exp, double(*)(double)), SYMBOL(sqrtf), SYMBOL_T(sqrt, double(*)(double)), SYMBOL(cbrtf), SYMBOL_T(cbrt, double(*)(double)), SYMBOL(powf), SYMBOL_T(pow, double(*)(double, double)), SYMBOL(floorf), SYMBOL_T(floor, double(*)(double)), SYMBOL(ceilf), SYMBOL_T(ceil, double(*)(double)), SYMBOL(fabsf), SYMBOL_T(fabs, double(*)(double)), SYMBOL(roundf), SYMBOL_T(round, double(*)(double)), SYMBOL(truncf), SYMBOL_T(trunc, double(*)(double)), SYMBOL(rintf), SYMBOL_T(rint, double(*)(double)), SYMBOL(acosf), SYMBOL_T(acos, double(*)(double)), SYMBOL(asinf), SYMBOL_T(asin, double(*)(double)), SYMBOL(atanf), SYMBOL_T(atan, double(*)(double)), SYMBOL(atan2f), SYMBOL_T(atan2, double(*)(double, double)), SYMBOL(cosf), SYMBOL_T(cos, double(*)(double)), SYMBOL(sinf), SYMBOL_T(sin, double(*)(double)), SYMBOL(tanf), SYMBOL_T(tan, double(*)(double)), SYMBOL(sinhf), SYMBOL_T(sinh, double(*)(double)), SYMBOL(coshf), SYMBOL_T(cosh, double(*)(double)), SYMBOL(tanhf), SYMBOL_T(tanh, double(*)(double)), SYMBOL(fmaf), SYMBOL(memcpy), SYMBOL(memset), SYMBOL(memmove), SYMBOL(_wp_assert), SYMBOL(_wp_isfinite), SYMBOL(_wp_isnan), SYMBOL(_wp_isinf), #if defined(_WIN64) // For functions with large stack frames the compiler will emit a call to // __chkstk() to linearly touch each memory page. This grows the stack without // triggering the stack overflow guards. 
SYMBOL(__chkstk), #elif defined(__APPLE__) SYMBOL(__bzero), SYMBOL(__sincos_stret), SYMBOL(__sincosf_stret), #else SYMBOL(sincosf), SYMBOL_T(sincos, void(*)(double,double*,double*)), #endif #if LLVM_VERSION_MAJOR >= 18 }))); #else })); #endif if(error) { std::cerr << "Failed to define symbols: " << llvm::toString(std::move(error)) << std::endl; return -1; } } // Load the object file into a memory buffer auto buffer = llvm::MemoryBuffer::getFile(object_file); if(!buffer) { std::cerr << "Failed to load object file: " << buffer.getError().message() << std::endl; return -1; } auto err = jit->addObjectFile(*dll, std::move(*buffer)); if(err) { std::cerr << "Failed to add object file: " << llvm::toString(std::move(err)) << std::endl; return -1; } return 0; } WP_API int unload_obj(const char* module_name) { if(!jit) // If there's no JIT instance there are no object files loaded { return 0; } auto* dll = jit->getJITDylibByName(module_name); llvm::Error error = jit->getExecutionSession().removeJITDylib(*dll); if(error) { std::cerr << "Failed to unload: " << llvm::toString(std::move(error)) << std::endl; return -1; } return 0; } WP_API uint64_t lookup(const char* dll_name, const char* function_name) { auto* dll = jit->getJITDylibByName(dll_name); auto func = jit->lookup(*dll, function_name); if(!func) { std::cerr << "Failed to lookup symbol: " << llvm::toString(func.takeError()) << std::endl; return 0; } return func->getValue(); } } // extern "C" } // namespace wp
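The extern "C" entry points above form the complete lifecycle of the LLVM CPU path: compile_cpp() produces an object file, load_obj() JIT-loads it into a named in-memory module, lookup() resolves an entry point, and unload_obj() tears the module down. A minimal host-side driver might look like the hedged sketch below; the file names, the module name "kernel_module", and the function name "kernel_entry" are hypothetical, and the snippet assumes it is linked against the library that exports these WP_API symbols.

```cpp
// Hypothetical driver for the extern "C" API defined above (illustrative only).
#include <cstdint>
#include <cstdio>

extern "C" {
int compile_cpp(const char* cpp_src, const char* input_file, const char* include_dir,
                const char* output_file, bool debug, bool verify_fp);
int load_obj(const char* object_file, const char* module_name);
int unload_obj(const char* module_name);
uint64_t lookup(const char* dll_name, const char* function_name);
}

int main()
{
    const char* src = "extern \"C\" int kernel_entry() { return 42; }";

    // 1. Compile the C++ source to a relocatable object file (paths are made up).
    if (compile_cpp(src, "kernel.cpp", "./include", "kernel.o",
                    /*debug=*/false, /*verify_fp=*/false) != 0)
        return 1;

    // 2. JIT-load the object into an in-memory module named "kernel_module".
    if (load_obj("kernel.o", "kernel_module") != 0)
        return 1;

    // 3. Resolve the entry point and call it.
    if (auto fn = reinterpret_cast<int (*)()>(lookup("kernel_module", "kernel_entry")))
        std::printf("kernel_entry() = %d\n", fn());

    // 4. Unload the module when finished.
    return unload_obj("kernel_module");
}
```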
18,649
C++
36.525151
181
0.659231
NVIDIA/warp/warp/native/cutlass/PUBLICATIONS.md
# Publications Using Cutlass ## 2022 - ["Bolt: Bridging the Gap between Auto-tuners and Hardware-native Performance"](https://arxiv.org/abs/2110.15238). Jiarong Xing, Leyuan Wang, Shang Zhang, Jack Chen, Ang Chen, Yibo Zhu. _Proceedings of the 5th MLSys Conference_, August 2022. - ["Recovering single precision accuracy from Tensor Cores while surpassing the FP32 theoretical peak performance"](https://arxiv.org/abs/2203.03341). Hiroyuki Ootomo, Rio Yokota. _International Journal of High Performance Computing_, March 2022. ## 2021 - ["Arithmetic-intensity-guided fault tolerance for neural network inference on GPUs"](https://dl.acm.org/doi/abs/10.1145/3458817.3476184). Jack Kosaian, K. V. Rashmi. _Proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis_, November 2021. - ["Real-time Neural Radiance Caching for Path Tracing"](https://d1qx31qr3h6wln.cloudfront.net/publications/paper_4.pdf). Thomas Muller, Fabrice Rousselle, Jan Novak, Alex Keller. _ACM Trans. Graph._, August 2021. ## 2020 - ["Scalable Knowledge Graph Analytics at 136 Petaflop/s"](https://www.computer.org/csdl/proceedings-article/sc/2020/999800a061/1oeORDgCM0g). Ramakrishnan Kannan, Piyush Sao, Hao Lu, Drahomira Herrmannova, Vijay Thakkar, Robert Patton, Richard Vuduc, Thomas Potok. _Proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis_, November 2020. - ["Accelerating Sparse DNN Models without Hardware-Support via Tile-Wise Sparsity "](https://arxiv.org/abs/2008.13006). Cong Guo, Bo Yang Hsueh, Jingwen Leng, Yuxian Qiu, Yue Guan, Zehuan Wang, Xiaoying Jia, Xipeng Li, Minyi Guo, Yuhao Zhu. _Proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis_, November 2020. - ["Strassen's Algorithm Reloaded on GPUs"](https://dl.acm.org/doi/10.1145/3372419). Jianyu Huang, Chenhan D. Yu, Robert A. van de Geijn. _ACM Transactions on Mathematical Software_, March 2020.
2,019
Markdown
86.826083
392
0.778108
NVIDIA/warp/warp/native/cutlass/CONTRIBUTORS.md
![ALT](/media/images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS") [README](/README.md#documentation) > **Contributors** # CUTLASS Developers and Contributors This is the official list of CUTLASS developers and contributors. ## DEVELOPERS Andrew Kerr Haicheng Wu Manish Gupta Dustyn Blasig Pradeep Ramani Cris Cecka Vijay Thakkar Aniket Shivam Honghao Lu Ethan Yan Zhaodong Chen Jack Kosaian Yujia Zhai Naila Farooqui Piotr Majcher Paul Springer Jin Wang Chinmay Talegaonkar Shang Zhang Scott Yokim Markus Hohnerbach Aditya Atluri David Tanner Manikandan Ananth ## CUTLASS Product Manager Matthew Nicely ## CONTRIBUTORS Timothy Costa Julien Demouth Brian Fahs Michael Goldfarb Mostafa Hagog Fei Hu Alan Kaatz Tina Li Timmy Liu Duane Merrill Kevin Siu Markus Tavenrath John Tran Vicki Wang Junkai Wu Fung Xie Albert Xu Jack Yang Xiuxia Zhang Nick Zhao ## ACKNOWLEDGEMENTS Girish Bharambe Luke Durant Olivier Giroux Stephen Jones Rishkul Kulkarni Bryce Lelbach Joel McCormack Kyrylo Perelygin
1,011
Markdown
13.457143
74
0.816024
NVIDIA/warp/warp/native/cutlass/CHANGELOG.md
# NVIDIA CUTLASS Changelog ## [2.11.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.11.0) (2022-11-19) * Stream-K, which is a new general way to do split-K. It can not only improve performance, but can also significantly reduce the number of tile sizes that need to be profiled to find the best one. * [Fused multi-head attention kernel](/examples/41_fused_multi_head_attention). It has two variants: one uses batched GEMM for the fixed sequence length, and the other one uses group GEMM for the variable sequence length. Both versions just need one kernel. * [Dual GEMM](/examples/45_dual_gemm), which can fuse A x B and A x C into one kernel. The two GEMMs have no producer-consumer dependency. * Hopper improves [double precision matrix multiplication](/test/unit/gemm/device/gemm_f64n_f64t_f64t_tensor_op_f64_sm90.cu) by 2x compared to Ampere at iso-clocks. It is supported since CUDA 11.8. * [BLAS3](/test/unit/gemm/device/hemm_cf64_cf64_cf64_tensor_op_f64_sm90.cu) functions with Hopper's new double precision matrix multiplication instructions. * [ELL Block Sparse GEMM](/examples/43_ell_block_sparse_gemm), which uses an [ELL matrix](https://developer.nvidia.com/blog/accelerating-matrix-multiplication-with-block-sparse-format-and-nvidia-tensor-cores/) to describe the sparsity of the A matrix. The B and output matrices are still dense. The block size can be arbitrary. * Optimized [Group Conv](/examples/42_ampere_tensorop_group_conv) for SingleGroup mode, which requires that the output channel per group is a multiple of Threadblock tile N. * [Optimized DepthWise Conv](/examples/46_depthwise_simt_conv2dfprop/depthwise_simt_conv2dfprop.cu). Two new modes are added: * [kOptimized](/test/unit/conv/device/depthwise_conv2d_fprop_direct_conv_f16nhwc_f16nhwc_f16nhwc_simt_f16_sm60.cu) - uses direct conv to compute instead of implicit GEMM. * The restrictions are: 1) input channel, output channel, and group number should be multiples of (128 / sizeof(input element)). 2) The input filter size should be the same as the template parameter configuration. * [kFixedStrideDilation](/test/unit/conv/device/depthwise_conv2d_fprop_direct_conv_fixed_stride_dilation_f16nhwc_f16nhwc_f16nhwc_simt_f16_sm60.cu) - which puts stride and dilation into templates to further improve the performance. In this mode, the kernel keeps some inputs persistent in registers to squeeze out more performance, so large filter/stride/dilation sizes are not recommended. * The restrictions are: 1) input channel, output channel, and group number should be multiples of (128 / sizeof(input element)). 2) input filter size, stride, and dilation should be the same as the template parameter configuration. * [Scripts](/examples/44_multi_gemm_ir_and_codegen) to fuse multiple back-to-back GEMMs. The implementation was discussed in a GTC'22 Spring [talk](https://www.nvidia.com/en-us/on-demand/session/gtcspring22-s41606/). * [FP8 data type definition](/include/cutlass/float8.h) and [conversion routines](/include/cutlass/numeric_conversion.h#L1274-2115). * Updates and bugfixes from the community (thanks!). Big shout out to Meta's [xFormers](https://github.com/facebookresearch/xformers). * **Deprecation announcement:** CUTLASS plans to deprecate the following: * Maxwell and Pascal GPU architectures * Ubuntu 16.04 * CUDA 10.2 ## [2.10.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.10.0) (2022-08-23) * [CUTLASS Python](/examples/40_cutlass_py) now supports GEMM, CONV, Group GEMM for different data types as well as different epilogue flavours. 
* Optimizations for CUTLASS's [Grouped GEMM](examples/24_gemm_grouped/gemm_grouped.cu) kernel. The threadblock scheduling part is improved. Some computation can be moved to the host side if applicable. [Grouped Syr2k](examples/38_syr2k_grouped/syr2k_grouped.cu) kernels are added, too. * Optimizations for [GEMM+Softmax](examples/35_gemm_softmax). All the reduction computation is fused into the previous GEMM. More template arguments are provided to fine-tune the performance. * [Grouped GEMM for Multihead Attention](examples/41_multi_head_attention). This general group-GEMM-based MHA does not require the sequence lengths of all GEMMs to be the same, which makes it most useful for natural language processing. * [GEMM + Layer norm fusion for Ampere](examples/37_gemm_layernorm_gemm_fusion/) splits the layernorm into two parts, and both of them can be fused into the GEMMs before and after it separately. In addition to using the square sum to compute the variance of the layernorm, [Shift-K](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data) is provided in case the square sum raises numerical issues. * [GEMM Epilogue Permutation Fusion](examples/39_gemm_permute) can apply a user-provided permutation layout mapping in the GEMM epilogue. * [Grouped convolution targeting implicit GEMM](test/unit/conv/device/group_conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_sm80.cu) introduces the first group convolution implementation to CUTLASS. It is an Analytical implementation, not an Optimized one. The restrictions are: 1) input and output channel numbers should be multiples of the group number. 2) split-K is not supported. The implementation has 2 modes: * kSingleGroup: output channel per group is a multiple of Threadblock tile N. * kMultipleGroup: Threadblock tile N is a multiple of the output channel per group. * [Depthwise separable convolution](test/unit/conv/device/depthwise_fprop_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_simt_f16_sm60.cu) introduces the first depthwise convolution, which is also Analytical for now. The restrictions are: 1) SIMT only 2) No split-K 3) input channel equals output channel equals group number. * Standalone [Layernorm](/tools/util/include/cutlass/util/device_layernorm.h) and [Pooling](/tools/util/include/cutlass/util/device_nhwc_pooling.h) kernels. * [Back-to-back GEMM/CONV](examples/13_two_tensor_op_fusion) relaxes the requirement that the first GEMM K dimension needs to be a multiple of the Threadblock Tile K dimension. * Optimal performance using [**CUDA 11.6u2**](https://developer.nvidia.com/cuda-downloads) * Updates and bugfixes from the community (thanks!) 
## [2.9.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.9.0) (2022-04-21) * [First layer Convolution kernels](/test/unit/conv/device/conv2d_fprop_fixed_channels_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_sm80.cu) specialized for small channel counts and reduced alignment * [Few channels](/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_few_channels.h) specialization for reduced alignment capabilities * [Fixed channels](/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_fixed_channels.h) further specialized when channel count perfectly matches the access vector size * [Unit tests](/test/unit/conv/device/conv2d_fprop_few_channels_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_sm80.cu) * [Python-based instance emitter](/tools/library/scripts/generator.py) in the CUTLASS Library and support in the Profiler * [BLAS3](https://docs.nvidia.com/cuda/cublas/index.html#cublas-level-3-function-reference) operators accelerated by Tensor Cores * Supported types: f32, cf32, f64, cf64, tf32x3, complex tf32x3 * [HERK](/test/unit/gemm/device/her2k_cf32h_cf32n_tensor_op_fast_f32_sm80.cu) with [emitter](/tools/library/scripts/rank_k_operation.py) * [SYRK](/test/unit/gemm/device/syrk_f32n_f32t_tensor_op_fast_f32_sm80.cu) with [emitter](/tools/library/scripts/rank_k_operation.py) * [SYMM](/test/unit/gemm/device/symm_f32n_f32n_tensor_op_fast_f32_ls_sm80.cu) with [emitter](/tools/library/scripts/symm_operation.py) * [TRMM](/test/unit/gemm/device/trmm_f32n_f32t_f32t_tensor_op_fast_f32_ls_sm80.cu) with [emitter](/tools/library/scripts/trmm_operation.py) * [Unit tests](/test/unit/gemm/device/testbed_rank_k_universal.h) * [CUTLASS Python](/examples/40_cutlass_py) demonstrating JIT compilation of CUTLASS kernels and a Python-based runtime using [CUDA Python](https://developer.nvidia.com/cuda-python) * [Python-based runtime](/tools/library/scripts/rt.py) interoperable with existing emitters * [GEMM + Softmax example](/examples/35_gemm_softmax) * [Gather and Scatter Fusion with GEMM](/examples/36_gather_scatter_fusion) can gather inputs and scatters outputs based on indices vectors in the same GEMM kernel. * It can select random rows in a row major matrix. * It can select random columns in a column major matrix. * [Back-to-back GEMM/CONV](examples/13_two_tensor_op_fusion) fully supports buffering the first GEMM/CONV results in the shared memory for the latter one to use. It can eliminate register spill when the tile size is big. Additionally, bias vector add is supported in the first GEMM/CONV. * Supported kernels: GEMM and CONV. * Supported types: fp16 and int8. * Supported architectures: Turing and Ampere. * [Transposed Convolution](/examples/34_transposed_conv2d) (a.k.a Deconvolution) support which reuses Dgrad implementation. * [Utility functions](/tools/util/include/cutlass/util) that can pad NHWC and convert between NCHW and NHWC. * [Small alignment implicit gemm](https://github.com/NVIDIA/cutlass/issues/242) support for Fprop/Dgrad/Wgrad so that padding is no longer mandated to use tensor cores in these kernels. * Epilogue enhancement: * Eliminate bank conflicts in int8 tensor core kernels. * Half2 usage if epilogue compute type is fp16. * More activation functions: Silu, Hardswish, Leaky Relu. * New elementwise fusion pattern for [residual block](/include/cutlass/epilogue/thread/linear_combination_residual_block.h). 
* [Group GEMM](/examples/24_gemm_grouped) thread block number calculation fix which helps to launch the intended number of threadblocks to fully occupy the GPUs. * [Parallel GEMM splitk](https://github.com/NVIDIA/cutlass/pull/277) support in the CUTLASS profiler. * Optimal performance using [**CUDA 11.6u2**](https://developer.nvidia.com/cuda-downloads) * Updates and bugfixes from the community (thanks!) ## [2.8.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.8.0) (2021-11-19) * **TF32x3:** emulated single-precision using Tensor Cores * 45+ TFLOPs on NVIDIA A100 * [GEMM SDK example](/examples/27_ampere_3xtf32_fast_accurate_tensorop_gemm/27_ampere_3xtf32_fast_accurate_tensorop_gemm.cu) (real) * [COMPLEX GEMM SDK example](/examples/29_ampere_3xtf32_fast_accurate_tensorop_complex_gemm/29_ampere_3xtf32_fast_accurate_tensorop_complex_gemm.cu) (complex) * [Implicit GEMM Convolution SDK example](/examples/28_ampere_3xtf32_fast_accurate_tensorop_fprop/ampere_3xtf32_fast_accurate_tensorop_fprop.cu) * **Mainloop fusion for Convolution:** convolution with fused per-channel scale-bias-relu * [Conv Fprop SDK example](/examples/25_ampere_fprop_mainloop_fusion/ampere_fprop_mainloop_fusion.cu) * [Conv WGrad SDK example](/examples/26_ampere_wgrad_mainloop_fusion/ampere_wgrad_mainloop_fusion.cu) * [cutlass::conv::device::ImplicitGemmConvolutionFusion](/include/cutlass/conv/device/implicit_gemm_convolution_fusion.h) * **Grouped GEMM:** similar to batched GEMM with distinct problem size per group * [SDK example](/examples/24_gemm_grouped) with performance comparison with Batched Strided GEMM * [cutlass::gemm::device::GemmGrouped](/include/cutlass/gemm/device/gemm_grouped.h) * [Implicit GEMM Convolution fusion](/examples/13_two_tensor_op_fusion/) supports staging the 1st convolution's output accumulator in the shared memory on Turing. This allows more flexible warp tile sizes and less register pressure. * Optimal performance using [**CUDA 11.5**](https://developer.nvidia.com/cuda-downloads) * Updates from the community (thanks!) * **Deprecation announcement:** CUTLASS plans to deprecate the following: * Maxwell and Pascal GPU architectures * Ubuntu 16.04 * CUDA 10.2 ## [2.7.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.7.0) (2021-09-24) * Mainloop fusion for GEMM: [summation over A or B](/examples/23_ampere_gemm_operand_reduction_fusion/ampere_gemm_operand_reduction_fusion.cu) * [Strided DGRAD (optimized iterators)](/include/cutlass/conv/kernel/default_conv2d_dgrad.h) * [Half-precision GELU_taylor activation functions](/include/cutlass/epilogue/thread/activation.h#L196) * Use these when accumulation and epilogue compute types are all `cutlass::half_t` * Tuning and bug fixes to [fused GEMM + GEMM example](/examples/13_two_tensor_op_fusion/) * Support for smaller than 128b aligned Convolutions: [see examples](test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.cu#L272) * Caching of results to accelerate Convolution [unit tests](test/unit/conv/device/cache_testbed_output.h) * Can be enabled or disabled by running `cmake .. -DCUTLASS_TEST_ENABLE_CACHED_RESULTS=OFF` * Corrections and bug fixes reported by the CUTLASS community * Thank you for filing these issues! 
## [2.6.1](https://github.com/NVIDIA/cutlass/releases/tag/v2.6.1) (2021-09-03) * Arbitrary padding and striding for CUTLASS Strided DGRAD Convolution operator (Analytic Iterators) * Tuning for GEMMs fused with partial reductions * Corrections and bug fixes reported by the CUTLASS community * Thank you for filing these issues! ## [2.6.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.6.0) (2021-07-22) * Optimal performance when compiled with the [CUDA 11.4 Toolkit](https://developer.nvidia.com/cuda-toolkit) * Adopt the new L2 prefetch feature in [cp.async](/include/cutlass/arch/memory.h) and [global load](/include/cutlass/arch/memory_sm80.h) * Fused operators with GEMM and Convolution * [Fused broadcast in epilogue](test/unit/gemm/device/gemm_with_broadcast_f16n_f16n_f16n_tensorop_f32_sm75.cu) * [Fused partial reduction in epilogue](/test/unit/gemm/device/gemm_with_reduction_f16n_f16n_f16n_tensorop_f32_sm75.cu) * 64b tensor strides and leading dimensions support for GEMMs * Affine rank=2 matrix layouts * Row stride and column stride for matrices using [cutlass::layout::AffineRank2](/include/cutlass/layout/matrix.h) * Support [FP64 tensor core](/examples/18_ampere_fp64_tensorop_affine2_gemm/ampere_fp64_tensorop_affine2_gemm.cu) and SIMT GEMM. * [Batched GEMV](/test/unit/gemm/device/gemv.cu) preview implementation * [New strided Dgrad](test/unit/conv/device/conv2d_strided_dgrad_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu) implementation * Accelerates over previous implementation by cutting down redundant math by 4x * Support using new `Dy` and `w` analytic iterators and existing `cutlass::conv::device::ImplicitGemmConvolution` interface * Quaternion-valued GEMM and Convolution in single- and double-precision (targeting CUDA Cores) * Updates to [quaternion.h](/include/cutlass/quaternion.h) and [functional.h](/include/cutlass/functional.h) * SDK Example for [GEMM](/examples/21_quaternion_gemm/quaternion_gemm.cu) and [Convolution](/examples/22_quaternion_gemm/quaternion_conv.cu) * [Unit tests for GEMM](/test/unit/gemm/device/simt_qgemm_nn_sm50.cu) and [Convolution](/test/unit/conv/device/conv2d_fprop_implicit_gemm_qf32nhwc_qf32nhwc_qf32nhwc_simt_f32_sm50.cu) * Many improvements to the epilogue. * Provide an [option](/include/cutlass/epilogue/threadblock/epilogue.h) to not fully unroll the epilogue to reduce the code size and improve the performance when using complicated elementwise operations * Performance improvement for FP16 tensor core kernels * Bug fixes * Enhanced Clang support and the combination of Clang 13 and CUDA 11.4 can build and run kernels from Pascal and Ampere. * Updated minimum CUDA Toolkit requirement to 10.2 * [CUDA 11.4 Toolkit](https://developer.nvidia.com/cuda-toolkit) recommended * Corrections and bug fixes reported by the CUTLASS community * Thank you for filing these issues! 
## [2.5.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.5.0) (2021-02-26) * Tensor reductions * _m_-to-_n_ reductions of tensors with affine layout * [Specializations](/test/unit/reduction/device/tensor_reduce_contiguous.cu) for reductions including contiguous dimension * [Specializations](/test/unit/reduction/device/tensor_reduce_strided.cu) for reductions excluding contiguous dimension * Custom reduction functors such as `cutlass::logical_and` * Large tensor support, up to 2^63 elements (however, each dimension is limited to an extent of 2^31) * Optimizations for 3-D convolution * [Optimized tile iterators](include/cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_optimized.h) using precomputed delta table for 3-D convolution * Full coverage of [forward](test/unit/conv/device/conv3d_fprop_implicit_gemm_f16ndhwc_f16ndhwc_f32ndhwc_tensor_op_f32_sm80.cu) and [backwards](test/unit/conv/device/conv3d_dgrad_implicit_gemm_f16ndhwc_f16ndhwc_f32ndhwc_tensor_op_f32_sm80.cu) passes for 3D convolution * [Fused Convolution+Convolution example](/examples/13_two_tensor_op_fusion/README.md) * Corrections and bug fixes reported by the CUTLASS community * Thank you for filing these issues! ## [2.4.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.4.0) (2020-11-19) * Implicit GEMM convolution kernels supporting CUDA and Tensor Cores on NVIDIA GPUs * Operators: forward (Fprop), backward data gradient (Dgrad), and backward weight gradient (Wgrad) convolution * Data type: FP32, complex<FP32>, Tensor Float 32 (TF32), BFloat16 (BF16), Float16, Int4, Int8, Int32 * Spatial dimensions: 1-D, 2-D, and 3-D * Layout: NHWC, NCxHWx * Implicit GEMM convolution components: * Global memory iterators supporting Fprop, Dgrad, and Wgrad * `MmaMultistage` for implicit GEMM convolution for NVIDIA Ampere architecture * `MmaPipeline` for implicit GEMM convolution for NVIDIA Volta and Turing architectures * [Documentation](/media/docs/implicit_gemm_convolution.md) describing Implicit GEMM Convolution algorithm and implementation ## [2.3.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.3.0) (2020-09-23) * [NVIDIA Ampere Architecture features](https://devblogs.nvidia.com/nvidia-ampere-architecture-in-depth/) * [Sparse Tensor Core GEMM kernels](test/unit/gemm/device/gemm_f16n_f16n_f32t_tensor_op_f32_sparse_sm80.cu): * Direct access to Sparse Tensor Cores and maximum performance via [`mma.sp.sync`](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-mma-and-friends) * Fast SGEMM targeting GeForce RTX 30-series CUDA Cores * Minor Features: * [Activation functions](/include/cutlass/epilogue/thread/activation.h) such as [GeLU](/include/cutlass/epilogue/thread/linear_combination_gelu.h) and [Sigmoid](/include/cutlass/epilogue/thread/linear_combination_sigmoid.h) * Small [matrix](/include/cutlass/matrix.h) and [quaternion](/include/cutlass/quaternion.h) template classes in device code * [Floating-point constants](/include/cutlass/constants.h) * NVIDIA Ampere GPU Architecture examples and documentation: * [Tensor Float 32](/examples/14_ampere_tf32_tensorop_gemm/ampere_tf32_tensorop_gemm.cu) and * [Sparse Tensor Cores](/examples/15_ampere_sparse_tensorop_gemm/ampere_sparse_tensorop_gemm.cu) * Documentation added on CUTLASS [efficient row-major epilogue](/media/docs/gemm_api.md#efficient-epilogue) ## [2.2.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.2.0) (2020-06-08) * [NVIDIA Ampere Architecture 
features](https://devblogs.nvidia.com/nvidia-ampere-architecture-in-depth/) * Fast Tensor Core operations: * Maximum performance via [`mma.sync`](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-mma-and-friends) * Tensor Float 32, BFloat16, and double-precision data types * Mixed integer data types (int8, int4, bin1) * Asynchronous copy for deep software pipelines via [`cp.async`](https://docs.nvidia.com/cuda/parallel-thread-execution) * Described in [GTC 2020 Webinar (SR 21745)](https://developer.nvidia.com/gtc/2020/video/s21745) (free registration required) * Features: * SDK examples showing GEMM fused with bias+relu and fused GEMM+GEMM * Complex-valued GEMMs targeting NVIDIA Ampere Tensor Cores in double-precision and Tensor Float 32 * Gaussian complex GEMMs using 3m complex multiply algorithm * Universal GEMM kernel supporting two batch modes and two algorithms for parallel reductions * Policy updates: * [CUDA 11 Toolkit](https://developer.nvidia.com/cuda-toolkit) needed to enable NVIDIA Ampere Architecture features * Disabled F16C by default for compatibility - enable on cmake command line with `-DCUTLASS_ENABLE_F16C=ON` ## [2.1.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.1.0) (2020-04-06) * BLAS-style host-side API added to [CUTLASS Library](/media/docs/quickstart.md#cutlass-library) * API to launch compiled kernel instances for GEMM and planar complex GEMM * Planar Complex GEMM kernels targeting Volta and Turing Tensor Cores * Computes complex matrix products on matrices stored as disjoint real and imaginary parts * [SDK Examples of Planar Complex GEMMs](/examples/10_planar_complex/planar_complex.cu) * Minor enhancements and bug fixes ## [2.0.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.0.0) (2019-11-19) * Substantially refactored for * Better performance, particularly for native Turing Tensor Cores * Robust and durable templates spanning the design space * Encapsulated functionality embodying modern C++11 programming techniques * Optimized containers and data types for efficient, generic, portable device code * Updates to: * [Quick start guide](/media/docs/quickstart.md) * [Documentation](/README.md#documentation) * [Utilities](/media/docs/utilities.md) * [CUTLASS Profiler](/media/docs/profiler.md) * Native Turing Tensor Cores * Efficient GEMM kernels targeting Turing Tensor Cores * Mixed-precision floating point, 8-bit integer, 4-bit integer, and binarized operands * Coverage of existing CUTLASS functionality * GEMM kernels targeting CUDA and Tensor Cores in NVIDIA GPUs * Volta Tensor Cores through native mma.sync and through WMMA API * Optimizations such as parallel reductions, threadblock rasterization, and intra-threadblock reductions * Batched GEMM operations * Complex-valued GEMMs * **Note: a host compiler supporting C++11 or greater is required.** # CUTLASS 1.x ## [1.3.2](https://github.com/NVIDIA/cutlass/releases/tag/v1.3.2) (2019-07-09) * Performance improvement for Volta Tensor Cores TN and TT layouts. ## [1.3.1](https://github.com/NVIDIA/cutlass/releases/tag/v1.3.1) (2019-04-09) * Corrected NVRTC unit tests. ## [1.3.0](https://github.com/NVIDIA/cutlass/releases/tag/v1.3.0) (2019-03-20) * Efficient GEMM kernel targeting Volta Tensor Cores via `mma.sync` instruction added in CUDA 10.1. 
## [1.2.0](https://github.com/NVIDIA/cutlass/releases/tag/v1.2.0) (2018-10-26) * Parallelized reductions across threadblocks ("Split-K") * Improved IGEMM performance * Batched strided WMMA GEMMs ## [1.1.0](https://github.com/NVIDIA/cutlass/releases/tag/v1.1.0) (2018-09-19) * Turing Features * WMMA GEMM targeting TensorCores - INT8, INT4, 1-bit * Batched Strided GEMM * Threadblock rasterization strategies * Improved performance for adverse problem sizes and data layouts * Extended CUTLASS Core components * Tensor views support arbitrary matrix and tensor layouts * Zip iterators for structuring multiple data streams * Enhanced CUTLASS utilities * Reference code for tensor operations in host and device code * Added HostMatrix<> for simplified matrix creation * Examples * Basic GEMM, tensor views, CUTLASS utilities, batched GEMM, WMMA GEMM ## [1.0.1](https://github.com/NVIDIA/cutlass/releases/tag/v1.0.1) (2018-06-11) * Intra-threadblock reduction added for small threadblock tile sizes * sgemm_64x128x16, sgemm_128x128x16, sgemm_128x64x16, sgemm_128x32x16, sgemm_64x64x16, sgemm_64x32x16 * igemm_32x32x128 * GEMM _K_ residue handled during prologue prior to mainloop * Replaced Google Test copy with submodule. Use `git submodule update --init --recursive` ## [1.0.0](https://github.com/NVIDIA/cutlass/commit/2028ebe120aab22bfd0b2baf8902d4c9627eb33f) (2018-05-16) * Substantial rewrite to accommodate new architecture * Kernels: SGEMM, DGEMM, IGEMM, HGEMM, WMMA GEMM * Unit and performance tests ## [0.0.1](https://github.com/NVIDIA/cutlass/commit/d08ba8ac46e2fa3f745e070c390182edb56b2e91) (2017-12-04) * Initial release ## Copyright Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. SPDX-License-Identifier: BSD-3-Clause ``` Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ```
26,247
Markdown
81.54088
427
0.770107
NVIDIA/warp/warp/native/cutlass/README.md
![ALT](/media/images/gemm-hierarchy-with-epilogue-no-labels.png "Complete CUDA GEMM decomposition") # CUTLASS 2.11 _CUTLASS 2.11 - November 2022_ CUTLASS is a collection of CUDA C++ template abstractions for implementing high-performance matrix-multiplication (GEMM) and related computations at all levels and scales within CUDA. It incorporates strategies for hierarchical decomposition and data movement similar to those used to implement cuBLAS and cuDNN. CUTLASS decomposes these "moving parts" into reusable, modular software components abstracted by C++ template classes. These thread-wide, warp-wide, block-wide, and device-wide primitives can be specialized and tuned via custom tiling sizes, data types, and other algorithmic policy. The resulting flexibility simplifies their use as building blocks within custom kernels and applications. To support a wide variety of applications, CUTLASS provides extensive support for mixed-precision computations, providing specialized data-movement and multiply-accumulate abstractions for half-precision floating point (FP16), BFloat16 (BF16), Tensor Float 32 (TF32), single-precision floating point (FP32), [FP32 emulation via tensor core instruction](/examples/27_ampere_3xtf32_fast_accurate_tensorop_gemm), double-precision floating point (FP64) types, integer data types (4b and 8b), and binary data types (1b). CUTLASS demonstrates warp-synchronous matrix multiply operations targeting the programmable, high-throughput _Tensor Cores_ implemented by NVIDIA's Volta, Turing, and Ampere architectures. CUTLASS implements high-performance Convolution via the implicit GEMM algorithm. Implicit GEMM is the formulation of a convolution operation as a GEMM thereby taking advantage of CUTLASS's modular GEMM pipeline. This allows CUTLASS to build convolutions by reusing highly optimized warp-wide GEMM components and below. See the [Quick Start Guide](/media/docs/quickstart.md) to get started quickly. See the [functionality listing](/media/docs/functionality.md) for the list of operations supported at each level of the execution model hierarchy. # What's New in CUTLASS 2.11 CUTLASS 2.11 is an update to CUTLASS adding: - Stream-K, which is a new general way to do split-K. It can not only improve performance, but can also significantly reduce the number of tile sizes that need to be profiled to find the best one. - [Fused multi-head attention kernel](/examples/41_fused_multi_head_attention). It has two variants: one for fixed sequence lengths, and another for variable sequence lengths. - [Dual GEMM](/examples/45_dual_gemm). It can run two GEMMs that share the same left input matrix in one kernel. - Hopper improves [double precision matrix multiplication](/test/unit/gemm/device/gemm_f64n_f64t_f64t_tensor_op_f64_sm90.cu) by 2x compared to Ampere at iso-clocks. It is supported since CUDA 11.8. - [BLAS3](/test/unit/gemm/device/hemm_cf64_cf64_cf64_tensor_op_f64_sm90.cu) functions with Hoppers new double precision matrix multiplication instructions. - [ELL Block Sparse GEMM](/examples/43_ell_block_sparse_gemm). - [Optimized Group Conv](/examples/42_ampere_tensorop_group_conv). - [Optimized DepthWise Conv](/examples/46_depthwise_simt_conv2dfprop). - [Scripts](/examples/44_multi_gemm_ir_and_codegen) to fuse multiple back-to-back GEMM. - [FP8 data type definition](/include/cutlass/float8.h) and [conversion routines](/include/cutlass/numeric_conversion.h#L1274-2115). - Updates and bugfixes from the community (thanks!). 
Big shout out to Meta's [xFormers](https://github.com/facebookresearch/xformers). - **Deprecation announcement:** CUTLASS plans to deprecate the following in the next major release: - Maxwell and Pascal GPU architectures - Ubuntu 16.04 - CUDA 10.2 **See the [CHANGELOG](CHANGELOG.md) for a detailed listing of releases and updates.** # Performance <p align="center"><img src=/media/images/cutlass-2.8-gemm-performance.png></p> CUTLASS primitives are very efficient. When used to construct device-wide GEMM kernels, they exhibit performance comparable to cuBLAS for scalar GEMM computations. The above figure shows CUTLASS performance relative to cuBLAS for large matrix dimensions on an [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/), an [NVIDIA A2](https://www.nvidia.com/en-us/data-center/products/a2/), an [NVIDIA TitanV](https://www.nvidia.com/en-us/titan/titan-v/), and an [NVIDIA GeForce 2080 Ti](https://www.nvidia.com/en-us/geforce/graphics-cards/rtx-2080-ti/) compiled with the [CUDA 11.5 Toolkit](https://developer.nvidia.com/cuda-downloads). Tensor Core operations are implemented using CUDA's [mma instruction](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-mma). <p align="center"><img src=/media/images/cutlass-2.9-implicit-gemm-performance.png></p> When using CUTLASS building blocks to construct device-wide implicit gemm (Fprop, Dgrad, and Wgrad) kernels, CUTLASS performance is also comparable to cuDNN when running Resnet-50 layers on an [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) as shown in the above figure. Tensor Core operations are still implemented using CUDA's [mma instruction](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-mma). # Compatibility CUTLASS requires a C++11 host compiler and performs best when built with the [**CUDA 11.8 Toolkit**](https://developer.nvidia.com/cuda-toolkit). It is also compatible with CUDA 11.x. ## Operating Systems We have tested the following environments. |**Operating System** | **Compiler** | |-----------------|----------| | Windows 10 | Microsoft Visual Studio 2015| | | Microsoft Visual Studio 2017| | | Microsoft Visual Studio 2019| | Ubuntu 18.04 | GCC 7.5.0 | | Ubuntu 20.04 | GCC 10.3.0 | | Ubuntu 22.04 | GCC 11.2.0 | Additionally, CUTLASS may be built with clang. See [these instructions](media/docs/quickstart.md#clang) for more details. ## Hardware CUTLASS runs successfully on the following NVIDIA GPUs, and it is expected to be efficient on any Volta-, Turing-, or NVIDIA Ampere- architecture NVIDIA GPU. |**GPU**|**CUDA Compute Capability**|**Minimum CUDA Toolkit**|**Minimum CUDA Toolkit Enabling Native Tensor Cores**| |---|---|---|---| |NVIDIA Tesla V100|7.0|9.2|10.1| |NVIDIA TitanV|7.0|9.2|10.1| |NVIDIA GeForce RTX 2080 TI, 2080, 2070|7.5|10.0|10.2| |NVIDIA Tesla T4|7.5|10.0|10.2| |NVIDIA A100|8.0|11.0|11.0| |NVIDIA A10 |8.6|11.1|11.1| |NVIDIA GeForce 3090|8.6|11.1|11.1| |NVIDIA H100 PCIe|9.0|11.8|Double-precision: 11.8| # Documentation CUTLASS is described in the following documents and the accompanying [Doxygen documentation](https://nvidia.github.io/cutlass). 
- [Quick Start Guide](/media/docs/quickstart.md) - build and run CUTLASS - [Functionality](/media/docs/functionality.md) - summarizes functionality available in CUTLASS - [Efficient GEMM in CUDA](media/docs/efficient_gemm.md) - describes how GEMM kernels may be implemented efficiently in CUDA - [GEMM API](media/docs/gemm_api.md) - describes the CUTLASS GEMM model and C++ template concepts - [Implicit GEMM Convolution](media/docs/implicit_gemm_convolution.md) - describes 2-D and 3-D convolution in CUTLASS - [Code Organization](media/docs/code_organization.md) - describes the organization and contents of the CUTLASS project - [Terminology](media/docs/terminology.md) - describes terms used in the code - [Programming Guidelines](media/docs/programming_guidelines.md) - guidelines for writing efficient modern CUDA C++ - [Fundamental types](media/docs/fundamental_types.md) - describes basic C++ classes used in CUTLASS to represent numeric quantities and arrays - [Layouts](media/docs/layout.md) - describes layouts of matrices and tensors in memory - [Tile Iterators](media/docs/tile_iterator_concept.md) - describes C++ concepts for iterating over tiles of matrices in memory - [CUTLASS Profiler](media/docs/profiler.md) - command-line driven profiling application - [CUTLASS Utilities](media/docs/utilities.md) - additional templates used to facilitate rapid development # Resources We have also described the structure of an efficient GEMM in our talk at the [GPU Technology Conference 2018](http://on-demand.gputechconf.com/gtc/2018/presentation/s8854-cutlass-software-primitives-for-dense-linear-algebra-at-all-levels-and-scales-within-cuda.pdf). - [CUTLASS: Software Primitives for Dense Linear Algebra at All Levels and Scales within CUDA](https://www.nvidia.com/en-us/on-demand/session/gtcsiliconvalley2018-s8854/) - [Developing CUDA Kernels to Push Tensor Cores to the Absolute Limit on NVIDIA A100](https://www.nvidia.com/en-us/on-demand/session/gtcsj20-s21745/) - [Accelerating Convolution with Tensor Cores in CUTLASS](https://www.nvidia.com/en-us/on-demand/session/gtcspring21-s31883/) - [Accelerating Backward Data Gradient by Increasing Tensor Core Utilization in CUTLASS](https://www.nvidia.com/en-us/on-demand/session/gtcspring22-s41996/) - [CUTLASS: Python API, Enhancements, and NVIDIA Hopper](https://www.nvidia.com/en-us/on-demand/session/gtcfall22-a41131/) # Building CUTLASS CUTLASS is a header-only template library and does not need to be built to be used by other projects. Client applications should target CUTLASS's `include/` directory in their include paths; a minimal usage sketch is included at the end of this README. CUTLASS unit tests, examples, and utilities can be built with CMake starting with version 3.12. Make sure the `CUDACXX` environment variable points to NVCC in the CUDA Toolkit installed on your system. ```bash $ export CUDACXX=${CUDA_INSTALL_PATH}/bin/nvcc ``` Create a build directory within the CUTLASS project, then run CMake. By default CUTLASS will build kernels for CUDA architecture versions 5.0, 6.0, 6.1, 7.0, 7.5, 8.0, and 8.6. To reduce compile time, you can specify the architectures to build CUTLASS for by changing the CMake configuration setting `CUTLASS_NVCC_ARCHS`. ```bash $ mkdir build && cd build $ cmake .. -DCUTLASS_NVCC_ARCHS=80 # compiles for NVIDIA's Ampere Architecture ``` From the `build/` directory, compile and run the CUTLASS unit tests by building the target `test_unit` with make. 
The unit tests are organized as several binaries mirroring the top-level namespaces of CUTLASS, and they may be executed in parallel via make's `-j` command line argument. ```bash $ make test_unit -j ... ... ... [----------] Global test environment tear-down [==========] 946 tests from 57 test cases ran. (10812 ms total) [ PASSED ] 946 tests. ``` All tests should pass on supported platforms, though the exact number of tests may vary over time. # Project Structure CUTLASS is arranged as a header-only library along with Utilities, Tools, Examples, and unit tests. [Doxygen documentation](https://nvidia.github.io/cutlass) provides a complete list of files, classes, and template concepts defined in the CUTLASS project. A detailed explanation of the source code organization may be found in the [CUTLASS documentation](media/docs/code_organization.md), but several main components are summarized below. ## CUTLASS Template Library ``` include/ # client applications should target this directory in their build's include paths cutlass/ # CUDA Templates for Linear Algebra Subroutines and Solvers - headers only arch/ # direct exposure of architecture features (including instruction-level GEMMs) conv/ # code specialized for convolution epilogue/ # code specialized for the epilogue of gemm/convolution gemm/ # code specialized for general matrix product computations layout/ # layout definitions for matrices, tensors, and other mathematical objects in memory platform/ # CUDA-capable Standard Library components reduction/ # bandwidth-limited reduction kernels that do not fit the "gemm" model thread/ # simt code that can be performed within a CUDA thread transform/ # code specialized for layout, type, and domain transformations * # core vocabulary types, containers, and basic numeric operations ``` ### CUTLASS SDK Examples [CUTLASS SDK examples](/examples) apply CUTLASS templates to implement basic computations. ### Tools ``` tools/ library/ # CUTLASS Instance Library - contains instantiations of all supported CUTLASS templates include/ cutlass/ library/ profiler/ # CUTLASS Profiler - command-line utility for executing operations in the # CUTLASS Library util/ # CUTLASS Utilities - contains numerous helper classes for include/ # manging tensors in device memory, reference cutlass/ # implementations for GEMM, random initialization util/ # of tensors, and I/O. ``` ### Test The `test/unit/` directory consist of unit tests implemented with Google Test that demonstrate basic usage of Core API components and complete tests of the CUTLASS GEMM computations. Instructions for building and running the Unit tests are described in the [Quickstart guide](media/docs/quickstart.md). # Performance Profiling The `tools/profiler/` directory contains a command-line utility for launching each of the GEMM kernels. It can be built as follows: ```bash $ make cutlass_profiler -j16 ``` ## Building all GEMM and Convolution kernels (_long_ build times) By default, only one tile size is instantiated for each data type, math instruction, and layout. To instantiate all, set the following environment variable when running CMake from an empty `build/` directory. Beware, this results in *thousands* of kernels and long build times. ```bash $ cmake .. -DCUTLASS_NVCC_ARCHS=75 -DCUTLASS_LIBRARY_KERNELS=all ... 
$ make cutlass_profiler -j16 ``` ## Building a subset of GEMM and Convolution kernels (_reduced_ build times) To compile strictly one kernel or a small set of kernels, a comma-delimited list of kernel names with wildcard characters may be used to reduce the set of kernels. The following examples show building exactly one or a subset of kernels for NVIDIA Ampere and Turing architecture: ### Building a subset Tensor Core GEMM kernels To compile a subset of Tensor Core GEMM kernels with FP32 accumulation and FP16 input targetting NVIDIA Ampere and Turing architecture, use the below cmake command line: ```bash $ cmake .. -DCUTLASS_NVCC_ARCHS='75;80' -DCUTLASS_LIBRARY_KERNELS=cutlass_tensorop_s*gemm_f16_*_nt_align8 ... $ make cutlass_profiler -j16 ``` Example command line for profiling a subset of Tensor Core GEMM kernels is as follows: ```bash ./tools/profiler/cutlass_profiler --kernels=cutlass_tensorop_s*gemm_f16_*_nt_align8 --m=3456 --n=4096 --k=4096 ... ============================= Problem ID: 1 Provider: CUTLASS OperationKind: gemm Operation: cutlass_tensorop_s1688gemm_f16_256x128_32x2_nt_align8 Status: Success Verification: ON Disposition: Passed reference_device: Passed cuBLAS: Passed Arguments: --gemm_kind=universal --m=3456 --n=4096 --k=4096 --A=f16:column --B=f16:row --C=f32:column --alpha=1 \ --beta=0 --split_k_slices=1 --batch_count=1 --op_class=tensorop --accum=f32 --cta_m=256 --cta_n=128 \ --cta_k=32 --stages=2 --warps_m=4 --warps_n=2 --warps_k=1 --inst_m=16 --inst_n=8 --inst_k=8 --min_cc=75 \ --max_cc=1024 Bytes: 118489088 bytes FLOPs: 115992428544 flops Runtime: 1.55948 ms Memory: 70.7616 GiB/s Math: 74378.8 GFLOP/s ============================= ... ``` ### Building one CUDA Core GEMM kernel To compile one SGEMM kernel targetting NVIDIA Ampere and Turing architecture, use the below cmake command line: ```bash $ cmake .. -DCUTLASS_NVCC_ARCHS='75;80' -DCUTLASS_LIBRARY_KERNELS=cutlass_simt_sgemm_128x128_8x2_nn_align1 ... $ make cutlass_profiler -j16 ``` Example command line for profiling single SGEMM CUDA kernel is as follows: ```bash $ ./tools/profiler/cutlass_profiler --kernels=sgemm --m=3456 --n=4096 --k=4096 ============================= Problem ID: 1 Provider: CUTLASS OperationKind: gemm Operation: cutlass_simt_sgemm_128x128_8x2_nn_align1 Status: Success Verification: ON Disposition: Passed cuBLAS: Passed Arguments: --m=3456 --n=4096 --k=4096 --A=f32:column --B=f32:column --C=f32:column --alpha=1 --beta=0 --split_k_slices=1 \ --batch_count=1 --op_class=simt --accum=f32 --cta_m=128 --cta_n=128 --cta_k=8 --stages=2 --warps_m=4 \ --warps_n=2 --warps_k=1 --inst_m=1 --inst_n=1 --inst_k=1 --min_cc=50 --max_cc=1024 Bytes: 180355072 bytes FLOPs: 115992428544 flops Runtime: 6.73655 ms Memory: 24.934 GiB/s Math: 17218.4 GFLOP/s ============================= ``` ### Building a subset of Tensor Core Convolution kernels To compile a subset of Tensor core convolution kernels implementing forward propagation (fprop) with FP32 accumulation and FP16 input targetting NVIDIA Ampere and Turing architecture, use the below cmake command line: ```bash $ cmake .. -DCUTLASS_NVCC_ARCHS='75;80' -DCUTLASS_LIBRARY_KERNELS=cutlass_tensorop_s*fprop_optimized_f16 ... $ make cutlass_profiler -j16 ``` Example command line for profiling a subset of Tensor Core convolution kernels is as follows: ```bash $ ./tools/profiler/cutlass_profiler --kernels=cutlass_tensorop_s*fprop_optimized_f16 --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 ... 
============================= Problem ID: 1 Provider: CUTLASS OperationKind: conv2d Operation: cutlass_tensorop_s16816fprop_optimized_f16_128x128_32x5_nhwc Status: Success Verification: ON Disposition: Passed reference_device: Passed Arguments: --conv_kind=fprop --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 --p=224 --q=224 --pad_h=1 --pad_w=1 \ --stride_h=1 --stride_w=1 --dilation_h=1 --dilation_w=1 --Activation=f16:nhwc --Filter=f16:nhwc --Output=f32:nhwc \ --conv_mode=cross --iterator_algorithm=optimized --alpha=1 --beta=0 --split_k_mode=serial --split_k_slices=1 \ --eq_gemm_provider=none --op_class=tensorop --accum=f32 --cta_m=128 --cta_n=128 --cta_k=32 --stages=5 \ --warps_m=2 --warps_n=2 --warps_k=1 --inst_m=16 --inst_n=8 --inst_k=16 --min_cc=80 --max_cc=1024 Bytes: 1130659840 bytes FLOPs: 118482796544 flops Runtime: 0.711496 ms Memory: 1479.99 GiB/s Math: 166526 GFLOP/s ============================= ... ``` ### Building one Convolution CUDA kernel To compile and run one CUDA Core convolution kernel implementing forward propagation (fprop) with F32 accumulation and FP32 input targetting NVIDIA Ampere and Turing architecture, use the below cmake command line: ```bash $ cmake .. -DCUTLASS_NVCC_ARCHS='75;80' -DCUTLASS_LIBRARY_KERNELS=cutlass_simt_sfprop_optimized_128x128_8x2_nhwc ... $ make cutlass_profiler -j16 ``` Example command line for profiling one CUDA Core convolution kernel: ```bash $ ./tools/profiler/cutlass_profiler --kernels=cutlass_simt_sfprop_optimized_128x128_8x2_nhwc --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 ============================= Problem ID: 1 Provider: CUTLASS OperationKind: conv2d Operation: cutlass_simt_sfprop_optimized_128x128_8x2_nhwc Status: Success Verification: ON Disposition: Passed reference_device: Passed Arguments: --conv_kind=fprop --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 --p=224 --q=224 --pad_h=1 --pad_w=1 \ --stride_h=1 --stride_w=1 --dilation_h=1 --dilation_w=1 --Activation=f32:nhwc --Filter=f32:nhwc --Output=f32:nhwc \ --conv_mode=cross --iterator_algorithm=optimized --alpha=1 --beta=0 --split_k_mode=serial --split_k_slices=1 \ --eq_gemm_provider=none --op_class=simt --accum=f32 --cta_m=128 --cta_n=128 --cta_k=8 --stages=2 --warps_m=4 \ --warps_n=2 --warps_k=1 --inst_m=1 --inst_n=1 --inst_k=1 --min_cc=50 --max_cc=1024 Bytes: 2055798784 bytes FLOPs: 118482796544 flops Runtime: 7.34266 ms Memory: 260.752 GiB/s Math: 16136.2 GFLOP/s ============================= ``` ## More Details on Compiling CUTLASS Kernels and CUTLASS Profiler - Please follow the links for more CMake examples on selectively compiling CUTLASS kernels: - [GEMM CMake Examples](media/docs/quickstart.md#gemm-cmake-examples) - [Implicit GEMM conovlution CMake Examples](media/docs/quickstart.md#convolution-cmake-examples) - [Further details about the CUTLASS Profiler are described here.](media/docs/profiler.md) # About CUTLASS is released by NVIDIA Corporation as Open Source software under the [3-clause "New" BSD license](LICENSE.txt). # Contributors The official list of CUTLASS developers and contributors is available here: [CONTRIBUTORS](CONTRIBUTORS.md). # Copyright Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. SPDX-License-Identifier: BSD-3-Clause ``` Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ```
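As noted in the Building CUTLASS section above, client applications only need CUTLASS's `include/` directory on their include path. The sketch below shows a minimal device-wide single-precision GEMM following the basic `cutlass::gemm::device::Gemm` usage pattern; the column-major layouts, the default template parameters, and the `cutlass_sgemm` wrapper name are illustrative assumptions rather than the only supported configuration, and A, B, and C are assumed to already reside in device memory.

```cpp
// Minimal sketch of a device-wide SGEMM built only from CUTLASS headers.
#include <cuda_runtime.h>
#include "cutlass/gemm/device/gemm.h"

using ColumnMajor = cutlass::layout::ColumnMajor;
using CutlassGemm = cutlass::gemm::device::Gemm<float, ColumnMajor,   // A: element type and layout
                                                float, ColumnMajor,   // B
                                                float, ColumnMajor>;  // C and D

// Computes C = alpha * A * B + beta * C (C is reused as the output D).
cudaError_t cutlass_sgemm(int M, int N, int K,
                          float alpha, float const* A, int lda,
                          float const* B, int ldb,
                          float beta, float* C, int ldc)
{
  CutlassGemm gemm_op;
  CutlassGemm::Arguments args({M, N, K},
                              {A, lda}, {B, ldb},
                              {C, ldc}, {C, ldc},
                              {alpha, beta});

  cutlass::Status status = gemm_op(args);
  return (status == cutlass::Status::kSuccess) ? cudaSuccess : cudaErrorUnknown;
}
```

Compiled with `nvcc -I<cutlass>/include` for a supported architecture, a wrapper like this can stand in for a column-major SGEMM call; other data types, layouts, and tile sizes are selected through the same template parameters.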
23,092
Markdown
44.015594
199
0.712151
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/host_reorder.h
/***************************************************************************************************
 * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/
/*! \file
    \brief reorder data from the host side
*/

#pragma once

#include "cutlass/coord.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/tensor_view.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/gemm.h"

namespace cutlass {

/// This is needed for the interleaved integer tensor core kernels. The purpose
/// is to skip the shared memory part in the epilogue.
template <int Interleaved, typename Element, typename Layout>
void reorder_column(TensorRef<Element, Layout> dest,
                    TensorRef<Element, Layout> src,
                    cutlass::gemm::GemmCoord problem_size) {
  const int InstructionShapeCol = 8;
  // 4 threads per Quad
  const int ElementsPerThread = InstructionShapeCol / 4;
  // 4 threads per Quad
  const int ReorderedElementsPerThread = Interleaved / 4;

  for (int n = 0; n < problem_size.n(); n++) {
    for (int k = 0; k < problem_size.k(); k++) {
      dest.at({k, (n / Interleaved) * Interleaved +
                      ((n % ReorderedElementsPerThread) / ElementsPerThread) * InstructionShapeCol +
                      ((n % Interleaved) / ReorderedElementsPerThread) * ElementsPerThread +
                      (n % ElementsPerThread)}) = src.at({k, n});
    }
  }
}

template <int ColumnInterleaved, int LayoutInterleaved = ColumnInterleaved, typename Element, typename Layout>
void reorder_convK(TensorRef<Element, Layout> dest,
                   TensorRef<Element, Layout> src,
                   cutlass::gemm::GemmCoord problem_size) {
  TensorRef<Element, layout::RowMajorInterleaved<LayoutInterleaved>> mappedDest(dest.data(), dest.stride(0));
  TensorRef<Element, layout::RowMajorInterleaved<LayoutInterleaved>> mappedSrc(src.data(), src.stride(0));

  reorder_column<ColumnInterleaved>(mappedDest, mappedSrc, problem_size);
}

/// This is needed for the sparse tensor core kernels. The purpose
/// is to use ldmatrix to load from shared memory to the register file.
template <typename Element, typename LayoutDest, typename LayoutSrc>
void reorder_meta(TensorRef<Element, LayoutDest> dest,
                  TensorRef<Element, LayoutSrc> src,
                  cutlass::gemm::GemmCoord problem_size) {
  for (int m = 0; m < problem_size.m(); m++) {
    for (int k = 0; k < problem_size.k(); k++) {
      // First reorder the rows.
      int group = (sizeof(Element) == 2) ? 32 : 16;
      int interweave = (sizeof(Element) == 2) ? 4 : 2;

      int dest_row = m / group * group + (m % 8) * interweave + (m % group) / 8;
      int dest_col = k;

      // Next swizzle the 2x2 blocks from Z to N.
      if (((dest_row % 2) == 0) && ((dest_col % 2) == 1)) {
        ++dest_row;
        --dest_col;
      } else if (((dest_row % 2) == 1) && ((dest_col % 2) == 0)) {
        --dest_row;
        ++dest_col;
      }

      dest.at({dest_row, dest_col}) = src.at({m, k});
    }
  }
}

}  // namespace cutlass
4,821
C
42.053571
111
0.641361
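A minimal usage sketch for reorder_column from host_reorder.h above: it permutes a column-interleaved int8 B operand on the host so an interleaved integer tensor-core GEMM can skip the shared-memory epilogue path. The tensor extents, the Interleaved value of 32, and the function name reorder_b_operand are illustrative assumptions, not part of the header.

#include "cutlass/gemm_coord.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/host_reorder.h"

void reorder_b_operand() {
  int m = 128, n = 128, k = 256;
  cutlass::gemm::GemmCoord problem_size(m, n, k);

  // B is a K x N matrix stored in a 32-element column-interleaved layout.
  using LayoutB = cutlass::layout::ColumnMajorInterleaved<32>;
  cutlass::HostTensor<int8_t, LayoutB> tensor_b(cutlass::MatrixCoord(k, n));
  cutlass::HostTensor<int8_t, LayoutB> tensor_b_reordered(cutlass::MatrixCoord(k, n));

  // ... fill tensor_b.host_view() with operand data ...

  // Shuffle columns so the kernel's epilogue can bypass shared memory.
  cutlass::reorder_column<32>(
      tensor_b_reordered.host_ref(), tensor_b.host_ref(), problem_size);

  tensor_b_reordered.sync_device();
}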
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/exceptions.h
/******************************************************************************
 * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************/

#pragma once

/**
 * \file
 * \brief C++ exception semantics for CUDA error codes
 */

#include <cuda_runtime.h>

#include <iosfwd>
#include <stdexcept>

#include "cutlass/platform/platform.h"

namespace cutlass {

/// C++ exception wrapper for CUDA \p cudaError_t
class cuda_exception : public std::exception {
 public:
  /// Constructor
  cuda_exception(const char* msg = "", cudaError_t err = cudaErrorUnknown) : msg(msg), err(err) {}

  /// Returns the underlying CUDA \p cudaError_t
  cudaError_t cudaError() const { return err; }

 protected:
  /// Explanatory string
  const char* msg;

  /// Underlying CUDA \p cudaError_t
  cudaError_t err;
};

/// Writes a cuda_exception instance to an output stream
inline std::ostream& operator<<(std::ostream& out, cuda_exception const& e) {
  return out << e.what() << ": " << cudaGetErrorString(e.cudaError());
}

}  // namespace cutlass
2,674
C
37.214285
98
0.698953
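A short, hedged sketch of how cutlass::cuda_exception from exceptions.h above can be used: a failing CUDA runtime call is wrapped in the exception and printed with the stream operator. The allocation size and the function name checked_alloc are illustrative.

#include <iostream>

#include <cuda_runtime.h>

#include "cutlass/util/exceptions.h"

void checked_alloc() {
  void *ptr = nullptr;
  cudaError_t err = cudaMalloc(&ptr, size_t(1) << 20);
  if (err != cudaSuccess) {
    // Carries both the message and the cudaError_t code.
    throw cutlass::cuda_exception("cudaMalloc failed", err);
  }
  cudaFree(ptr);
}

int main() {
  try {
    checked_alloc();
  } catch (cutlass::cuda_exception const &e) {
    // Prints "<what()>: <cudaGetErrorString(...)>" via the operator<< above.
    std::cerr << e << std::endl;
    return 1;
  }
  return 0;
}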
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/tensor_view_io.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/core_io.h" #include "cutlass/tensor_view.h" #include "cutlass/tensor_view_planar_complex.h" #include "cutlass/complex.h" namespace cutlass { /////////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { /// Helper to write the least significant rank of a TensorView template < typename Element, typename Layout > inline std::ostream & TensorView_WriteLeastSignificantRank( std::ostream& out, TensorView<Element, Layout> const& view, Coord<Layout::kRank> const &start_coord, int rank, std::streamsize width) { for (int idx = 0; idx < view.extent(rank); ++idx) { Coord<Layout::kRank> coord(start_coord); coord[rank] = idx; if (idx) { out.width(0); out << ", "; } if (idx || coord) { out.width(width); } out << ScalarIO<Element>(view.at(coord)); } return out; } /// Helper to write a rank of a TensorView template < typename Element, typename Layout > inline std::ostream & TensorView_WriteRank( std::ostream& out, TensorView<Element, Layout> const& view, Coord<Layout::kRank> const &start_coord, int rank, std::streamsize width) { // If called on the least significant rank, write the result as a row if (rank + 1 == Layout::kRank) { return TensorView_WriteLeastSignificantRank(out, view, start_coord, rank, width); } // Otherwise, write a sequence of rows and newlines for (int idx = 0; idx < view.extent(rank); ++idx) { Coord<Layout::kRank> coord(start_coord); coord[rank] = idx; if (rank + 2 == Layout::kRank) { // Write least significant ranks asa matrix with rows delimited by "\n" out << (idx ? ",\n" : ""); TensorView_WriteLeastSignificantRank(out, view, coord, rank + 1, width); } else { // Higher ranks are separated by newlines out << (idx ? 
",\n\n" : ""); TensorView_WriteRank(out, view, coord, rank + 1, width); } } return out; } /// Helper to write the least significant rank of a TensorView template < typename Element, typename Layout > inline std::ostream & TensorViewPlanarComplex_WriteLeastSignificantRank( std::ostream& out, TensorViewPlanarComplex<Element, Layout> const& view, Coord<Layout::kRank> const &start_coord, int rank, std::streamsize width) { for (int idx = 0; idx < view.extent(rank); ++idx) { Coord<Layout::kRank> coord(start_coord); coord[rank] = idx; if (idx) { out.width(0); out << ", "; } if (idx || coord) { out.width(width); } complex<Element> x = view.at(coord); out << x; } return out; } /// Helper to write a rank of a TensorView template < typename Element, typename Layout > inline std::ostream & TensorViewPlanarComplex_WriteRank( std::ostream& out, TensorViewPlanarComplex<Element, Layout> const& view, Coord<Layout::kRank> const &start_coord, int rank, std::streamsize width) { // If called on the least significant rank, write the result as a row if (rank + 1 == Layout::kRank) { return TensorViewPlanarComplex_WriteLeastSignificantRank(out, view, start_coord, rank, width); } // Otherwise, write a sequence of rows and newlines for (int idx = 0; idx < view.extent(rank); ++idx) { Coord<Layout::kRank> coord(start_coord); coord[rank] = idx; if (rank + 2 == Layout::kRank) { // Write least significant ranks asa matrix with rows delimited by ";\n" out << (idx ? ";\n" : ""); TensorViewPlanarComplex_WriteLeastSignificantRank(out, view, coord, rank + 1, width); } else { // Higher ranks are separated by newlines out << (idx ? "\n" : ""); TensorViewPlanarComplex_WriteRank(out, view, coord, rank + 1, width); } } return out; } } // namespace detail /////////////////////////////////////////////////////////////////////////////////////////////////// /// Prints human-readable representation of a TensorView to an ostream template < typename Element, typename Layout > inline std::ostream& TensorViewWrite( std::ostream& out, TensorView<Element, Layout> const& view) { // Prints a TensorView according to the following conventions: // - least significant rank is printed as rows separated by ";\n" // - all greater ranks are delimited with newlines // // The result is effectively a whitespace-delimited series of 2D matrices. return detail::TensorView_WriteRank(out, view, Coord<Layout::kRank>(), 0, out.width()); } /// Prints human-readable representation of a TensorView to an ostream template < typename Element, typename Layout > inline std::ostream& operator<<( std::ostream& out, TensorView<Element, Layout> const& view) { // Prints a TensorView according to the following conventions: // - least significant rank is printed as rows separated by ";\n" // - all greater ranks are delimited with newlines // // The result is effectively a whitespace-delimited series of 2D matrices. return TensorViewWrite(out, view); } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Prints human-readable representation of a TensorView to an ostream template < typename Element, typename Layout > inline std::ostream& TensorViewWrite( std::ostream& out, TensorViewPlanarComplex<Element, Layout> const& view) { // Prints a TensorView according to the following conventions: // - least significant rank is printed as rows separated by ";\n" // - all greater ranks are delimited with newlines // // The result is effectively a whitespace-delimited series of 2D matrices. 
return detail::TensorViewPlanarComplex_WriteRank(out, view, Coord<Layout::kRank>(), 0, out.width()); } /// Prints human-readable representation of a TensorView to an ostream template < typename Element, typename Layout > inline std::ostream& operator<<( std::ostream& out, TensorViewPlanarComplex<Element, Layout> const& view) { // Prints a TensorView according to the following conventions: // - least significant rank is printed as rows separated by ";\n" // - all greater ranks are delimited with newlines // // The result is effectively a whitespace-delimited series of 2D matrices. return TensorViewWrite(out, view); } /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass
8,285
C
30.505703
102
0.646832
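A hedged sketch showing the stream operator from tensor_view_io.h above printing a small row-major tensor; the extents and fill values are illustrative.

#include <iostream>

#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"

int main() {
  // 2x3 row-major tensor filled with 0..5 on the host.
  cutlass::HostTensor<float, cutlass::layout::RowMajor> tensor(cutlass::MatrixCoord(2, 3));

  for (int r = 0; r < 2; ++r) {
    for (int c = 0; c < 3; ++c) {
      tensor.host_view().at({r, c}) = float(r * 3 + c);
    }
  }

  // The least significant rank prints as a comma-separated row; higher ranks
  // are delimited by newlines, so a rank-2 view prints as a small matrix.
  std::cout << tensor.host_view() << std::endl;
  return 0;
}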
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/distribution.h
/***************************************************************************************************
 * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/

#pragma once

/*! \file
    \brief This header contains a class to parametrize a statistical distribution function.
*/

#include <ostream>

namespace cutlass {

////////////////////////////////////////////////////////////////////////////////////////////////////

/// Distribution type
struct Distribution {
  /// Variant types
  enum Kind { Invalid, Uniform, Gaussian, Identity, Sequential, AllZeros, AllOnes };

  /// Distribution state
  union {
    /// Uniform distribution
    struct {
      double min;
      double max;
    } uniform;

    /// Gaussian distribution
    struct {
      double mean;
      double stddev;
    } gaussian;

    /// Elements are linear combination of row and column index
    struct {
      double start;
      double delta;
    } sequential;
  };

  /// Active variant kind
  Kind kind;

  /// Random values are cast to integer after scaling by this power of two
  int int_scale;

  //
  // Methods
  //

  Distribution() : kind(Invalid), int_scale(0) {}

  /// Configures distribution as uniform random
  Distribution &set_uniform(double _min, double _max, int _int_scale = 0) {
    kind = Uniform;
    uniform.min = _min;
    uniform.max = _max;
    int_scale = _int_scale;
    return *this;
  }

  /// Configures distribution as Gaussian distribution
  Distribution &set_gaussian(double _mean, double _stddev, int _int_scale = 0) {
    kind = Gaussian;
    gaussian.mean = _mean;
    gaussian.stddev = _stddev;
    int_scale = _int_scale;
    return *this;
  }

  /// Sets identity
  Distribution &set_identity() {
    kind = Identity;
    return *this;
  }

  /// Sets sequential
  Distribution &set_sequential(double start, double delta, int _int_scale = 0) {
    kind = Sequential;
    sequential.start = start;
    sequential.delta = delta;
    int_scale = _int_scale;
    return *this;
  }
};

}  // namespace cutlass

////////////////////////////////////////////////////////////////////////////////////////////////////

/// Prints a Distribution to ostream
inline std::ostream &operator<<(std::ostream &out, cutlass::Distribution const &dist) {
  switch (dist.kind) {
    case cutlass::Distribution::Uniform:
      out << "uniform, min: " << dist.uniform.min << ", max: " << dist.uniform.max;
      break;
    case cutlass::Distribution::Gaussian:
      out << "gaussian, mean: " << dist.gaussian.mean << ", stddev: " << dist.gaussian.stddev;
      break;
    case cutlass::Distribution::Identity:
      out << "identity";
      break;
    case cutlass::Distribution::Sequential:
      out << "sequential";
      break;
    default:
      out << "unknown";
  }

  out << ", int_scale: " << dist.int_scale;

  return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////
4,597
C
30.930555
100
0.611486
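A hedged sketch exercising cutlass::Distribution from distribution.h above; the numeric parameters are arbitrary, and whether a tensor-fill helper consumes the struct depends on the surrounding tooling.

#include <iostream>

#include "cutlass/util/distribution.h"

int main() {
  cutlass::Distribution dist;

  // Uniform random values drawn from [-4, 4].
  dist.set_uniform(-4.0, 4.0);
  std::cout << dist << std::endl;  // "uniform, min: -4, max: 4, int_scale: 0"

  // The same object reconfigured as a Gaussian with mean 0 and stddev 2,
  // with values scaled by 2^1 before any cast to integer.
  dist.set_gaussian(0.0, 2.0, /*_int_scale=*/1);
  std::cout << dist << std::endl;  // "gaussian, mean: 0, stddev: 2, int_scale: 1"
  return 0;
}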
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/device_nhwc_pooling.h
/****************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ #pragma once /** * \file * \brief cuda kernels to do avg/max pooling on a device memory tensor with NHWC layout. */ #include "cutlass/cutlass.h" #include "cutlass/layout/tensor.h" #include "cutlass/numeric_types.h" #include "cutlass/tensor_coord.h" #include "cutlass/tensor_ref.h" #include "device_utils.h" #include <float.h> namespace cutlass { /** \brief interface to do avg/max pooling on a device memory tensor with NHWC layout. 
* \tparam T: data type */ template <typename T> void pooling_nhwc(cutlass::Tensor4DCoord input_tensor_size, cutlass::Tensor4DCoord filter_tensor_size, cutlass::Tensor4DCoord output_tensor_size, cutlass::MatrixCoord padding, cutlass::MatrixCoord stride, TensorRef<T, layout::TensorNHWC> ref_input, TensorRef<T, layout::TensorNHWC> ref_output, int poolingType, //0 for avg pooling ; 1 for max pooling cudaStream_t stream); /** get the output size of pooling */ inline int getOutputSize(int H_W, int padding, int kernel_size, int stride) { return (H_W + 2 * padding - kernel_size) / stride + 1; } /** * input is [N, H, W, C] * assume stride == kernel_size * output_h = (H + 2*padding_H - kernel_H)/stride_H * output_w = (W + 2*padding_W - kernel_W)/stride_W * output is [N, output_h, output_w, C] * grid(N, output_h, output_w) * block(min(C, 256)) : * each block deals with C elements of output when each thread deals with ((C + 255)/256 element of output) */ template<typename T, bool IS_AVG_POOLING> __global__ void pooling_nhwc_element1_kernel(T* output, const T* input, const int N, const int H, const int W, const int C, const int output_H, const int output_W, const int kernel_H, const int kernel_W, const int stride_H, const int stride_W, const int padding_H, const int padding_W) { const int tid = threadIdx.x; const int n_idx = blockIdx.x; const int output_h_idx = blockIdx.y; const int output_w_idx = blockIdx.z; int h_start_idx = output_h_idx * stride_H - padding_H; int h_end_idx = h_start_idx + kernel_H; h_start_idx = (h_start_idx < 0) ? 0 : h_start_idx; h_end_idx = h_end_idx > H ? H : h_end_idx; int w_start_idx = output_w_idx * stride_W - padding_W; int w_end_idx = w_start_idx + kernel_W; w_start_idx = (w_start_idx < 0) ? 0 : w_start_idx; w_end_idx = w_end_idx > W ? W : w_end_idx; input += n_idx * H * W * C; output += ((n_idx * output_H + output_h_idx) * output_W + output_w_idx) * C; const int kernel_size2 = kernel_H * kernel_W; for (int c_idx = tid; c_idx < C; c_idx += blockDim.x) { float pooling; if (IS_AVG_POOLING){ pooling = 0.0f; } else{ pooling = -FLT_MAX; } for (int h = h_start_idx; h < h_end_idx; h++) { for (int w = w_start_idx; w < w_end_idx; w++) { const int idx = (h * W + w) * C; const float tmp = static_cast<float>(input[idx + c_idx]); if (IS_AVG_POOLING){ pooling = pooling + tmp; } else{ pooling = pooling > tmp ? pooling : tmp; } } } T output_val; if (IS_AVG_POOLING){ output_val = T(pooling/kernel_size2); } else{ output_val = T(pooling); } output[c_idx] = output_val; } } template<typename T2, typename T, bool IS_AVG_POOLING> __global__ void pooling_nhwc_element2_kernel(T2* output, const T2* input, const int N, const int H, const int W, const int C, const int output_H, const int output_W, const int kernel_H, const int kernel_W, const int stride_H, const int stride_W, const int padding_H, const int padding_W) { const int tid = threadIdx.x; const int n_idx = blockIdx.x; const int output_h_idx = blockIdx.y; const int output_w_idx = blockIdx.z; int h_start_idx = output_h_idx * stride_H - padding_H; int h_end_idx = h_start_idx + kernel_H; h_start_idx = (h_start_idx < 0) ? 0 : h_start_idx; h_end_idx = h_end_idx > H ? H : h_end_idx; int w_start_idx = output_w_idx * stride_W - padding_W; int w_end_idx = w_start_idx + kernel_W; w_start_idx = (w_start_idx < 0) ? 0 : w_start_idx; w_end_idx = w_end_idx > W ? 
W : w_end_idx; input += n_idx * H * W * C; output += ((n_idx * output_H + output_h_idx) * output_W + output_w_idx) * C; const int kernel_size2 = kernel_H * kernel_W; for (int c_idx = tid; c_idx < C; c_idx += blockDim.x) { float2 pooling; if (IS_AVG_POOLING) { pooling = {0.0f, 0.0f}; } else { pooling = {-FLT_MAX, -FLT_MAX}; } for (int h = h_start_idx; h < h_end_idx; h++) { for (int w = w_start_idx; w < w_end_idx; w++) { const int idx = (h * W + w) * C; const T2 tmp = input[idx + c_idx]; const float2 tmp_flt2 = {static_cast<float>(tmp.x), static_cast<float>(tmp.y)}; if (IS_AVG_POOLING) { pooling.x += tmp_flt2.x; pooling.y += tmp_flt2.y; } else { pooling.x = pooling.x > tmp_flt2.x ? pooling.x : tmp_flt2.x; pooling.y = pooling.y > tmp_flt2.y ? pooling.y : tmp_flt2.y; } } } T2 output_val; if (IS_AVG_POOLING) { output_val.x = T(pooling.x/kernel_size2); output_val.y = T(pooling.y/kernel_size2); } else { output_val.x = T(pooling.x); output_val.y = T(pooling.y); } output[c_idx] = output_val; } } /** * output [N, 1, 1, C] * input [N, H, W, C] * grid(C, N) * block(block_size) -- each block deals with H*W/block_size elements; */ template<typename T, bool IS_AVG_POOLING> __global__ void pooling_nxhTo1x1_element1_kernel( T* output, const T* input, const int N, const int HW, const int C) { const int c_idx = blockIdx.x; const int n_idx = blockIdx.y; float pooling[1]; if (IS_AVG_POOLING) { pooling[0] = 0.0f; } else { pooling[0] = -FLT_MAX; } const size_t input_offset = n_idx * HW * C + c_idx; input += input_offset; const size_t output_offset = n_idx * C + c_idx; output += output_offset; int tid = threadIdx.x; for (int index = tid; index < HW; index += blockDim.x) { float val = static_cast<float>(input[index * C]); if (IS_AVG_POOLING) { pooling[0] += val; } else { pooling[0] = pooling[0] > val ? pooling[0] : val; } } if (blockDim.x <= 32) { if (IS_AVG_POOLING) { warpReduceSum<float, 1>(pooling); } else { warpReduceMax<float, 1>(pooling); } } else { if (IS_AVG_POOLING) { blockReduceSum<float, 1>(pooling); } else { blockReduceMax<float, 1>(pooling); } } __syncthreads(); if (threadIdx.x == 0) { T output_val; if (IS_AVG_POOLING) { output_val = T(pooling[0] / HW); } else { output_val = T(pooling[0]); } output[0] = output_val; } } /** * output [N, 1, 1, C] * input [N, H, W, C] * grid(C/2, N) * block(block_size) -- each thread deals with H*W/block_size * 2 elements; */ template<typename T2, typename T, bool IS_AVG_POOLING> __global__ void pooling_nxhTo1x1_element2_kernel( T2* output, const T2* input, const int N, const int HW, const int C) { const int c_idx = blockIdx.x; const int n_idx = blockIdx.y; float pooling[2]; if (IS_AVG_POOLING) { pooling[0] = pooling[1] = 0.0f; } else { pooling[0] = pooling[1] = -FLT_MAX; } const int C_2 = C / 2; const size_t input_offset = n_idx * HW * C_2 + c_idx; input += input_offset; const size_t output_offset = n_idx * C_2 + c_idx; output += output_offset; int tid = threadIdx.x; for (int index = tid; index < HW; index += blockDim.x) { T2 val = input[index * C_2]; float2 val_flt2 = {static_cast<float>(val.x), static_cast<float>(val.y)}; if (IS_AVG_POOLING) { pooling[0] += val_flt2.x; pooling[1] += val_flt2.y; } else { pooling[0] = pooling[0] > val_flt2.x ? pooling[0] : val_flt2.x; pooling[1] = pooling[1] > val_flt2.y ? 
pooling[1] : val_flt2.y; } } if (blockDim.x <= 32) { if (IS_AVG_POOLING) { warpReduceSum<float, 2>(pooling); } else { warpReduceMax<float, 2>(pooling); } } else { if (IS_AVG_POOLING) { blockReduceSum<float, 2>(pooling); } else { blockReduceMax<float, 2>(pooling); } } __syncthreads(); if (threadIdx.x == 0) { T2 output_val; if (IS_AVG_POOLING) { output_val.x = T(pooling[0] / HW); output_val.y = T(pooling[1] / HW); } else { output_val.x = T(pooling[0]); output_val.y = T(pooling[1]); } output[0] = output_val; } } template <typename T> void pooling_nhwc(cutlass::Tensor4DCoord input_tensor_size, cutlass::Tensor4DCoord filter_tensor_size, cutlass::Tensor4DCoord output_tensor_size, cutlass::Tensor4DCoord padding, cutlass::MatrixCoord stride, TensorRef<T, layout::TensorNHWC> ref_input, TensorRef<T, layout::TensorNHWC> ref_output, int poolingType, //0 for avg pooling ; 1 for max pooling cudaStream_t stream) { assert(input_tensor_size.n() == output_tensor_size.n() && input_tensor_size.c() == output_tensor_size.c()); assert(filter_tensor_size.h() == stride.row() && filter_tensor_size.w() == stride.column()); const int N = input_tensor_size.n(); const int H = input_tensor_size.h(); const int W = input_tensor_size.w(); const int C = input_tensor_size.c(); const int padding_H = padding.h(); const int padding_W = padding.w(); const int kernel_H = filter_tensor_size.h(); const int kernel_W = filter_tensor_size.w(); const int stride_H = stride.row(); const int stride_W = stride.column(); const int output_H = getOutputSize(H, padding_H, kernel_H, stride_H); const int output_W = getOutputSize(W, padding_W, kernel_W, stride_W); assert(output_tensor_size.h() == output_H && output_tensor_size.w() == output_W); if (C % 2 != 0) { if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0)) { dim3 grid(C, N); dim3 block(256); if (H*W < block.x){ block.x = (H*W + 31)/32*32; } if (poolingType == 0) { pooling_nxhTo1x1_element1_kernel<T, true><<<grid, block, 0, stream>>>( ref_output.data(), ref_input.data(), N, H*W, C); } // if (poolingType == 0) else { pooling_nxhTo1x1_element1_kernel<T, false><<<grid, block, 0, stream>>>( ref_output.data(), ref_input.data(), N, H*W, C); } } // if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0)) else { dim3 grid(N, output_H, output_W); dim3 block(256); if (C < block.x) { block.x = C; } if (poolingType == 0) { pooling_nhwc_element1_kernel<T, true><<<grid, block, 0, stream>>>( ref_output.data(), ref_input.data(), N, H, W, C, output_H, output_W, kernel_H, kernel_W, stride_H, stride_W, padding_H, padding_W); } // if (poolingType == 0) else { pooling_nhwc_element1_kernel<T, false><<<grid, block, 0, stream>>>( ref_output.data(), ref_input.data(), N, H, W, C, output_H, output_W, kernel_H, kernel_W, stride_H, stride_W, padding_H, padding_W); } } } // if (C % 2 != 0)) else { if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0)) { dim3 grid(C/2, N); dim3 block(256); if (H*W < block.x){ block.x = (H*W + 31)/32*32; } if (poolingType == 0) { if (std::is_same<T, float>::value) { pooling_nxhTo1x1_element2_kernel<float2, float, true><<<grid, block, 0, stream>>>( (float2*)(ref_output.data()), (const float2*)(ref_input.data()), N, H*W, C); } // if (std::is_same<T, float>::value) else { pooling_nxhTo1x1_element2_kernel<half2, half, true><<<grid, block, 0, stream>>>( (half2*)(ref_output.data()), (const half2*)(ref_input.data()), N, H*W, C); } } // if (poolingType == 0) else { if (std::is_same<T, float>::value) { 
pooling_nxhTo1x1_element2_kernel<float2, float, false><<<grid, block, 0, stream>>>( (float2*)(ref_output.data()), (const float2*)(ref_input.data()), N, H*W, C); } // if (std::is_same<T, float>::value) else { pooling_nxhTo1x1_element2_kernel<half2, half, false><<<grid, block, 0, stream>>>( (half2*)(ref_output.data()), (const half2*)(ref_input.data()), N, H*W, C); } } } // if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0)) else { dim3 grid(N, output_H, output_W); dim3 block(256); if (C/2 < block.x) { block.x = C/2; } if (poolingType == 0) { if (std::is_same<T, float>::value) { pooling_nhwc_element2_kernel<float2, float, true><<<grid, block, 0, stream>>>( (float2*)(ref_output.data()), (const float2*)(ref_input.data()), N, H, W, C/2, output_H, output_W, kernel_H, kernel_W, stride_H, stride_W, padding_H, padding_W); } // if (std::is_same<T, float>::value) else { pooling_nhwc_element2_kernel<half2, half, true><<<grid, block, 0, stream>>>( (half2*)(ref_output.data()), (const half2*)(ref_input.data()), N, H, W, C/2, output_H, output_W, kernel_H, kernel_W, stride_H, stride_W, padding_H, padding_W); } } // if (poolingType == 0) else { if (std::is_same<T, float>::value) { pooling_nhwc_element2_kernel<float2, float, false><<<grid, block, 0, stream>>>( (float2*)(ref_output.data()), (const float2*)(ref_input.data()), N, H, W, C/2, output_H, output_W, kernel_H, kernel_W, stride_H, stride_W, padding_H, padding_W); } // if (std::is_same<T, float>::value) else { pooling_nhwc_element2_kernel<half2, half, false><<<grid, block, 0, stream>>>( (half2*)(ref_output.data()), (const half2*)(ref_input.data()), N, H, W, C/2, output_H, output_W, kernel_H, kernel_W, stride_H, stride_W, padding_H, padding_W); } } } } } } //namespace cutlass
18,653
C
31.329289
107
0.498097
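A hedged sketch of calling pooling_nhwc from device_nhwc_pooling.h above for 2x2 average pooling with stride equal to the kernel, as the helper asserts. Shapes and the function name run_avg_pool are illustrative, and the padding argument follows the Tensor4DCoord form used by the definition rather than the MatrixCoord form in the forward declaration.

#include "cutlass/util/host_tensor.h"
#include "cutlass/util/device_nhwc_pooling.h"

void run_avg_pool(cudaStream_t stream) {
  int N = 1, H = 8, W = 8, C = 32;
  int kH = 2, kW = 2;
  int oH = H / kH, oW = W / kW;  // stride == kernel, no padding

  cutlass::HostTensor<float, cutlass::layout::TensorNHWC> input({N, H, W, C});
  cutlass::HostTensor<float, cutlass::layout::TensorNHWC> output({N, oH, oW, C});

  // ... fill input.host_view(), then input.sync_device() ...

  cutlass::pooling_nhwc<float>(
      {N, H, W, C},                        // input size
      {1, kH, kW, 1},                      // filter size (only h, w are used)
      {N, oH, oW, C},                      // output size
      cutlass::Tensor4DCoord(0, 0, 0, 0),  // padding (only h, w are used)
      cutlass::MatrixCoord(kH, kW),        // stride, must match the filter
      input.device_ref(),
      output.device_ref(),
      /*poolingType=*/0,                   // 0: average, 1: max
      stream);

  output.sync_host();
}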
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/device_dump.h
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <stdio.h> #include "cutlass/cutlass.h" /** * \file * \brief C++ interface to dump fragments and shared memory contents for * debugging. */ namespace cutlass { namespace debug { /****************************************************************************** * Dump the fragments ******************************************************************************/ /// The first N threads dump the first M elements from their fragments with a /// stride of S elements. If N is not specified, dump the data of all the /// threads. If M is not specified, dump all the elements of the fragment. 
template <typename Fragment> CUTLASS_DEVICE void dump_fragment(Fragment const& frag, int N = 0, int M = 0, int S = 1) { int total_threads = blockDim.x * blockDim.y * blockDim.z; int block_id = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int thread_id = (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x; if (N < 0 || N > total_threads) { if (thread_id == 0 && block_id == 0) printf("Thread number N = %d should between [1, %d].\n", N, total_threads); __syncthreads(); return; } int total_elements = frag.size(); if (M < 0 || M > total_elements) { if (thread_id == 0 && block_id == 0) printf("Element number M = %d should between [1, %d].\n", M, total_elements); __syncthreads(); return; } if (N == 0) N = total_threads; if (M == 0) M = total_elements; if (S < 1 || S > M) { if (thread_id == 0 && block_id == 0) printf("Stride S = %d should between [1, %d].\n", S, M); __syncthreads(); return; } if (thread_id == 0 && block_id == 0) printf("\n*******************Dumping the fragments*******************\n\n"); CUTLASS_PRAGMA_NO_UNROLL for (int tid = 0; tid < N; ++tid) { if (tid == thread_id) { printf("TB%d W%d T%d: ", block_id, tid / 32, tid & 31); CUTLASS_PRAGMA_NO_UNROLL for (int i = 0; i < M; i += S) { printf("%.0f ", float(typename Fragment::value_type(frag[i]))); } printf("\n"); } __syncthreads(); } if (thread_id == 0 && block_id == 0) printf("\n***********************************************************\n\n"); __syncthreads(); return; } /****************************************************************************** * Dump the shared memory ******************************************************************************/ #define SHMEM_ROW_SIZE 128 /// Dump the shared memory contents. ptr is the begin address, size specifies /// the number of elements that need to be dumped, and S specifies the stride. template <typename Element> CUTLASS_DEVICE void dump_shmem(Element const* ptr, size_t size, int S = 1) { int block_id = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int thread_id = (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x; if (ptr == nullptr) { if (thread_id == 0 && block_id == 0) printf("ptr is null.\n"); __syncthreads(); return; } if (size < 1) { if (thread_id == 0 && block_id == 0) printf("Element size is less than 1\n"); __syncthreads(); return; } int row_elements = SHMEM_ROW_SIZE / sizeof(Element); if (S < 1 || S > row_elements) { if (thread_id == 0 && block_id == 0) printf("Stride S = %d should between [1, %d].\n", S, row_elements); __syncthreads(); return; } __syncthreads(); if (thread_id == 0) printf("\n********Dumping the shared memory of TB %d*******\n\n", block_id); if (thread_id == 0) { for (int i = 0; i < size; i += row_elements) { for (int j = 0; j < row_elements; j += S) { printf("%.0f ", float(ptr[i + j])); } printf("\n"); } } if (thread_id == 0) printf("\n***********************************************************\n\n"); __syncthreads(); return; } } // namespace debug } // namespace cutlass
5,953
C
30.670213
100
0.555014
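A hedged sketch of calling cutlass::debug::dump_fragment from device_dump.h above inside a kernel. In a real mainloop the fragment would come from an iterator's Fragment typedef; here a plain cutlass::Array stands in, and the kernel name is illustrative.

#include "cutlass/array.h"
#include "cutlass/util/device_dump.h"

__global__ void debug_kernel() {
  // Stand-in for a tile iterator's per-thread fragment.
  cutlass::Array<float, 8> frag;

  CUTLASS_PRAGMA_UNROLL
  for (int i = 0; i < 8; ++i) {
    frag[i] = float(threadIdx.x * 8 + i);
  }

  // The first 4 threads print every other element of their 8-element fragment.
  // All threads must reach this call because it synchronizes the block.
  cutlass::debug::dump_fragment(frag, /*N=*/4, /*M=*/8, /*S=*/2);
}

// Launched, for example, as: debug_kernel<<<1, 32>>>();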
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/device_groupnorm.h
/****************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ #pragma once /** * \file * \brief cuda kernels to do group norm on a device memory tensor with NHWC layout. The tensor will be divided into [N, H, W, G, C'] and then we do normalization on [H, W, C']. */ #include "cutlass/cutlass.h" #include "cutlass/layout/tensor.h" #include "cutlass/numeric_types.h" #include "cutlass/tensor_coord.h" #include "cutlass/tensor_ref.h" #include "device_utils.h" #include <float.h> namespace cutlass { /** \brief interface to do group norm on a device memory tensor with NHWC layout. * \tparam T: data type */ template <typename T> void groupnorm(cutlass::Tensor4DCoord input_size, const int num_groups, const float eps, TensorRef<T, layout::TensorNHWC> ref_output, TensorRef<T, layout::TensorNHWC> ref_input, TensorRef<T, layout::TensorNHWC> ref_gamma, TensorRef<T, layout::TensorNHWC> ref_beta, cudaStream_t stream); extern __shared__ char groupnorm_shm[]; // For small prod_dim1_to_last_dim/num_groups, to avoid multiple loads from global memory, // we store the input in the shared memory. 
// grid(num_groups, dim0) // block(BLOCKSIZE) // BLOCKSIZE * TVecs_PER_THREAD <= prod_dim1_to_last_dim/num_group template<typename TVec, typename T, int T_PER_TVec> __global__ void groupnorm_twopass_store_locally(T* output, const T* input, const T* gamma, const T* beta, int num_groups, int prod_dim1_to_last_dim, int last_dim, const float eps, const int TVecs_PER_THREAD) { const int bid = blockIdx.y; // index of batch const int gid = blockIdx.x; // index of group const int tid = threadIdx.x; // index of thread const int bdimx = blockDim.x; const int s_reduce_elements = prod_dim1_to_last_dim / num_groups; const int v_reduce_elements = s_reduce_elements / T_PER_TVec; const int s_group_stride = last_dim / num_groups; const int v_group_stride = s_group_stride / T_PER_TVec; const int offset_of_group = (bid * prod_dim1_to_last_dim + gid * s_group_stride) / T_PER_TVec; const TVec* input_TVec_ptr = (const TVec*)(input) + offset_of_group; TVec* output_TVec_ptr = (TVec*)(output) + offset_of_group; T* local_val = ((T*)groupnorm_shm) + TVecs_PER_THREAD * T_PER_TVec * tid; float local_sum[1] = {0.0f}; // load from global memory into shared memory #pragma unroll for (int i = 0; i < TVecs_PER_THREAD; i += 1) { const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec; const int offset_in_group = ((current_load_start_idx / s_group_stride) * last_dim + (current_load_start_idx % s_group_stride)) / T_PER_TVec; if (current_load_start_idx < s_reduce_elements) { TVec tmp_vec = input_TVec_ptr[offset_in_group]; T* tmp_vec_ptr = (T*)(&tmp_vec); const int local_val_offset = i * T_PER_TVec; #pragma unroll for (int j = 0; j < T_PER_TVec; j++) { float tmp = static_cast<float>(tmp_vec_ptr[j]); local_sum[0] += tmp; local_val[local_val_offset + j] = tmp_vec_ptr[j]; } } } __shared__ float s_mean, s_variance; // reduction for mean if (bdimx <= 32) { warpReduceSum<float, 1>(local_sum); } else { blockReduceSum<float, 1>(local_sum); } if (tid == 0) { s_mean = local_sum[0] / s_reduce_elements; } __syncthreads(); // reduction for std local_sum[0] = 0.0f; #pragma unroll for (int i = 0; i < TVecs_PER_THREAD; i += 1) { const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec; if (current_load_start_idx < s_reduce_elements) { const int local_val_offset = i * T_PER_TVec; #pragma unroll for (int j = 0; j < T_PER_TVec; j++) { float tmp = static_cast<float>(local_val[local_val_offset + j]); tmp -= s_mean; local_sum[0] += tmp * tmp; } } } if (bdimx <= 32) { warpReduceSum<float, 1>(local_sum); } else { blockReduceSum<float, 1>(local_sum); } if (tid == 0) { s_variance = rsqrtf(local_sum[0] / s_reduce_elements + eps); } __syncthreads(); // normalize const int gamma_offset_of_group = gid * v_group_stride; const TVec* gamma_TVec_ptr = (const TVec*)gamma + gamma_offset_of_group; const TVec* beta_TVec_ptr = (const TVec*)beta + gamma_offset_of_group; #pragma unroll for (int i = 0; i < TVecs_PER_THREAD; i += 1) { const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec; const int offset_in_group = ((current_load_start_idx / s_group_stride) * last_dim + (current_load_start_idx % s_group_stride)) / T_PER_TVec; const int gamma_offset_in_group = (current_load_start_idx % s_group_stride) / T_PER_TVec; const int local_val_offset = i * T_PER_TVec; if (current_load_start_idx < s_reduce_elements) { TVec gamma_val = gamma_TVec_ptr[gamma_offset_in_group]; TVec beta_val = beta_TVec_ptr[gamma_offset_in_group]; T* gamma_val_ptr = (T*)(&gamma_val); T* beta_val_ptr = (T*)(&beta_val); TVec tmp_vec; T* tmp_vec_ptr = (T*)(&tmp_vec); 
#pragma unroll for (int j = 0; j < T_PER_TVec; j++) { float tmp = (static_cast<float>(local_val[local_val_offset + j]) - s_mean) * s_variance * static_cast<float>(gamma_val_ptr[j]) + static_cast<float>(beta_val_ptr[j]); if (sizeof(T) == sizeof(half)) { tmp_vec_ptr[j] = T(__float2half_rn(tmp)); } else { tmp_vec_ptr[j] = T(tmp); } } output_TVec_ptr[offset_in_group] = tmp_vec; } } } // For large prod_dim1_to_last_dim/num_groups, // in which the data cannot be stored locally, // we will load from global memory multiple times, // grid(num_groups, dim0) // block(BLOCKSIZE) // BLOCKSIZE * TVecs_PER_THREAD <= prod_dim1_to_last_dim/num_group template<typename TVec, typename T, int T_PER_TVec> __global__ void groupnorm_twopass_multiple_load(T* output, const T* input, const T* gamma, const T* beta, int num_groups, int prod_dim1_to_last_dim, int last_dim, const float eps, const int TVecs_PER_THREAD) { const int bid = blockIdx.y; // index of batch const int gid = blockIdx.x; // index of group const int tid = threadIdx.x; // index of thread const int bdimx = blockDim.x; const int s_reduce_elements = prod_dim1_to_last_dim / num_groups; const int v_reduce_elements = s_reduce_elements / T_PER_TVec; const int s_group_stride = last_dim / num_groups; const int v_group_stride = s_group_stride / T_PER_TVec; const int offset_of_group = (bid * prod_dim1_to_last_dim + gid * s_group_stride) / T_PER_TVec; const TVec* input_TVec_ptr = (const TVec*)(input) + offset_of_group; TVec* output_TVec_ptr = (TVec*)(output) + offset_of_group; float local_sum[1] = {0.0f}; #pragma unroll for (int i = 0; i < TVecs_PER_THREAD; i += 1) { const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec; if (current_load_start_idx < s_reduce_elements) { const int offset_in_group = ((current_load_start_idx / s_group_stride) * last_dim + (current_load_start_idx % s_group_stride)) / T_PER_TVec; TVec tmp_vec = input_TVec_ptr[offset_in_group]; T* tmp_vec_ptr = (T*)(&tmp_vec); #pragma unroll for (int j = 0; j < T_PER_TVec; j++) { float tmp = static_cast<float>(tmp_vec_ptr[j]); local_sum[0] += tmp; } } } __shared__ float s_mean, s_variance; // reduction for mean if (bdimx <= 32) { warpReduceSum<float, 1>(local_sum); } else { blockReduceSum<float, 1>(local_sum); } if (tid == 0) { s_mean = local_sum[0] / s_reduce_elements; } __syncthreads(); // reduction for std local_sum[0] = 0.0f; #pragma unroll for (int i = 0; i < TVecs_PER_THREAD; i += 1) { const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec; if (current_load_start_idx < s_reduce_elements) { const int offset_in_group = ((current_load_start_idx / s_group_stride) * last_dim + (current_load_start_idx % s_group_stride)) / T_PER_TVec; TVec tmp_vec = input_TVec_ptr[offset_in_group]; T* tmp_vec_ptr = (T*)(&tmp_vec); #pragma unroll for (int j = 0; j < T_PER_TVec; j++) { float tmp = static_cast<float>(tmp_vec_ptr[j]); tmp -= s_mean; local_sum[0] += tmp * tmp; } } } if (bdimx <= 32) { warpReduceSum<float, 1>(local_sum); } else { blockReduceSum<float, 1>(local_sum); } if (tid == 0) { s_variance = rsqrtf(local_sum[0] / s_reduce_elements + eps); } __syncthreads(); // normalize const int gamma_offset_of_group = gid * v_group_stride; const TVec* gamma_TVec_ptr = (const TVec*)gamma + gamma_offset_of_group; const TVec* beta_TVec_ptr = (const TVec*)beta + gamma_offset_of_group; #pragma unroll for (int i = 0; i < TVecs_PER_THREAD; i += 1) { const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec; if (current_load_start_idx < s_reduce_elements) { const int offset_in_group = 
((current_load_start_idx / s_group_stride) * last_dim + (current_load_start_idx % s_group_stride)) / T_PER_TVec; const int gamma_offset_in_group = (current_load_start_idx % s_group_stride) / T_PER_TVec; TVec gamma_val = gamma_TVec_ptr[gamma_offset_in_group]; TVec beta_val = beta_TVec_ptr[gamma_offset_in_group]; T* gamma_val_ptr = (T*)(&gamma_val); T* beta_val_ptr = (T*)(&beta_val); TVec tmp_vec = input_TVec_ptr[offset_in_group]; T* tmp_vec_ptr = (T*)(&tmp_vec); TVec output_tmp_vec; T* output_tmp_vec_ptr = (T*)(&output_tmp_vec); #pragma unroll for (int j = 0; j < T_PER_TVec; j++) { float tmp = (static_cast<float>(tmp_vec_ptr[j]) - s_mean) * s_variance * static_cast<float>(gamma_val_ptr[j]) + static_cast<float>(beta_val_ptr[j]); if (sizeof(T) == sizeof(half)) { output_tmp_vec_ptr[j] = T(__float2half_rn(tmp)); } else { output_tmp_vec_ptr[j] = T(tmp); } } output_TVec_ptr[offset_in_group] = output_tmp_vec; } } } //ref_input & ref_output should be [N, H, W, C] //ref_gamma & ref_beta shoud be [1, 1, 1, C] template <typename T> void groupnorm(cutlass::Tensor4DCoord input_size, const int num_groups, const float eps, TensorRef<T, layout::TensorNHWC> ref_output, TensorRef<T, layout::TensorNHWC> ref_input, TensorRef<T, layout::TensorNHWC> ref_gamma, TensorRef<T, layout::TensorNHWC> ref_beta, cudaStream_t stream){ const int N = input_size.n(); const int H = input_size.h(); const int W = input_size.w(); const int C = input_size.c(); if (C % num_groups != 0){ printf("[ERROR] C should be a multiple of num_groups.\n"); } T* output = ref_output.data(); const T* input = ref_input.data(); const T* gamma = ref_gamma.data(); const T* beta = ref_beta.data(); const int dim0 = N; const int last_dim = C; const int prod_dim1_to_last_dim = H*W*C; const int s_reduce_elements = prod_dim1_to_last_dim / num_groups; const int s_group_stride = last_dim / num_groups; dim3 grid(num_groups, dim0); int threadblock_size = 32; if (s_group_stride % 2 == 0) { const int T_PER_TVec = 2; while (threadblock_size < 1024) { if (s_reduce_elements / T_PER_TVec / threadblock_size <= 8) break; threadblock_size *= 2; } dim3 block(threadblock_size); const int TVec_PER_THREAD = (s_reduce_elements / T_PER_TVec + threadblock_size - 1) / threadblock_size; const int shm_size = T_PER_TVec * TVec_PER_THREAD * threadblock_size * sizeof(T); // for small s_reduce_elements, specific case for H=W=22, C=1280, num_groups=32; // the size of grid & block may have better choice for different cases. 
// ensure shared memory is smaller than 48KB if (std::is_same<T, float>::value){ if (shm_size < 48 * 1024) { groupnorm_twopass_store_locally<float2, T, T_PER_TVec><<<grid, block, shm_size, stream>>>( output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD); } else { groupnorm_twopass_multiple_load<float2, T, T_PER_TVec><<<grid, block, 0, stream>>>( output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD); } } else{ if (shm_size < 48 * 1024) { groupnorm_twopass_store_locally<half2, T, T_PER_TVec><<<grid, block, shm_size, stream>>>( output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD); } else { groupnorm_twopass_multiple_load<half2, T, T_PER_TVec><<<grid, block, 0, stream>>>( output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD); } } } else { const int T_PER_TVec = 1; while (threadblock_size < 1024) { if (s_reduce_elements / T_PER_TVec / threadblock_size <= 8) break; threadblock_size *= 2; } dim3 block(threadblock_size); const int TVec_PER_THREAD = (s_reduce_elements / T_PER_TVec + threadblock_size - 1) / threadblock_size; const int shm_size = T_PER_TVec * TVec_PER_THREAD * threadblock_size * sizeof(T); if (shm_size < 48 * 1024) { groupnorm_twopass_store_locally<T, T, T_PER_TVec><<<grid, block, shm_size, stream>>>( output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD); } else { groupnorm_twopass_multiple_load<T, T, T_PER_TVec><<<grid, block, 0, stream>>>( output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD); } } } } //namespace cutlass
17,695
C
42.91067
176
0.545465
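A hedged sketch of calling groupnorm from device_groupnorm.h above on an NHWC float tensor with 32 groups; the shapes, epsilon, and function name run_groupnorm are illustrative, and gamma/beta are [1, 1, 1, C] as the comment above the definition requires.

#include "cutlass/util/host_tensor.h"
#include "cutlass/util/device_groupnorm.h"

void run_groupnorm(cudaStream_t stream) {
  int N = 2, H = 22, W = 22, C = 1280;
  int num_groups = 32;  // C must be a multiple of num_groups

  cutlass::HostTensor<float, cutlass::layout::TensorNHWC> input({N, H, W, C});
  cutlass::HostTensor<float, cutlass::layout::TensorNHWC> output({N, H, W, C});
  cutlass::HostTensor<float, cutlass::layout::TensorNHWC> gamma({1, 1, 1, C});
  cutlass::HostTensor<float, cutlass::layout::TensorNHWC> beta({1, 1, 1, C});

  // ... fill input/gamma/beta host views, then sync_device() each tensor ...

  cutlass::groupnorm<float>(
      {N, H, W, C}, num_groups, /*eps=*/1e-5f,
      output.device_ref(), input.device_ref(),
      gamma.device_ref(), beta.device_ref(),
      stream);

  output.sync_host();
}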
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/debug.h
/***************************************************************************************************
 * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/
/*! \file
    \brief Contains code for debugging cutlass code
*/

#pragma once

#include "device_dump.h"

////////////////////////////////////////////////////////////////////////////////////////////////////

/******************************************************************************
 * Debug and logging macros
 ******************************************************************************/

/**
 * Formats and prints the given message to stdout
 */
#if !defined(CUDA_LOG)
#if !defined(__CUDA_ARCH__)
#define CUDA_LOG(format, ...) printf(format, __VA_ARGS__)
#else
#define CUDA_LOG(format, ...)                              \
  printf("[block (%d,%d,%d), thread (%d,%d,%d)]: " format, \
         blockIdx.x,                                       \
         blockIdx.y,                                       \
         blockIdx.z,                                       \
         threadIdx.x,                                      \
         threadIdx.y,                                      \
         threadIdx.z,                                      \
         __VA_ARGS__);
#endif
#endif

/**
 * Formats and prints the given message to stdout only if DEBUG is defined
 */
#if !defined(CUDA_LOG_DEBUG)
#ifdef DEBUG
#define CUDA_LOG_DEBUG(format, ...) CUDA_LOG(format, __VA_ARGS__)
#else
#define CUDA_LOG_DEBUG(format, ...)
#endif
#endif

/**
 * \brief The corresponding error message is printed to \p stderr (or \p stdout in device code)
 * along with the supplied source context.
 *
 * \return The CUDA error.
 */
__host__ CUTLASS_DEVICE cudaError_t cuda_perror_impl(cudaError_t error,
                                                     const char* expression,
                                                     const char* filename,
                                                     int line) {
  (void)filename;
  (void)line;

  if (error) {
#if !defined(__CUDA_ARCH__)
    fprintf(stderr,
            "CUDA error %d [%s, %d] in expression '%s': %s\n",
            error,
            filename,
            line,
            expression,
            cudaGetErrorString(error));
    fflush(stderr);
#else
    printf("CUDA error %d [%s, %d] in expression '%s'\n", error, filename, line, expression);
#endif
  }

  return error;
}

/**
 * \brief Perror macro
 */
#ifndef CUDA_PERROR
#define CUDA_PERROR(e) cuda_perror_impl((cudaError_t)(e), #e, __FILE__, __LINE__)
#endif

/**
 * \brief Perror macro with exit
 */
#ifndef CUDA_PERROR_EXIT
#define CUDA_PERROR_EXIT(e)                                           \
  do {                                                                \
    if (cuda_perror_impl((cudaError_t)(e), #e, __FILE__, __LINE__)) { \
      exit(1);                                                        \
    }                                                                 \
  } while (0)
#endif

/**
 * \brief Perror macro only if DEBUG is defined
 */
#ifndef CUDA_PERROR_DEBUG
#ifdef DEBUG
#define CUDA_PERROR_DEBUG(e) CUDA_PERROR(e)
#else
#define CUDA_PERROR_DEBUG(e) (e)
#endif
#endif

////////////////////////////////////////////////////////////////////////////////////////////////////

// A small helper class to dump a type at compile time
// Usage: DebugType<Class>::Class
template <typename T>
struct DebugType {};

template <typename T>
void DebugTypeFunc(T const& t) {
  T::t;
}

// A small helper class to dump a compile time constant at compile time
// Usage: DebugValue<Class::kConstant>::kConstant
template <int Value>
struct DebugValue {};
5,104
C
34.451389
129
0.551724
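A hedged sketch of the error-checking macros from debug.h above wrapping CUDA runtime calls; the allocation size is arbitrary and the code assumes compilation with nvcc.

#include <cuda_runtime.h>

#include "cutlass/util/debug.h"

int main() {
  void *ptr = nullptr;

  // Prints the source context and error string, then exits, if the call fails.
  CUDA_PERROR_EXIT(cudaMalloc(&ptr, size_t(1) << 20));

  // CUDA_PERROR only reports the error and returns it, so execution continues.
  if (CUDA_PERROR(cudaMemset(ptr, 0, size_t(1) << 20))) {
    // handle the failure path here
  }

  CUDA_PERROR_EXIT(cudaFree(ptr));
  return 0;
}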