file_path (stringlengths 20-202) | content (stringlengths 9-3.85M) | size (int64 9-3.85M) | lang (stringclasses, 9 values) | avg_line_length (float64 3.33-100) | max_line_length (int64 8-993) | alphanum_fraction (float64 0.26-0.93)
---|---|---|---|---|---|---
NVIDIA/warp/warp/tests/test_volume.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
from typing import Any
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
# float volume tests
@wp.kernel
def test_volume_lookup_f(volume: wp.uint64, points: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
expected = p[0] * p[1] * p[2]
if abs(p[0]) > 10.0 or abs(p[1]) > 10.0 or abs(p[2]) > 10.0:
expected = 10.0
i = int(p[0])
j = int(p[1])
k = int(p[2])
expect_eq(wp.volume_lookup_f(volume, i, j, k), expected)
expect_eq(wp.volume_lookup(volume, i, j, k, dtype=wp.float32), expected)
@wp.kernel
def test_volume_sample_closest_f(volume: wp.uint64, points: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
i = round(p[0])
j = round(p[1])
k = round(p[2])
expected = i * j * k
if abs(i) > 10.0 or abs(j) > 10.0 or abs(k) > 10.0:
expected = 10.0
expect_eq(wp.volume_sample_f(volume, p, wp.Volume.CLOSEST), expected)
expect_eq(wp.volume_sample(volume, p, wp.Volume.CLOSEST, dtype=wp.float32), expected)
q = wp.volume_index_to_world(volume, p)
q_inv = wp.volume_world_to_index(volume, q)
expect_eq(p, q_inv)
@wp.kernel
def test_volume_sample_linear_f(volume: wp.uint64, points: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
expected = p[0] * p[1] * p[2]
if abs(p[0]) > 10.0 or abs(p[1]) > 10.0 or abs(p[2]) > 10.0:
return # not testing against background values
expect_near(wp.volume_sample_f(volume, p, wp.Volume.LINEAR), expected, 2.0e-4)
expect_near(wp.volume_sample(volume, p, wp.Volume.LINEAR, dtype=wp.float32), expected, 2.0e-4)
@wp.kernel
def test_volume_sample_grad_linear_f(volume: wp.uint64, points: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
expected_val = p[0] * p[1] * p[2]
expected_gx = p[1] * p[2]
expected_gy = p[0] * p[2]
expected_gz = p[0] * p[1]
if abs(p[0]) > 10.0 or abs(p[1]) > 10.0 or abs(p[2]) > 10.0:
return # not testing against background values
grad = wp.vec3(0.0, 0.0, 0.0)
val = wp.volume_sample_grad_f(volume, p, wp.Volume.LINEAR, grad)
expect_near(val, expected_val, 2.0e-4)
expect_near(grad[0], expected_gx, 2.0e-4)
expect_near(grad[1], expected_gy, 2.0e-4)
expect_near(grad[2], expected_gz, 2.0e-4)
val = wp.volume_sample_grad(volume, p, wp.Volume.LINEAR, grad, dtype=wp.float32)
expect_near(val, expected_val, 2.0e-4)
expect_near(grad[0], expected_gx, 2.0e-4)
expect_near(grad[1], expected_gy, 2.0e-4)
expect_near(grad[2], expected_gz, 2.0e-4)
@wp.kernel
def test_volume_sample_local_f_linear_values(
volume: wp.uint64, points: wp.array(dtype=wp.vec3), values: wp.array(dtype=wp.float32)
):
tid = wp.tid()
p = points[tid]
values[tid] = wp.volume_sample_f(volume, p, wp.Volume.LINEAR)
@wp.kernel
def test_volume_sample_grad_local_f_linear_values(
volume: wp.uint64, points: wp.array(dtype=wp.vec3), values: wp.array(dtype=wp.float32), case_num: int
):
tid = wp.tid()
p = points[tid]
grad = wp.vec3(0.0, 0.0, 0.0)
val = wp.volume_sample_grad_f(volume, p, wp.Volume.LINEAR, grad)
if case_num == 0:
values[tid] = val
elif case_num == 1:
values[tid] = grad[0]
elif case_num == 2:
values[tid] = grad[1]
elif case_num == 3:
values[tid] = grad[2]
@wp.kernel
def test_volume_sample_world_f_linear_values(
volume: wp.uint64, points: wp.array(dtype=wp.vec3), values: wp.array(dtype=wp.float32)
):
tid = wp.tid()
q = points[tid]
p = wp.volume_world_to_index(volume, q)
values[tid] = wp.volume_sample_f(volume, p, wp.Volume.LINEAR)
@wp.kernel
def test_volume_sample_grad_world_f_linear_values(
volume: wp.uint64, points: wp.array(dtype=wp.vec3), values: wp.array(dtype=wp.float32), case_num: int
):
tid = wp.tid()
q = points[tid]
p = wp.volume_world_to_index(volume, q)
grad = wp.vec3(0.0, 0.0, 0.0)
val = wp.volume_sample_grad_f(volume, p, wp.Volume.LINEAR, grad)
if case_num == 0:
values[tid] = val
elif case_num == 1:
values[tid] = grad[0]
elif case_num == 2:
values[tid] = grad[1]
elif case_num == 3:
values[tid] = grad[2]
# vec3f volume tests
@wp.kernel
def test_volume_lookup_v(volume: wp.uint64, points: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
expected = wp.vec3(
p[0] + 2.0 * p[1] + 3.0 * p[2], 4.0 * p[0] + 5.0 * p[1] + 6.0 * p[2], 7.0 * p[0] + 8.0 * p[1] + 9.0 * p[2]
)
if abs(p[0]) > 10.0 or abs(p[1]) > 10.0 or abs(p[2]) > 10.0:
expected = wp.vec3(10.8, -4.13, 10.26)
i = int(p[0])
j = int(p[1])
k = int(p[2])
expect_eq(wp.volume_lookup_v(volume, i, j, k), expected)
expect_eq(wp.volume_lookup(volume, i, j, k, dtype=wp.vec3), expected)
@wp.kernel
def test_volume_sample_closest_v(volume: wp.uint64, points: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
i = round(p[0])
j = round(p[1])
k = round(p[2])
expected = wp.vec3(i + 2.0 * j + 3.0 * k, 4.0 * i + 5.0 * j + 6.0 * k, 7.0 * i + 8.0 * j + 9.0 * k)
if abs(i) > 10.0 or abs(j) > 10.0 or abs(k) > 10.0:
expected = wp.vec3(10.8, -4.13, 10.26)
expect_eq(wp.volume_sample_v(volume, p, wp.Volume.CLOSEST), expected)
expect_eq(wp.volume_sample(volume, p, wp.Volume.CLOSEST, dtype=wp.vec3), expected)
q = wp.volume_index_to_world(volume, p)
q_inv = wp.volume_world_to_index(volume, q)
expect_eq(p, q_inv)
@wp.kernel
def test_volume_sample_linear_v(volume: wp.uint64, points: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
expected = wp.vec3(
p[0] + 2.0 * p[1] + 3.0 * p[2], 4.0 * p[0] + 5.0 * p[1] + 6.0 * p[2], 7.0 * p[0] + 8.0 * p[1] + 9.0 * p[2]
)
if abs(p[0]) > 10.0 or abs(p[1]) > 10.0 or abs(p[2]) > 10.0:
return # not testing against background values
expect_near(wp.volume_sample_v(volume, p, wp.Volume.LINEAR), expected, 2.0e-4)
expect_near(wp.volume_sample(volume, p, wp.Volume.LINEAR, dtype=wp.vec3), expected, 2.0e-4)
@wp.kernel
def test_volume_sample_grad_linear_v(volume: wp.uint64, points: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
if abs(p[0]) > 10.0 or abs(p[1]) > 10.0 or abs(p[2]) > 10.0:
return # not testing against background values
expected_val = wp.vec3(
p[0] + 2.0 * p[1] + 3.0 * p[2], 4.0 * p[0] + 5.0 * p[1] + 6.0 * p[2], 7.0 * p[0] + 8.0 * p[1] + 9.0 * p[2]
)
expected_grad = wp.mat33(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0)
grad = wp.mat33(0.0)
val = wp.volume_sample_grad(volume, p, wp.Volume.LINEAR, grad, dtype=wp.vec3)
expect_near(val, expected_val, 2.0e-4)
expect_near(grad[0], expected_grad[0], 2.0e-4)
expect_near(grad[1], expected_grad[1], 2.0e-4)
expect_near(grad[2], expected_grad[2], 2.0e-4)
@wp.kernel
def test_volume_sample_local_v_linear_values(
volume: wp.uint64, points: wp.array(dtype=wp.vec3), values: wp.array(dtype=wp.float32)
):
tid = wp.tid()
p = points[tid]
ones = wp.vec3(1.0, 1.0, 1.0)
values[tid] = wp.dot(wp.volume_sample_v(volume, p, wp.Volume.LINEAR), ones)
@wp.kernel
def test_volume_sample_world_v_linear_values(
volume: wp.uint64, points: wp.array(dtype=wp.vec3), values: wp.array(dtype=wp.float32)
):
tid = wp.tid()
q = points[tid]
p = wp.volume_world_to_index(volume, q)
ones = wp.vec3(1.0, 1.0, 1.0)
values[tid] = wp.dot(wp.volume_sample_v(volume, p, wp.Volume.LINEAR), ones)
# int32 volume tests
@wp.kernel
def test_volume_lookup_i(volume: wp.uint64, points: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
i = int(p[0])
j = int(p[1])
k = int(p[2])
expected = i * j * k
if abs(i) > 10 or abs(j) > 10 or abs(k) > 10:
expected = 10
expect_eq(wp.volume_lookup_i(volume, i, j, k), expected)
expect_eq(wp.volume_lookup(volume, i, j, k, dtype=wp.int32), expected)
@wp.kernel
def test_volume_sample_i(volume: wp.uint64, points: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
i = round(p[0])
j = round(p[1])
k = round(p[2])
expected = int(i * j * k)
if abs(i) > 10.0 or abs(j) > 10.0 or abs(k) > 10.0:
expected = 10
expect_eq(wp.volume_sample_i(volume, p), expected)
expect_eq(wp.volume_sample(volume, p, wp.Volume.CLOSEST, dtype=wp.int32), expected)
q = wp.volume_index_to_world(volume, p)
q_inv = wp.volume_world_to_index(volume, q)
expect_eq(p, q_inv)
# Index/world transformation tests
@wp.kernel
def test_volume_index_to_world(
volume: wp.uint64,
points: wp.array(dtype=wp.vec3),
values: wp.array(dtype=wp.float32),
grad_values: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
p = points[tid]
ones = wp.vec3(1.0, 1.0, 1.0)
values[tid] = wp.dot(wp.volume_index_to_world(volume, p), ones)
grad_values[tid] = wp.volume_index_to_world_dir(volume, ones)
@wp.kernel
def test_volume_world_to_index(
volume: wp.uint64,
points: wp.array(dtype=wp.vec3),
values: wp.array(dtype=wp.float32),
grad_values: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
p = points[tid]
ones = wp.vec3(1.0, 1.0, 1.0)
values[tid] = wp.dot(wp.volume_world_to_index(volume, p), ones)
grad_values[tid] = wp.volume_world_to_index_dir(volume, ones)
# Volume write tests
@wp.kernel
def test_volume_store_f(volume: wp.uint64, points: wp.array(dtype=wp.vec3), values: wp.array(dtype=wp.float32)):
tid = wp.tid()
p = points[tid]
i = int(p[0])
j = int(p[1])
k = int(p[2])
wp.volume_store(volume, i, j, k, float(i + 100 * j + 10000 * k))
values[tid] = wp.volume_lookup_f(volume, i, j, k)
@wp.kernel
def test_volume_store_v(volume: wp.uint64, points: wp.array(dtype=wp.vec3), values: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
i = int(p[0])
j = int(p[1])
k = int(p[2])
wp.volume_store(volume, i, j, k, p)
values[tid] = wp.volume_lookup_v(volume, i, j, k)
@wp.kernel
def test_volume_store_i(volume: wp.uint64, points: wp.array(dtype=wp.vec3), values: wp.array(dtype=wp.int32)):
tid = wp.tid()
p = points[tid]
i = int(p[0])
j = int(p[1])
k = int(p[2])
wp.volume_store(volume, i, j, k, i + 100 * j + 10000 * k)
values[tid] = wp.volume_lookup_i(volume, i, j, k)
devices = get_test_devices()
rng = np.random.default_rng(101215)
# Note about the test grids:
# test_grid and test_int32_grid
# active region: [-10,10]^3
# values: v[i,j,k] = i * j * k
# voxel size: 0.25
#
# test_vec_grid
# active region: [-10,10]^3
# values: v[i,j,k] = (i + 2*j + 3*k, 4*i + 5*j + 6*k, 7*i + 8*j + 9*k)
# voxel size: 0.25
#
# torus
# index to world transformation:
# [0.1, 0, 0, 0]
# [0, 0, 0.1, 0]
# [0, 0.1, 0, 0]
# [1, 2, 3, 1]
# (-90 degrees rotation along X)
# voxel size: 0.1
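# Illustrative sketch (an assumption for exposition, not used by the tests below):
# applying the torus index-to-world transform listed above with plain NumPy,
# treating points as row vectors against the homogeneous 4x4 matrix.
# The helper name `torus_index_to_world_np` is hypothetical.
def torus_index_to_world_np(p_index):
    xform = np.array(
        [
            [0.1, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.1, 0.0],
            [0.0, 0.1, 0.0, 0.0],
            [1.0, 2.0, 3.0, 1.0],
        ]
    )
    # index (i, j, k) maps to world (0.1 * i + 1.0, 0.1 * k + 2.0, 0.1 * j + 3.0)
    return (np.append(np.asarray(p_index, dtype=float), 1.0) @ xform)[:3]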
volume_paths = {
"float": os.path.abspath(os.path.join(os.path.dirname(__file__), "assets/test_grid.nvdb")),
"int32": os.path.abspath(os.path.join(os.path.dirname(__file__), "assets/test_int32_grid.nvdb")),
"vec3f": os.path.abspath(os.path.join(os.path.dirname(__file__), "assets/test_vec_grid.nvdb")),
"index": os.path.abspath(os.path.join(os.path.dirname(__file__), "assets/test_index_grid.nvdb")),
"torus": os.path.abspath(os.path.join(os.path.dirname(__file__), "assets/torus.nvdb")),
"float_write": os.path.abspath(os.path.join(os.path.dirname(__file__), "assets/test_grid.nvdb")),
}
test_volume_tiles = (
np.array([[i, j, k] for i in range(-2, 2) for j in range(-2, 2) for k in range(-2, 2)], dtype=np.int32) * 8
)
volumes = {}
for value_type, path in volume_paths.items():
volumes[value_type] = {}
volume_data = open(path, "rb").read()
for device in devices:
try:
volume = wp.Volume.load_from_nvdb(volume_data, device)
except RuntimeError as e:
raise RuntimeError(f'Failed to load volume from "{path}" to {device} memory:\n{e}') from e
volumes[value_type][device.alias] = volume
axis = np.linspace(-1, 1, 3)
point_grid = np.array([[x, y, z] for x in axis for y in axis for z in axis], dtype=np.float32)
def test_volume_sample_linear_f_gradient(test, device):
points = rng.uniform(-10.0, 10.0, size=(100, 3))
values = wp.array(np.zeros(1), dtype=wp.float32, device=device, requires_grad=True)
for test_case in points:
uvws = wp.array(test_case, dtype=wp.vec3, device=device, requires_grad=True)
xyzs = wp.array(test_case * 0.25, dtype=wp.vec3, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(
test_volume_sample_local_f_linear_values,
dim=1,
inputs=[volumes["float"][device.alias].id, uvws, values],
device=device,
)
tape.backward(values)
x, y, z = test_case
grad_expected = np.array([y * z, x * z, x * y])
grad_computed = tape.gradients[uvws].numpy()[0]
np.testing.assert_allclose(grad_computed, grad_expected, rtol=1e-4)
tape = wp.Tape()
with tape:
wp.launch(
test_volume_sample_world_f_linear_values,
dim=1,
inputs=[volumes["float"][device.alias].id, xyzs, values],
device=device,
)
tape.backward(values)
x, y, z = test_case
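# the world-space sample goes through volume_world_to_index, which divides by the
# 0.25 voxel size, so the gradient w.r.t. world coordinates picks up a factor of 1 / 0.25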
grad_expected = np.array([y * z, x * z, x * y]) / 0.25
grad_computed = tape.gradients[xyzs].numpy()[0]
np.testing.assert_allclose(grad_computed, grad_expected, rtol=1e-4)
def test_volume_sample_grad_linear_f_gradient(test, device):
points = rng.uniform(-10.0, 10.0, size=(100, 3))
values = wp.array(np.zeros(1), dtype=wp.float32, device=device, requires_grad=True)
for test_case in points:
uvws = wp.array(test_case, dtype=wp.vec3, device=device, requires_grad=True)
xyzs = wp.array(test_case * 0.25, dtype=wp.vec3, device=device, requires_grad=True)
for case_num in range(4):
tape = wp.Tape()
with tape:
wp.launch(
test_volume_sample_grad_local_f_linear_values,
dim=1,
inputs=[volumes["float"][device.alias].id, uvws, values, case_num],
device=device,
)
tape.backward(values)
x, y, z = test_case
grad_computed = tape.gradients[uvws].numpy()[0]
if case_num == 0:
grad_expected = np.array([y * z, x * z, x * y])
elif case_num == 1:
grad_expected = np.array([0.0, z, y])
elif case_num == 2:
grad_expected = np.array([z, 0.0, x])
elif case_num == 3:
grad_expected = np.array([y, x, 0.0])
np.testing.assert_allclose(grad_computed, grad_expected, rtol=1e-4)
tape.zero()
for case_num in range(4):
tape = wp.Tape()
with tape:
wp.launch(
test_volume_sample_grad_world_f_linear_values,
dim=1,
inputs=[volumes["float"][device.alias].id, xyzs, values, case_num],
device=device,
)
tape.backward(values)
x, y, z = test_case
grad_computed = tape.gradients[xyzs].numpy()[0]
if case_num == 0:
grad_expected = np.array([y * z, x * z, x * y]) / 0.25
elif case_num == 1:
grad_expected = np.array([0.0, z, y]) / 0.25
elif case_num == 2:
grad_expected = np.array([z, 0.0, x]) / 0.25
elif case_num == 3:
grad_expected = np.array([y, x, 0.0]) / 0.25
np.testing.assert_allclose(grad_computed, grad_expected, rtol=1e-4)
tape.zero()
def test_volume_sample_linear_v_gradient(test, device):
points = rng.uniform(-10.0, 10.0, size=(100, 3))
values = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
for test_case in points:
uvws = wp.array(test_case, dtype=wp.vec3, device=device, requires_grad=True)
xyzs = wp.array(test_case * 0.25, dtype=wp.vec3, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(
test_volume_sample_local_v_linear_values,
dim=1,
inputs=[volumes["vec3f"][device.alias].id, uvws, values],
device=device,
)
tape.backward(values)
grad_expected = np.array([12.0, 15.0, 18.0])
grad_computed = tape.gradients[uvws].numpy()[0]
np.testing.assert_allclose(grad_computed, grad_expected, rtol=1e-4)
tape = wp.Tape()
with tape:
wp.launch(
test_volume_sample_world_v_linear_values,
dim=1,
inputs=[volumes["vec3f"][device.alias].id, xyzs, values],
device=device,
)
tape.backward(values)
grad_expected = np.array([12.0, 15.0, 18.0]) / 0.25
grad_computed = tape.gradients[xyzs].numpy()[0]
np.testing.assert_allclose(grad_computed, grad_expected, rtol=1e-4)
def test_volume_transform_gradient(test, device):
values = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
grad_values = wp.zeros(1, dtype=wp.vec3, device=device)
test_points = rng.uniform(-10.0, 10.0, size=(10, 3))
for test_case in test_points:
points = wp.array(test_case, dtype=wp.vec3, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(
test_volume_index_to_world,
dim=1,
inputs=[volumes["torus"][device.alias].id, points, values, grad_values],
device=device,
)
tape.backward(values)
grad_computed = tape.gradients[points].numpy()
grad_expected = grad_values.numpy()
np.testing.assert_allclose(grad_computed, grad_expected, rtol=1e-4)
grad_computed = tape.gradients[points].numpy()
grad_expected = grad_values.numpy()
np.testing.assert_allclose(grad_computed, grad_expected, rtol=1e-4)
def test_volume_store(test, device):
values_ref = np.array([x + 100 * y + 10000 * z for x, y, z in point_grid])
points = wp.array(point_grid, dtype=wp.vec3, device=device)
values = wp.empty(len(point_grid), dtype=wp.float32, device=device)
wp.launch(
test_volume_store_f,
dim=len(point_grid),
inputs=[volumes["float_write"][device.alias].id, points, values],
device=device,
)
values_res = values.numpy()
np.testing.assert_equal(values_res, values_ref)
def test_volume_allocation_f(test, device):
bg_value = -123.0
points_np = np.append(point_grid, [[8096, 8096, 8096]], axis=0)
values_ref = np.append(np.array([x + 100 * y + 10000 * z for x, y, z in point_grid]), bg_value)
volume = wp.Volume.allocate(min=[-11, -11, -11], max=[11, 11, 11], voxel_size=0.1, bg_value=bg_value, device=device)
points = wp.array(points_np, dtype=wp.vec3, device=device)
values = wp.empty(len(points_np), dtype=wp.float32, device=device)
wp.launch(test_volume_store_f, dim=len(points_np), inputs=[volume.id, points, values], device=device)
values_res = values.numpy()
np.testing.assert_equal(values_res, values_ref)
def test_volume_allocation_v(test, device):
bg_value = (-1, 2.0, -3)
points_np = np.append(point_grid, [[8096, 8096, 8096]], axis=0)
values_ref = np.append(point_grid, [bg_value], axis=0)
volume = wp.Volume.allocate(min=[-11, -11, -11], max=[11, 11, 11], voxel_size=0.1, bg_value=bg_value, device=device)
points = wp.array(points_np, dtype=wp.vec3, device=device)
values = wp.empty(len(points_np), dtype=wp.vec3, device=device)
wp.launch(test_volume_store_v, dim=len(points_np), inputs=[volume.id, points, values], device=device)
values_res = values.numpy()
np.testing.assert_equal(values_res, values_ref)
def test_volume_allocation_i(test, device):
bg_value = -123
points_np = np.append(point_grid, [[8096, 8096, 8096]], axis=0)
values_ref = np.append(np.array([x + 100 * y + 10000 * z for x, y, z in point_grid], dtype=np.int32), bg_value)
volume = wp.Volume.allocate(min=[-11, -11, -11], max=[11, 11, 11], voxel_size=0.1, bg_value=bg_value, device=device)
points = wp.array(points_np, dtype=wp.vec3, device=device)
values = wp.empty(len(points_np), dtype=wp.int32, device=device)
wp.launch(test_volume_store_i, dim=len(points_np), inputs=[volume.id, points, values], device=device)
values_res = values.numpy()
np.testing.assert_equal(values_res, values_ref)
def test_volume_introspection(test, device):
for volume_names in ("float", "vec3f"):
with test.subTest(volume_names=volume_names):
volume = volumes[volume_names][device.alias]
tiles_actual = volume.get_tiles().numpy()
tiles_sorted = tiles_actual[np.lexsort(tiles_actual.T[::-1])]
voxel_size = np.array(volume.get_voxel_size())
np.testing.assert_equal(test_volume_tiles, tiles_sorted)
np.testing.assert_equal([0.25] * 3, voxel_size)
voxel_count = volume.get_voxel_count()
voxels_actual = volume.get_voxels().numpy()
assert voxel_count == voxels_actual.shape[0]
# Voxel coordinates should be unique
voxels_unique = np.unique(voxels_actual, axis=0)
assert voxel_count == voxels_unique.shape[0]
# Get back tiles from voxels, should match get_tiles()
voxel_tiles = 8 * (voxels_unique // 8)
voxel_tiles_sorted = voxel_tiles[np.lexsort(voxel_tiles.T[::-1])]
voxel_tiles_unique = np.unique(voxel_tiles_sorted, axis=0)
np.testing.assert_equal(voxel_tiles_unique, tiles_sorted)
def test_volume_multiple_grids(test, device):
volume = volumes["index"][device.alias]
volume_2 = volume.load_next_grid()
test.assertIsNotNone(volume_2)
test.assertNotEqual(volume.id, volume_2.id)
test.assertNotEqual(volume.get_voxel_count(), volume_2.get_voxel_count())
test.assertEqual(volume.get_grid_info().grid_count, volume_2.get_grid_info().grid_count)
test.assertEqual(volume.get_grid_info().grid_index + 1, volume_2.get_grid_info().grid_index)
volume_3 = volume_2.load_next_grid()
test.assertIsNone(volume_3)
def test_volume_feature_array(test, device):
volume = volumes["index"][device.alias]
test.assertEqual(volume.get_feature_array_count(), 1)
array = volume.feature_array(0, dtype=wp.uint64)
test.assertEqual(array.device, device)
test.assertEqual(array.dtype, wp.uint64)
# fVDB convention, data starts with array ndim + shape
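# i.e. the first entries are ndim = 3 followed by the shape (voxel_count, 2, 3) of the feature data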
np.testing.assert_equal(array.numpy()[0:4], [3, volume.get_voxel_count(), 2, 3])
@wp.kernel
def fill_leaf_values_kernel(volume: wp.uint64, ijk: wp.array2d(dtype=wp.int32), values: wp.array(dtype=Any)):
tid = wp.tid()
i = ijk[tid, 0]
j = ijk[tid, 1]
k = ijk[tid, 2]
expect_eq(tid, wp.volume_lookup_index(volume, i, j, k))
values[tid] = wp.volume_lookup(volume, i, j, k, dtype=values.dtype)
@wp.kernel
def test_volume_sample_index_kernel(
volume: wp.uint64,
points: wp.array(dtype=wp.vec3),
values: wp.array(dtype=Any),
background: wp.array(dtype=Any),
sampled_values: wp.array(dtype=Any),
):
tid = wp.tid()
p = points[tid]
ref = wp.volume_sample(volume, p, wp.Volume.LINEAR, dtype=values.dtype)
sampled_values[tid] = wp.volume_sample_index(volume, p, wp.Volume.LINEAR, values, background[0])
expect_eq(sampled_values[tid], ref)
@wp.kernel
def test_volume_sample_grad_index_kernel(
volume: wp.uint64,
points: wp.array(dtype=wp.vec3),
values: wp.array(dtype=Any),
background: wp.array(dtype=Any),
sampled_values: wp.array(dtype=Any),
sampled_grads: wp.array(dtype=Any),
):
tid = wp.tid()
p = points[tid]
ref_grad = sampled_grads.dtype()
ref = wp.volume_sample_grad(volume, p, wp.Volume.LINEAR, ref_grad, dtype=values.dtype)
grad = type(ref_grad)()
sampled_values[tid] = wp.volume_sample_grad_index(volume, p, wp.Volume.LINEAR, values, background[0], grad)
expect_eq(sampled_values[tid], ref)
expect_eq(grad[0], ref_grad[0])
expect_eq(grad[1], ref_grad[1])
expect_eq(grad[2], ref_grad[2])
sampled_grads[tid] = grad
def test_volume_sample_index(test, device):
points = rng.uniform(-10.0, 10.0, size=(100, 3))
points[0:10, 0] += 100.0 # ensure some points are over unallocated voxels
uvws = wp.array(points, dtype=wp.vec3, device=device)
bg_values = {
"float": 10.0,
"vec3f": wp.vec3(10.8, -4.13, 10.26),
}
grad_types = {
"float": wp.vec3,
"vec3f": wp.mat33,
}
for volume_names in ("float", "vec3f"):
with test.subTest(volume_names=volume_names):
volume = volumes[volume_names][device.alias]
ijk = volume.get_voxels()
values = wp.empty(shape=volume.get_voxel_count(), dtype=volume.dtype, device=device, requires_grad=True)
vid = wp.uint64(volume.id)
wp.launch(fill_leaf_values_kernel, dim=values.shape, inputs=[vid, ijk, values], device=device)
sampled_values = wp.empty(shape=points.shape[0], dtype=volume.dtype, device=device, requires_grad=True)
background = wp.array([bg_values[volume_names]], dtype=volume.dtype, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(
test_volume_sample_index_kernel,
dim=points.shape[0],
inputs=[vid, uvws, values, background, sampled_values],
device=device,
)
sampled_values.grad.fill_(1.0)
tape.backward()
# test adjoint w.r.t voxel and background value arrays
# we should have sum(sampled_values) = sum(adj_values * values) + (adj_background * background)
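# this holds because volume_sample_index is linear in the voxel values and in the background value:
# with all output adjoints set to 1, each voxel's adjoint accumulates its total interpolation weight,
# so contracting adjoints with values reproduces the sum of the sampled values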
sum_sampled_values = np.sum(sampled_values.numpy(), axis=0)
sum_values_adj = np.sum(values.numpy() * values.grad.numpy(), axis=0)
sum_background_adj = background.numpy()[0] * background.grad.numpy()[0]
np.testing.assert_allclose(sum_sampled_values, sum_values_adj + sum_background_adj, rtol=1.0e-3)
tape.reset()
sampled_grads = wp.empty(
shape=points.shape[0], dtype=grad_types[volume_names], device=device, requires_grad=True
)
with tape:
wp.launch(
test_volume_sample_grad_index_kernel,
dim=points.shape[0],
inputs=[vid, uvws, values, background, sampled_values, sampled_grads],
device=device,
)
sampled_values.grad.fill_(1.0)
tape.backward()
# we should have sum(sampled_values) = sum(adj_values * values) + (adj_background * background)
sum_sampled_values = np.sum(sampled_values.numpy(), axis=0)
sum_values_adj = np.sum(values.numpy() * values.grad.numpy(), axis=0)
sum_background_adj = background.numpy()[0] * background.grad.numpy()[0]
np.testing.assert_allclose(sum_sampled_values, sum_values_adj + sum_background_adj, rtol=1.0e-3)
tape.zero()
sampled_values.grad.fill_(0.0)
sampled_grads.grad.fill_(1.0)
tape.backward()
# we should have sum(sampled_grad, axes=(0, -1)) = sum(adj_values * values) + (adj_background * background)
sum_sampled_grads = np.sum(np.sum(sampled_grads.numpy(), axis=0), axis=-1)
sum_values_adj = np.sum(values.numpy() * values.grad.numpy(), axis=0)
sum_background_adj = background.numpy()[0] * background.grad.numpy()[0]
np.testing.assert_allclose(sum_sampled_grads, sum_values_adj + sum_background_adj, rtol=1.0e-3)
def test_volume_from_numpy(test, device):
# Volume.allocate_by_tiles() is only available with CUDA
mins = np.array([-3.0, -3.0, -3.0])
voxel_size = 0.2
maxs = np.array([3.0, 3.0, 3.0])
nums = np.ceil((maxs - mins) / (voxel_size)).astype(dtype=int)
center = np.array([0.0, 0.0, 0.0])
rad = 2.5
sphere_sdf_np = np.zeros(tuple(nums))
for x in range(nums[0]):
for y in range(nums[1]):
for z in range(nums[2]):
pos = mins + voxel_size * np.array([x, y, z])
dis = np.linalg.norm(pos - center)
sphere_sdf_np[x, y, z] = dis - rad
sphere_vdb = wp.Volume.load_from_numpy(sphere_sdf_np, mins, voxel_size, rad + 3.0 * voxel_size, device=device)
test.assertNotEqual(sphere_vdb.id, 0)
sphere_vdb_array = sphere_vdb.array()
test.assertEqual(sphere_vdb_array.dtype, wp.uint8)
test.assertIsNone(sphere_vdb_array.deleter)
class TestVolume(unittest.TestCase):
pass
add_function_test(
TestVolume, "test_volume_sample_linear_f_gradient", test_volume_sample_linear_f_gradient, devices=devices
)
add_function_test(
TestVolume, "test_volume_sample_grad_linear_f_gradient", test_volume_sample_grad_linear_f_gradient, devices=devices
)
add_function_test(
TestVolume, "test_volume_sample_linear_v_gradient", test_volume_sample_linear_v_gradient, devices=devices
)
add_function_test(TestVolume, "test_volume_transform_gradient", test_volume_transform_gradient, devices=devices)
add_function_test(TestVolume, "test_volume_store", test_volume_store, devices=devices)
add_function_test(
TestVolume, "test_volume_allocation_f", test_volume_allocation_f, devices=get_selected_cuda_test_devices()
)
add_function_test(
TestVolume, "test_volume_allocation_v", test_volume_allocation_v, devices=get_selected_cuda_test_devices()
)
add_function_test(
TestVolume, "test_volume_allocation_i", test_volume_allocation_i, devices=get_selected_cuda_test_devices()
)
add_function_test(TestVolume, "test_volume_introspection", test_volume_introspection, devices=devices)
add_function_test(
TestVolume, "test_volume_from_numpy", test_volume_from_numpy, devices=get_selected_cuda_test_devices()
)
add_function_test(TestVolume, "test_volume_multiple_grids", test_volume_multiple_grids, devices=devices)
add_function_test(TestVolume, "test_volume_feature_array", test_volume_feature_array, devices=devices)
add_function_test(TestVolume, "test_volume_sample_index", test_volume_sample_index, devices=devices)
points = {}
points_jittered = {}
for device in devices:
points_jittered_np = point_grid + rng.uniform(-0.5, 0.5, size=point_grid.shape)
points[device.alias] = wp.array(point_grid, dtype=wp.vec3, device=device)
points_jittered[device.alias] = wp.array(points_jittered_np, dtype=wp.vec3, device=device)
add_kernel_test(
TestVolume,
test_volume_lookup_f,
dim=len(point_grid),
inputs=[volumes["float"][device.alias].id, points[device.alias]],
devices=[device],
)
add_kernel_test(
TestVolume,
test_volume_sample_closest_f,
dim=len(point_grid),
inputs=[volumes["float"][device.alias].id, points_jittered[device.alias]],
devices=[device.alias],
)
add_kernel_test(
TestVolume,
test_volume_sample_linear_f,
dim=len(point_grid),
inputs=[volumes["float"][device.alias].id, points_jittered[device.alias]],
devices=[device.alias],
)
add_kernel_test(
TestVolume,
test_volume_sample_grad_linear_f,
dim=len(point_grid),
inputs=[volumes["float"][device.alias].id, points_jittered[device.alias]],
devices=[device.alias],
)
add_kernel_test(
TestVolume,
test_volume_lookup_v,
dim=len(point_grid),
inputs=[volumes["vec3f"][device.alias].id, points[device.alias]],
devices=[device.alias],
)
add_kernel_test(
TestVolume,
test_volume_sample_closest_v,
dim=len(point_grid),
inputs=[volumes["vec3f"][device.alias].id, points_jittered[device.alias]],
devices=[device.alias],
)
add_kernel_test(
TestVolume,
test_volume_sample_linear_v,
dim=len(point_grid),
inputs=[volumes["vec3f"][device.alias].id, points_jittered[device.alias]],
devices=[device.alias],
)
add_kernel_test(
TestVolume,
test_volume_sample_grad_linear_v,
dim=len(point_grid),
inputs=[volumes["vec3f"][device.alias].id, points_jittered[device.alias]],
devices=[device.alias],
)
add_kernel_test(
TestVolume,
test_volume_lookup_i,
dim=len(point_grid),
inputs=[volumes["int32"][device.alias].id, points[device.alias]],
devices=[device.alias],
)
add_kernel_test(
TestVolume,
test_volume_sample_i,
dim=len(point_grid),
inputs=[volumes["int32"][device.alias].id, points_jittered[device.alias]],
devices=[device.alias],
)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 33,909 | Python | 34.24948 | 120 | 0.605591 |
NVIDIA/warp/warp/tests/test_volume_write.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
# Volume write tests
@wp.kernel
def test_volume_store_f(volume: wp.uint64, points: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
i = int(p[0])
j = int(p[1])
k = int(p[2])
wp.volume_store_f(volume, i, j, k, float(i + 100 * j + 10000 * k))
@wp.kernel
def test_volume_readback_f(volume: wp.uint64, points: wp.array(dtype=wp.vec3), values: wp.array(dtype=wp.float32)):
tid = wp.tid()
p = points[tid]
i = int(p[0])
j = int(p[1])
k = int(p[2])
values[tid] = wp.volume_lookup_f(volume, i, j, k)
@wp.kernel
def test_get_list_of_tiles(
volume: wp.uint64,
points_is: wp.array2d(dtype=wp.int32),
points_ws: wp.array(dtype=wp.vec3),
tiles_is: wp.array2d(dtype=wp.int32),
tiles_ws: wp.array2d(dtype=wp.int32),
):
tid = wp.tid()
tiles_is[tid, 0] = points_is[tid, 0]
tiles_is[tid, 1] = points_is[tid, 1]
tiles_is[tid, 2] = points_is[tid, 2]
q = wp.volume_world_to_index(volume, points_ws[tid])
tiles_ws[tid, 0] = int(q[0] / 8.0) * 8
tiles_ws[tid, 1] = int(q[1] / 8.0) * 8
tiles_ws[tid, 2] = int(q[2] / 8.0) * 8
@wp.kernel
def test_volume_tile_store_f(volume: wp.uint64, tiles: wp.array2d(dtype=wp.int32)):
tid = wp.tid()
ti = tiles[tid, 0]
tj = tiles[tid, 1]
tk = tiles[tid, 2]
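# each tile spans 8x8x8 = 512 voxels; the flat index r is decoded below into local
# offsets (r / 64) % 8, (r / 8) % 8 and r % 8 (division between int operands
# truncates inside Warp kernels)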
for r in range(512):
ii = ti + (r / 64) % 8
jj = tj + (r / 8) % 8
kk = tk + r % 8
wp.volume_store_f(volume, ii, jj, kk, float(100 * ii + 10 * jj + kk))
@wp.kernel
def test_volume_tile_store_ws_f(volume: wp.uint64, tiles: wp.array(dtype=wp.vec3)):
tid = wp.tid()
q = wp.volume_world_to_index(volume, tiles[tid])
ti = int(wp.round(q[0]))
tj = int(wp.round(q[1]))
tk = int(wp.round(q[2]))
for r in range(512):
ii = ti + (r / 64) % 8
jj = tj + (r / 8) % 8
kk = tk + r % 8
wp.volume_store_f(volume, ii, jj, kk, float(100 * ii + 10 * jj + kk))
@wp.kernel
def test_volume_tile_readback_f(
volume: wp.uint64, tiles: wp.array2d(dtype=wp.int32), values: wp.array(dtype=wp.float32)
):
tid = wp.tid()
ti = tiles[tid, 0]
tj = tiles[tid, 1]
tk = tiles[tid, 2]
for r in range(512):
ii = ti + (r / 64) % 8
jj = tj + (r / 8) % 8
kk = tk + r % 8
values[tid * 512 + r] = wp.volume_lookup_f(volume, ii, jj, kk)
@wp.kernel
def test_volume_tile_store_v(volume: wp.uint64, tiles: wp.array2d(dtype=wp.int32)):
tid = wp.tid()
ti = tiles[tid, 0]
tj = tiles[tid, 1]
tk = tiles[tid, 2]
for r in range(512):
ii = ti + (r / 64) % 8
jj = tj + (r / 8) % 8
kk = tk + r % 8
wp.volume_store_v(volume, ii, jj, kk, wp.vec3(float(ii), float(jj), float(kk)))
@wp.kernel
def test_volume_tile_readback_v(volume: wp.uint64, tiles: wp.array2d(dtype=wp.int32), values: wp.array(dtype=wp.vec3)):
tid = wp.tid()
ti = tiles[tid, 0]
tj = tiles[tid, 1]
tk = tiles[tid, 2]
for r in range(512):
ii = ti + (r / 64) % 8
jj = tj + (r / 8) % 8
kk = tk + r % 8
values[tid * 512 + r] = wp.volume_lookup_v(volume, ii, jj, kk)
def test_volume_allocation(test, device):
voxel_size = 0.125
background_value = 123.456
translation = wp.vec3(-12.3, 4.56, -789)
axis = np.linspace(-11, 11, 23)
points_ref = np.array([[x, y, z] for x in axis for y in axis for z in axis])
values_ref = np.array([x + 100 * y + 10000 * z for x in axis for y in axis for z in axis])
num_points = len(points_ref)
bb_max = np.array([11, 11, 11])
volume_a = wp.Volume.allocate(
-bb_max,
bb_max,
voxel_size=voxel_size,
bg_value=background_value,
translation=translation,
device=device,
)
volume_b = wp.Volume.allocate(
-bb_max * voxel_size + translation,
bb_max * voxel_size + translation,
voxel_size=voxel_size,
bg_value=background_value,
translation=translation,
points_in_world_space=True,
device=device,
)
assert wp.types.types_equal(volume_a.dtype, wp.float32)
assert wp.types.types_equal(volume_b.dtype, wp.float32)
points = wp.array(points_ref, dtype=wp.vec3, device=device)
values_a = wp.empty(num_points, dtype=wp.float32, device=device)
values_b = wp.empty(num_points, dtype=wp.float32, device=device)
wp.launch(test_volume_store_f, dim=num_points, inputs=[volume_a.id, points], device=device)
wp.launch(test_volume_store_f, dim=num_points, inputs=[volume_b.id, points], device=device)
wp.launch(test_volume_readback_f, dim=num_points, inputs=[volume_a.id, points, values_a], device=device)
wp.launch(test_volume_readback_f, dim=num_points, inputs=[volume_b.id, points, values_b], device=device)
np.testing.assert_equal(values_a.numpy(), values_ref)
np.testing.assert_equal(values_b.numpy(), values_ref)
def test_volume_allocate_by_tiles_f(test, device):
voxel_size = 0.125
background_value = 123.456
translation = wp.vec3(-12.3, 4.56, -789)
num_tiles = 1000
rng = np.random.default_rng(101215)
tiles = rng.integers(-512, 512, size=(num_tiles, 3), dtype=np.int32)
points_is = tiles * 8 # points in index space
points_ws = points_is * voxel_size + translation # points in world space
values_ref = np.empty(num_tiles * 512)
for t in range(num_tiles):
ti, tj, tk = points_is[t]
for i in range(8):
for j in range(8):
for k in range(8):
values_ref[t * 512 + i * 64 + j * 8 + k] = float(100 * (ti + i) + 10 * (tj + j) + (tk + k))
points_is_d = wp.array(points_is, dtype=wp.int32, device=device)
points_ws_d = wp.array(points_ws, dtype=wp.vec3, device=device)
volume_a = wp.Volume.allocate_by_tiles(points_is_d, voxel_size, background_value, translation, device=device)
volume_b = wp.Volume.allocate_by_tiles(points_ws_d, voxel_size, background_value, translation, device=device)
assert wp.types.types_equal(volume_a.dtype, wp.float32)
assert wp.types.types_equal(volume_b.dtype, wp.float32)
values_a = wp.empty(num_tiles * 512, dtype=wp.float32, device=device)
values_b = wp.empty(num_tiles * 512, dtype=wp.float32, device=device)
wp.launch(test_volume_tile_store_f, dim=num_tiles, inputs=[volume_a.id, points_is_d], device=device)
wp.launch(test_volume_tile_store_ws_f, dim=num_tiles, inputs=[volume_b.id, points_ws_d], device=device)
wp.launch(test_volume_tile_readback_f, dim=num_tiles, inputs=[volume_a.id, points_is_d, values_a], device=device)
wp.launch(test_volume_tile_readback_f, dim=num_tiles, inputs=[volume_b.id, points_is_d, values_b], device=device)
np.testing.assert_equal(values_a.numpy(), values_ref)
np.testing.assert_equal(values_b.numpy(), values_ref)
def test_volume_allocate_by_tiles_v(test, device):
num_tiles = 1000
rng = np.random.default_rng(101215)
tiles = rng.integers(-512, 512, size=(num_tiles, 3), dtype=np.int32)
points_is = tiles * 8
values_ref = np.empty((len(tiles) * 512, 3))
for t in range(len(tiles)):
ti, tj, tk = points_is[t]
for i in range(8):
for j in range(8):
for k in range(8):
values_ref[t * 512 + i * 64 + j * 8 + k] = [ti + i, tj + j, tk + k]
points_d = wp.array(points_is, dtype=wp.int32, device=device)
volume = wp.Volume.allocate_by_tiles(points_d, 0.1, wp.vec3(1, 2, 3), device=device)
assert wp.types.types_equal(volume.dtype, wp.vec3)
values = wp.empty(len(points_d) * 512, dtype=wp.vec3, device=device)
wp.launch(test_volume_tile_store_v, dim=len(points_d), inputs=[volume.id, points_d], device=device)
wp.launch(test_volume_tile_readback_v, dim=len(points_d), inputs=[volume.id, points_d, values], device=device)
values_res = values.numpy()
np.testing.assert_equal(values_res, values_ref)
def test_volume_allocate_by_tiles_index(test, device):
num_tiles = 10
rng = np.random.default_rng(101215)
tiles = rng.integers(-512, 512, size=(num_tiles, 3), dtype=np.int32)
points_is = tiles * 8
points_d = wp.array(points_is, dtype=wp.int32, device=device)
volume = wp.Volume.allocate_by_tiles(points_d, 0.1, bg_value=None, device=device)
assert volume.is_index
vol_tiles = volume.get_tiles().numpy() / 8
vol_tile_sorted = vol_tiles[np.lexsort(vol_tiles.T[::-1])]
vol_tile_unique = np.unique(vol_tile_sorted, axis=0)
tile_sorted = tiles[np.lexsort(tiles.T[::-1])]
tile_unique = np.unique(tile_sorted, axis=0)
np.testing.assert_equal(tile_unique, vol_tile_unique)
def test_volume_allocation_from_voxels(test, device):
point_count = 387
rng = np.random.default_rng(101215)
# Create from world-space points
points = wp.array(rng.uniform(5.0, 10.0, size=(point_count, 3)), dtype=float, device=device)
volume = wp.Volume.allocate_by_voxels(
voxel_points=points, voxel_size=0.25, translation=(0.0, 5.0, 10.0), device=device
)
assert volume.is_index
test.assertNotEqual(volume.id, 0)
test.assertAlmostEqual(volume.get_voxel_size(), (0.25, 0.25, 0.25))
voxel_count = volume.get_voxel_count()
test.assertGreaterEqual(point_count, voxel_count)
test.assertGreaterEqual(voxel_count, 1)
voxels = volume.get_voxels()
# Check that world-to-index transform has been correctly applied
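# with voxel size 0.25 and translation (0.0, 5.0, 10.0), world points in [5, 10]^3 map to
# index coordinates of roughly x in [20, 40], y in [0, 20], z in [-20, 0], hence the bounds below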
voxel_low = np.min(voxels.numpy(), axis=0)
voxel_up = np.max(voxels.numpy(), axis=0)
np.testing.assert_array_less([19, -1, -21], voxel_low)
np.testing.assert_array_less(voxel_up, [41, 21, 1])
# Recreate the volume from ijk coords
volume_from_ijk = wp.Volume.allocate_by_voxels(
voxel_points=voxels, voxel_size=0.25, translation=(0.0, 5.0, 10.0), device=device
)
assert volume_from_ijk.is_index
assert volume_from_ijk.get_voxel_count() == voxel_count
ijk_voxels = volume_from_ijk.get_voxels().numpy()
voxels = voxels.numpy()
voxel_sorted = voxels[np.lexsort(voxels.T[::-1])]
ijk_voxel_sorted = ijk_voxels[np.lexsort(ijk_voxels.T[::-1])]
np.testing.assert_equal(voxel_sorted, ijk_voxel_sorted)
devices = get_selected_cuda_test_devices()
class TestVolumeWrite(unittest.TestCase):
pass
add_function_test(TestVolumeWrite, "test_volume_allocation", test_volume_allocation, devices=devices)
add_function_test(TestVolumeWrite, "test_volume_allocate_by_tiles_f", test_volume_allocate_by_tiles_f, devices=devices)
add_function_test(TestVolumeWrite, "test_volume_allocate_by_tiles_v", test_volume_allocate_by_tiles_v, devices=devices)
add_function_test(
TestVolumeWrite, "test_volume_allocate_by_tiles_index", test_volume_allocate_by_tiles_index, devices=devices
)
add_function_test(
TestVolumeWrite,
"test_volume_allocation_from_voxels",
test_volume_allocation_from_voxels,
devices=devices,
)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 11,531 | Python | 33.017699 | 119 | 0.635678 |
NVIDIA/warp/warp/tests/test_struct.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
from typing import Any
import numpy as np
import warp as wp
from warp.fem import Sample as StructFromAnotherModule
from warp.tests.unittest_utils import *
@wp.struct
class Model:
dt: float
gravity: wp.vec3
m: wp.array(dtype=float)
@wp.struct
class State:
x: wp.array(dtype=wp.vec3)
v: wp.array(dtype=wp.vec3)
@wp.kernel
def kernel_step(state_in: State, state_out: State, model: Model):
i = wp.tid()
state_out.v[i] = state_in.v[i] + model.gravity / model.m[i] * model.dt
state_out.x[i] = state_in.x[i] + state_out.v[i] * model.dt
@wp.kernel
def kernel_step_with_copy(state_in: State, state_out: State, model: Model):
i = wp.tid()
model_rescaled = Model(1.0, model.gravity / model.m[i] * model.dt, model.m)
state_out_copy = State(state_out.x, state_out.v)
state_out_copy.v[i] = state_in.v[i] + model_rescaled.gravity
state_out_copy.x[i] = state_in.x[i] + state_out_copy.v[i] * model.dt
def test_step(test, device):
rng = np.random.default_rng(123)
dim = 5
dt = 0.01
gravity = np.array([0, 0, -9.81])
m = np.ones(dim)
m_model = wp.array(m, dtype=float, device=device)
model = Model()
model.m = m_model
model.dt = dt
model.gravity = wp.vec3(0, 0, -9.81)
x = rng.normal(size=(dim, 3))
v = rng.normal(size=(dim, 3))
x_expected = x + (v + gravity / m[:, None] * dt) * dt
x_in = wp.array(x, dtype=wp.vec3, device=device)
v_in = wp.array(v, dtype=wp.vec3, device=device)
state_in = State()
state_in.x = x_in
state_in.v = v_in
state_out = State()
state_out.x = wp.empty_like(x_in)
state_out.v = wp.empty_like(v_in)
for step_kernel in [kernel_step, kernel_step_with_copy]:
with CheckOutput(test):
wp.launch(step_kernel, dim=dim, inputs=[state_in, state_out, model], device=device)
assert_np_equal(state_out.x.numpy(), x_expected, tol=1e-6)
@wp.kernel
def kernel_loss(x: wp.array(dtype=wp.vec3), loss: wp.array(dtype=float)):
i = wp.tid()
wp.atomic_add(loss, 0, x[i][0] * x[i][0] + x[i][1] * x[i][1] + x[i][2] * x[i][2])
def test_step_grad(test, device):
rng = np.random.default_rng(123)
dim = 5
dt = 0.01
gravity = np.array([0, 0, -9.81])
m = rng.random(size=dim) + 0.1
m_model = wp.array(m, dtype=float, device=device, requires_grad=True)
model = Model()
model.m = m_model
model.dt = dt
model.gravity = wp.vec3(0, 0, -9.81)
x = rng.normal(size=(dim, 3))
v = rng.normal(size=(dim, 3))
x_in = wp.array(x, dtype=wp.vec3, device=device, requires_grad=True)
v_in = wp.array(v, dtype=wp.vec3, device=device, requires_grad=True)
state_in = State()
state_in.x = x_in
state_in.v = v_in
state_out = State()
state_out.x = wp.empty_like(x_in, requires_grad=True)
state_out.v = wp.empty_like(v_in, requires_grad=True)
loss = wp.empty(1, dtype=float, device=device, requires_grad=True)
for step_kernel in [kernel_step, kernel_step_with_copy]:
tape = wp.Tape()
with tape:
wp.launch(step_kernel, dim=dim, inputs=[state_in, state_out, model], device=device)
wp.launch(kernel_loss, dim=dim, inputs=[state_out.x, loss], device=device)
tape.backward(loss)
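# analytic reference: loss = sum(|x_out|^2) with x_out = x_in + (v_in + g * dt / m) * dt,
# so dl/dx_out = 2 * x_out, dl/dv = dl/dx_out * dt, and dv/dm = -g * dt / m^2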
dl_dx = 2 * state_out.x.numpy()
dl_dv = dl_dx * dt
dv_dm = -gravity * dt / m[:, None] ** 2
dl_dm = (dl_dv * dv_dm).sum(-1)
assert_np_equal(state_out.x.grad.numpy(), dl_dx, tol=1e-6)
assert_np_equal(state_in.x.grad.numpy(), dl_dx, tol=1e-6)
assert_np_equal(state_out.v.grad.numpy(), dl_dv, tol=1e-6)
assert_np_equal(state_in.v.grad.numpy(), dl_dv, tol=1e-6)
assert_np_equal(model.m.grad.numpy(), dl_dm, tol=1e-6)
tape.zero()
assert state_out.x.grad.numpy().sum() == 0.0
assert state_in.x.grad.numpy().sum() == 0.0
assert state_out.v.grad.numpy().sum() == 0.0
assert state_in.v.grad.numpy().sum() == 0.0
assert model.m.grad.numpy().sum() == 0.0
@wp.struct
class Empty:
pass
@wp.kernel
def test_empty(input: Empty):
tid = wp.tid()
@wp.struct
class Uninitialized:
data: wp.array(dtype=int)
@wp.kernel
def test_uninitialized(input: Uninitialized):
tid = wp.tid()
@wp.struct
class Baz:
data: wp.array(dtype=int)
z: wp.vec3
@wp.struct
class Bar:
baz: Baz
y: float
@wp.struct
class Foo:
bar: Bar
x: int
@wp.kernel
def kernel_nested_struct(foo: Foo):
tid = wp.tid()
foo.bar.baz.data[tid] = (
foo.bar.baz.data[tid] + foo.x + int(foo.bar.y * 100.0) + int(wp.length_sq(foo.bar.baz.z)) + tid * 2
)
def test_nested_struct(test, device):
dim = 3
foo = Foo()
foo.bar = Bar()
foo.bar.baz = Baz()
foo.bar.baz.data = wp.zeros(dim, dtype=int, device=device)
foo.bar.baz.z = wp.vec3(1, 2, 3)
foo.bar.y = 1.23
foo.x = 123
wp.launch(kernel_nested_struct, dim=dim, inputs=[foo], device=device)
assert_array_equal(
foo.bar.baz.data,
wp.array((260, 262, 264), dtype=int, device=device),
)
def test_struct_attribute_error(test, device):
@wp.kernel
def kernel(foo: Foo):
_ = foo.nonexisting
with test.assertRaisesRegex(AttributeError, r"`nonexisting` is not an attribute of 'foo' \([\w.]+\.Foo\)$"):
wp.launch(
kernel,
dim=1,
inputs=[Foo()],
device=device,
)
@wp.kernel
def test_struct_instantiate(data: wp.array(dtype=int)):
baz = Baz(data, wp.vec3(0.0, 0.0, 26.0))
bar = Bar(baz, 25.0)
foo = Foo(bar, 24)
wp.expect_eq(foo.x, 24)
wp.expect_eq(foo.bar.y, 25.0)
wp.expect_eq(foo.bar.baz.z[2], 26.0)
wp.expect_eq(foo.bar.baz.data[0], 1)
@wp.struct
class MathThings:
v1: wp.vec3
v2: wp.vec3
v3: wp.vec3
m1: wp.mat22
m2: wp.mat22
m3: wp.mat22
m4: wp.mat22
m5: wp.mat22
m6: wp.mat22
@wp.kernel
def check_math_conversions(s: MathThings):
wp.expect_eq(s.v1, wp.vec3(1.0, 2.0, 3.0))
wp.expect_eq(s.v2, wp.vec3(10.0, 20.0, 30.0))
wp.expect_eq(s.v3, wp.vec3(100.0, 200.0, 300.0))
wp.expect_eq(s.m1, wp.mat22(1.0, 2.0, 3.0, 4.0))
wp.expect_eq(s.m2, wp.mat22(10.0, 20.0, 30.0, 40.0))
wp.expect_eq(s.m3, wp.mat22(100.0, 200.0, 300.0, 400.0))
wp.expect_eq(s.m4, wp.mat22(1.0, 2.0, 3.0, 4.0))
wp.expect_eq(s.m5, wp.mat22(10.0, 20.0, 30.0, 40.0))
wp.expect_eq(s.m6, wp.mat22(100.0, 200.0, 300.0, 400.0))
def test_struct_math_conversions(test, device):
s = MathThings()
# test assigning various containers to vector and matrix attributes
s.v1 = (1, 2, 3)
s.v2 = [10, 20, 30]
s.v3 = np.array([100, 200, 300])
# 2d containers for matrices
s.m1 = ((1, 2), (3, 4))
s.m2 = [[10, 20], [30, 40]]
s.m3 = np.array([[100, 200], [300, 400]])
# 1d containers for matrices
s.m4 = (1, 2, 3, 4)
s.m5 = [10, 20, 30, 40]
s.m6 = np.array([100, 200, 300, 400])
wp.launch(check_math_conversions, dim=1, inputs=[s], device=device)
@wp.struct
class TestData:
value: wp.int32
@wp.func
def GetTestData(value: wp.int32):
return TestData(value * 2)
@wp.kernel
def test_return_struct(data: wp.array(dtype=wp.int32)):
tid = wp.tid()
data[tid] = GetTestData(tid).value
wp.expect_eq(data[tid], tid * 2)
@wp.struct
class ReturnStruct:
a: int
b: int
@wp.func
def test_return_func():
a = ReturnStruct(1, 2)
return a
@wp.kernel
def test_return():
t = test_return_func()
wp.expect_eq(t.a, 1)
wp.expect_eq(t.b, 2)
@wp.struct
class DefaultAttribNested:
f: float
@wp.struct
class DefaultAttribStruct:
i: int
d: wp.float64
v: wp.vec3
m: wp.mat22
a: wp.array(dtype=wp.int32)
s: DefaultAttribNested
@wp.func
def check_default_attributes_func(data: DefaultAttribStruct):
wp.expect_eq(data.i, wp.int32(0))
wp.expect_eq(data.d, wp.float64(0))
wp.expect_eq(data.v, wp.vec3(0.0, 0.0, 0.0))
wp.expect_eq(data.m, wp.mat22(0.0, 0.0, 0.0, 0.0))
wp.expect_eq(data.a.shape[0], 0)
wp.expect_eq(data.s.f, wp.float32(0.0))
@wp.kernel
def check_default_attributes_kernel(data: DefaultAttribStruct):
check_default_attributes_func(data)
# check that structs are default-initialized correctly inside kernels
@wp.kernel
def test_struct_default_attributes_kernel():
s = DefaultAttribStruct()
check_default_attributes_func(s)
@wp.struct
class MutableStruct:
param1: int
param2: float
@wp.kernel
def test_struct_mutate_attributes_kernel():
t = MutableStruct()
t.param1 = 1
t.param2 = 1.1
wp.expect_eq(t.param1, 1)
wp.expect_eq(t.param2, 1.1)
@wp.struct
class InnerStruct:
i: int
@wp.struct
class ArrayStruct:
array: wp.array(dtype=InnerStruct)
@wp.kernel
def struct2_reader(test: ArrayStruct):
k = wp.tid()
wp.expect_eq(k + 1, test.array[k].i)
def test_nested_array_struct(test, device):
var1 = InnerStruct()
var1.i = 1
var2 = InnerStruct()
var2.i = 2
struct = ArrayStruct()
struct.array = wp.array([var1, var2], dtype=InnerStruct, device=device)
wp.launch(struct2_reader, dim=2, inputs=[struct], device=device)
@wp.struct
class VecStruct:
value: wp.vec3
@wp.struct
class Bar2:
z: wp.array(dtype=float)
@wp.struct
class Foo2:
x: wp.array(dtype=float)
y: Bar2
def test_convert_to_device(test, device):
foo = Foo2()
foo.x = wp.array((1.23, 2.34), dtype=float, device=device)
foo.y = Bar2()
foo.y.z = wp.array((3.45, 4.56), dtype=float, device=device)
if device.is_cpu and wp.is_cuda_available():
dst_device = "cuda:0"
elif device.is_cuda and wp.is_cpu_available():
dst_device = "cpu"
else:
return
result = foo.to(dst_device)
assert result.x.device == dst_device
assert result.y.z.device == dst_device
@wp.struct
class EmptyNest1:
a: Empty
z: int
@wp.struct
class EmptyNest2:
a: Empty
b: Empty
z: int
@wp.struct
class EmptyNest3:
a: Empty
b: Empty
c: Empty
z: int
@wp.struct
class EmptyNest4:
a: Empty
b: Empty
c: Empty
d: Empty
z: int
@wp.struct
class EmptyNest5:
a: Empty
b: Empty
c: Empty
d: Empty
e: Empty
z: int
@wp.struct
class EmptyNest6:
a: Empty
b: Empty
c: Empty
d: Empty
e: Empty
f: Empty
z: int
@wp.struct
class EmptyNest7:
a: Empty
b: Empty
c: Empty
d: Empty
e: Empty
f: Empty
g: Empty
z: int
@wp.struct
class EmptyNest8:
a: Empty
b: Empty
c: Empty
d: Empty
e: Empty
f: Empty
g: Empty
h: Empty
z: int
@wp.kernel
def empty_nest_kernel(s: Any):
wp.expect_eq(s.z, 42)
wp.overload(empty_nest_kernel, [EmptyNest1])
wp.overload(empty_nest_kernel, [EmptyNest2])
wp.overload(empty_nest_kernel, [EmptyNest3])
wp.overload(empty_nest_kernel, [EmptyNest4])
wp.overload(empty_nest_kernel, [EmptyNest5])
wp.overload(empty_nest_kernel, [EmptyNest6])
wp.overload(empty_nest_kernel, [EmptyNest7])
wp.overload(empty_nest_kernel, [EmptyNest8])
def test_nested_empty_struct(test, device):
with wp.ScopedDevice(device):
e1 = EmptyNest1()
e1.z = 42
e2 = EmptyNest2()
e2.z = 42
e3 = EmptyNest3()
e3.z = 42
e4 = EmptyNest4()
e4.z = 42
e5 = EmptyNest5()
e5.z = 42
e6 = EmptyNest6()
e6.z = 42
e7 = EmptyNest7()
e7.z = 42
e8 = EmptyNest8()
e8.z = 42
wp.launch(empty_nest_kernel, dim=1, inputs=[e1])
wp.launch(empty_nest_kernel, dim=1, inputs=[e2])
wp.launch(empty_nest_kernel, dim=1, inputs=[e3])
wp.launch(empty_nest_kernel, dim=1, inputs=[e4])
wp.launch(empty_nest_kernel, dim=1, inputs=[e5])
wp.launch(empty_nest_kernel, dim=1, inputs=[e6])
wp.launch(empty_nest_kernel, dim=1, inputs=[e7])
wp.launch(empty_nest_kernel, dim=1, inputs=[e8])
wp.synchronize_device()
@wp.struct
class DependentModuleImport_A:
s: StructFromAnotherModule
@wp.struct
class DependentModuleImport_B:
s: StructFromAnotherModule
@wp.struct
class DependentModuleImport_C:
a: DependentModuleImport_A
b: DependentModuleImport_B
@wp.kernel
def test_dependent_module_import(c: DependentModuleImport_C):
wp.tid() # nop, we're just testing codegen
devices = get_test_devices()
class TestStruct(unittest.TestCase):
# check that structs are default-initialized correctly in Python
def test_struct_default_attributes_python(self):
s = DefaultAttribStruct()
wp.launch(check_default_attributes_kernel, dim=1, inputs=[s])
def test_nested_vec_assignment(self):
v = VecStruct()
v.value[0] = 1.0
v.value[1] = 2.0
v.value[2] = 3.0
arr = wp.array([v], dtype=VecStruct)
expected = np.array(([1.0, 2.0, 3.0],))
assert np.all(arr.numpy().tolist() == expected)
add_function_test(TestStruct, "test_step", test_step, devices=devices)
add_function_test(TestStruct, "test_step_grad", test_step_grad, devices=devices)
add_kernel_test(TestStruct, kernel=test_empty, name="test_empty", dim=1, inputs=[Empty()], devices=devices)
add_kernel_test(
TestStruct,
kernel=test_uninitialized,
name="test_uninitialized",
dim=1,
inputs=[Uninitialized()],
devices=devices,
)
add_kernel_test(TestStruct, kernel=test_return, name="test_return", dim=1, inputs=[], devices=devices)
add_function_test(TestStruct, "test_nested_struct", test_nested_struct, devices=devices)
add_function_test(TestStruct, "test_nested_array_struct", test_nested_array_struct, devices=devices)
add_function_test(TestStruct, "test_convert_to_device", test_convert_to_device, devices=devices)
add_function_test(TestStruct, "test_nested_empty_struct", test_nested_empty_struct, devices=devices)
add_function_test(TestStruct, "test_struct_math_conversions", test_struct_math_conversions, devices=devices)
add_kernel_test(
TestStruct,
name="test_struct_default_attributes",
kernel=test_struct_default_attributes_kernel,
dim=1,
inputs=[],
devices=devices,
)
add_kernel_test(
TestStruct,
name="test_struct_mutate_attributes",
kernel=test_struct_mutate_attributes_kernel,
dim=1,
inputs=[],
devices=devices,
)
for device in devices:
add_kernel_test(
TestStruct,
kernel=test_struct_instantiate,
name="test_struct_instantiate",
dim=1,
inputs=[wp.array([1], dtype=int, device=device)],
devices=[device],
)
add_kernel_test(
TestStruct,
kernel=test_return_struct,
name="test_return_struct",
dim=1,
inputs=[wp.zeros(10, dtype=int, device=device)],
devices=[device],
)
add_kernel_test(
TestStruct,
kernel=test_dependent_module_import,
name="test_dependent_module_import",
dim=1,
inputs=[DependentModuleImport_C()],
devices=devices,
)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 16,581 | Python | 22.38787 | 112 | 0.628189 |
NVIDIA/warp/warp/tests/test_import.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import warp as wp
import warp.tests.test_func as test_func
from warp.tests.unittest_utils import *
@wp.kernel
def test_import_func():
# test a cross-module function reference is resolved correctly
x = test_func.sqr(2.0)
y = test_func.cube(2.0)
wp.expect_eq(x, 4.0)
wp.expect_eq(y, 8.0)
devices = get_test_devices()
class TestImport(unittest.TestCase):
pass
add_kernel_test(TestImport, kernel=test_import_func, name="test_import_func", dim=1, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 1,027 | Python | 26.052631 | 101 | 0.740993 |
NVIDIA/warp/warp/tests/test_mempool.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import warp as wp
from warp.tests.unittest_utils import *
def get_device_pair_with_mempool_access_support():
devices = wp.get_cuda_devices()
for target_device in devices:
for peer_device in devices:
if target_device != peer_device:
if wp.is_mempool_access_supported(target_device, peer_device):
return (target_device, peer_device)
return None
def get_device_pair_without_mempool_access_support():
devices = wp.get_cuda_devices()
for target_device in devices:
for peer_device in devices:
if target_device != peer_device:
if not wp.is_mempool_access_supported(target_device, peer_device):
return (target_device, peer_device)
return None
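# Minimal usage sketch (illustrative only; the helper name `configure_mempool_example` is
# hypothetical and the snippet assumes a CUDA device with mempool support): configure the
# memory pool so that freed blocks up to the threshold are retained rather than returned to the OS.
def configure_mempool_example(device="cuda:0"):
    device = wp.get_device(device)
    if device.is_mempool_supported:
        wp.set_mempool_enabled(device, True)
        # 42000 bytes is an absolute threshold; a value such as 0.5 is treated as a
        # fraction of total device memory (both behaviours are exercised by the test below)
        wp.set_mempool_release_threshold(device, 42000)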
def test_mempool_release_threshold(test, device):
device = wp.get_device(device)
assert device.is_mempool_supported
test.assertEqual(wp.is_mempool_supported(device), device.is_mempool_supported)
was_enabled = wp.is_mempool_enabled(device)
# toggle
wp.set_mempool_enabled(device, not was_enabled)
test.assertEqual(wp.is_mempool_enabled(device), not was_enabled)
# restore
wp.set_mempool_enabled(device, was_enabled)
test.assertEqual(wp.is_mempool_enabled(device), was_enabled)
saved_threshold = wp.get_mempool_release_threshold(device)
# set new absolute threshold
wp.set_mempool_release_threshold(device, 42000)
test.assertEqual(wp.get_mempool_release_threshold(device), 42000)
# set new fractional threshold
wp.set_mempool_release_threshold(device, 0.5)
test.assertEqual(wp.get_mempool_release_threshold(device), int(0.5 * device.total_memory))
# restore threshold
wp.set_mempool_release_threshold(device, saved_threshold)
test.assertEqual(wp.get_mempool_release_threshold(device), saved_threshold)
def test_mempool_exceptions(test, device):
device = wp.get_device(device)
assert not device.is_mempool_supported
if device.is_cuda:
expected_error = RuntimeError
else:
expected_error = ValueError
with test.assertRaises(expected_error):
wp.get_mempool_release_threshold(device)
with test.assertRaises(expected_error):
wp.set_mempool_release_threshold(device, 42000)
def test_mempool_access_self(test, device):
device = wp.get_device(device)
assert device.is_mempool_supported
# setting mempool access to self is a no-op
wp.set_mempool_access_enabled(device, device, True)
wp.set_mempool_access_enabled(device, device, False)
# should always be enabled
enabled = wp.is_mempool_access_enabled(device, device)
test.assertTrue(enabled)
@unittest.skipUnless(get_device_pair_with_mempool_access_support(), "Requires devices with mempool access support")
def test_mempool_access(test, _):
target_device, peer_device = get_device_pair_with_mempool_access_support()
was_enabled = wp.is_mempool_access_enabled(target_device, peer_device)
if was_enabled:
# try disabling
wp.set_mempool_access_enabled(target_device, peer_device, False)
is_enabled = wp.is_mempool_access_enabled(target_device, peer_device)
test.assertFalse(is_enabled)
# try re-enabling
wp.set_mempool_access_enabled(target_device, peer_device, True)
is_enabled = wp.is_mempool_access_enabled(target_device, peer_device)
test.assertTrue(is_enabled)
else:
# try enabling
wp.set_mempool_access_enabled(target_device, peer_device, True)
is_enabled = wp.is_mempool_access_enabled(target_device, peer_device)
test.assertTrue(is_enabled)
# try re-disabling
wp.set_mempool_access_enabled(target_device, peer_device, False)
is_enabled = wp.is_mempool_access_enabled(target_device, peer_device)
test.assertFalse(is_enabled)
@unittest.skipUnless(
get_device_pair_without_mempool_access_support(), "Requires devices without mempool access support"
)
def test_mempool_access_exceptions_unsupported(test, _):
# get a CUDA device pair without mempool access support
target_device, peer_device = get_device_pair_without_mempool_access_support()
# querying is ok, but must return False
test.assertFalse(wp.is_mempool_access_enabled(target_device, peer_device))
# enabling should raise RuntimeError
with test.assertRaises(RuntimeError):
wp.set_mempool_access_enabled(target_device, peer_device, True)
# disabling should not raise an error
wp.set_mempool_access_enabled(target_device, peer_device, False)
@unittest.skipUnless(wp.is_cpu_available() and wp.is_cuda_available(), "Requires both CUDA and CPU devices")
def test_mempool_access_exceptions_cpu(test, _):
# querying is ok, but must return False
test.assertFalse(wp.is_mempool_access_enabled("cuda:0", "cpu"))
test.assertFalse(wp.is_mempool_access_enabled("cpu", "cuda:0"))
# enabling should raise ValueError
with test.assertRaises(ValueError):
wp.set_mempool_access_enabled("cpu", "cuda:0", True)
with test.assertRaises(ValueError):
wp.set_mempool_access_enabled("cuda:0", "cpu", True)
# disabling should not raise an error
wp.set_mempool_access_enabled("cpu", "cuda:0", False)
wp.set_mempool_access_enabled("cuda:0", "cpu", False)
class TestMempool(unittest.TestCase):
pass
devices_with_mempools = [d for d in get_test_devices() if d.is_mempool_supported]
devices_without_mempools = [d for d in get_test_devices() if not d.is_mempool_supported]
# test devices with mempool support
add_function_test(
TestMempool, "test_mempool_release_threshold", test_mempool_release_threshold, devices=devices_with_mempools
)
add_function_test(TestMempool, "test_mempool_access_self", test_mempool_access_self, devices=devices_with_mempools)
# test devices without mempool support
add_function_test(TestMempool, "test_mempool_exceptions", test_mempool_exceptions, devices=devices_without_mempools)
# mempool access tests
add_function_test(TestMempool, "test_mempool_access", test_mempool_access)
# mempool access exceptions
add_function_test(TestMempool, "test_mempool_access_exceptions_unsupported", test_mempool_access_exceptions_unsupported)
add_function_test(TestMempool, "test_mempool_access_exceptions_cpu", test_mempool_access_exceptions_cpu)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 6,901 | Python | 35.909091 | 120 | 0.721055 |
NVIDIA/warp/warp/tests/test_runlength_encode.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
from functools import partial
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
from warp.utils import runlength_encode
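# Compares runlength_encode() against np.unique(..., return_counts=True) on a sorted array of random integers.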
def test_runlength_encode_int(test, device, n):
rng = np.random.default_rng(123)
values_np = np.sort(rng.integers(-10, high=10, size=n, dtype=int))
unique_values_np, unique_counts_np = np.unique(values_np, return_counts=True)
values = wp.array(values_np, device=device, dtype=int)
unique_values = wp.empty_like(values)
unique_counts = wp.empty_like(values)
run_count = runlength_encode(values, unique_values, unique_counts)
test.assertEqual(run_count, len(unique_values_np))
assert_np_equal(unique_values.numpy()[:run_count], unique_values_np[:run_count])
assert_np_equal(unique_counts.numpy()[:run_count], unique_counts_np[:run_count])
def test_runlength_encode_error_insufficient_storage(test, device):
values = wp.zeros(123, dtype=int, device=device)
run_values = wp.empty(1, dtype=int, device=device)
run_lengths = wp.empty(123, dtype=int, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"Output array storage sizes must be at least equal to value_count$",
):
runlength_encode(values, run_values, run_lengths)
values = wp.zeros(123, dtype=int, device="cpu")
run_values = wp.empty(123, dtype=int, device="cpu")
run_lengths = wp.empty(1, dtype=int, device="cpu")
with test.assertRaisesRegex(
RuntimeError,
r"Output array storage sizes must be at least equal to value_count$",
):
runlength_encode(values, run_values, run_lengths)
def test_runlength_encode_error_dtypes_mismatch(test, device):
values = wp.zeros(123, dtype=int, device=device)
run_values = wp.empty(123, dtype=float, device=device)
run_lengths = wp.empty_like(values, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"values and run_values data types do not match$",
):
runlength_encode(values, run_values, run_lengths)
def test_runlength_encode_error_run_length_unsupported_dtype(test, device):
values = wp.zeros(123, dtype=int, device=device)
run_values = wp.empty(123, dtype=int, device=device)
run_lengths = wp.empty(123, dtype=float, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"run_lengths array must be of type int32$",
):
runlength_encode(values, run_values, run_lengths)
def test_runlength_encode_error_run_count_unsupported_dtype(test, device):
values = wp.zeros(123, dtype=int, device=device)
run_values = wp.empty_like(values, device=device)
run_lengths = wp.empty_like(values, device=device)
run_count = wp.empty(shape=(1,), dtype=float, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"run_count array must be of type int32$",
):
runlength_encode(values, run_values, run_lengths, run_count=run_count)
def test_runlength_encode_error_unsupported_dtype(test, device):
values = wp.zeros(123, dtype=float, device=device)
run_values = wp.empty(123, dtype=float, device=device)
run_lengths = wp.empty(123, dtype=int, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"Unsupported data type$",
):
runlength_encode(values, run_values, run_lengths)
devices = get_test_devices()
class TestRunlengthEncode(unittest.TestCase):
@unittest.skipUnless(wp.is_cuda_available(), "Requires CUDA")
def test_runlength_encode_error_devices_mismatch(self):
values = wp.zeros(123, dtype=int, device="cpu")
run_values = wp.empty_like(values, device="cuda:0")
run_lengths = wp.empty_like(values, device="cuda:0")
with self.assertRaisesRegex(
RuntimeError,
r"Array storage devices do not match$",
):
runlength_encode(values, run_values, run_lengths)
values = wp.zeros(123, dtype=int, device="cpu")
run_values = wp.empty_like(values, device="cpu")
run_lengths = wp.empty_like(values, device="cuda:0")
with self.assertRaisesRegex(
RuntimeError,
r"Array storage devices do not match$",
):
runlength_encode(values, run_values, run_lengths)
values = wp.zeros(123, dtype=int, device="cpu")
run_values = wp.empty_like(values, device="cuda:0")
run_lengths = wp.empty_like(values, device="cpu")
with self.assertRaisesRegex(
RuntimeError,
r"Array storage devices do not match$",
):
runlength_encode(values, run_values, run_lengths)
@unittest.skipUnless(wp.is_cuda_available(), "Requires CUDA")
def test_runlength_encode_error_run_count_device_mismatch(self):
values = wp.zeros(123, dtype=int, device="cpu")
run_values = wp.empty_like(values, device="cpu")
run_lengths = wp.empty_like(values, device="cpu")
run_count = wp.empty(shape=(1,), dtype=int, device="cuda:0")
with self.assertRaisesRegex(
RuntimeError,
r"run_count storage device does not match other arrays$",
):
runlength_encode(values, run_values, run_lengths, run_count=run_count)
add_function_test(
TestRunlengthEncode, "test_runlength_encode_int", partial(test_runlength_encode_int, n=100), devices=devices
)
add_function_test(
TestRunlengthEncode, "test_runlength_encode_empty", partial(test_runlength_encode_int, n=0), devices=devices
)
add_function_test(
TestRunlengthEncode,
"test_runlength_encode_error_insufficient_storage",
test_runlength_encode_error_insufficient_storage,
devices=devices,
)
add_function_test(
TestRunlengthEncode,
"test_runlength_encode_error_dtypes_mismatch",
test_runlength_encode_error_dtypes_mismatch,
devices=devices,
)
add_function_test(
TestRunlengthEncode,
"test_runlength_encode_error_run_length_unsupported_dtype",
test_runlength_encode_error_run_length_unsupported_dtype,
devices=devices,
)
add_function_test(
TestRunlengthEncode,
"test_runlength_encode_error_run_count_unsupported_dtype",
test_runlength_encode_error_run_count_unsupported_dtype,
devices=devices,
)
add_function_test(
TestRunlengthEncode,
"test_runlength_encode_error_unsupported_dtype",
test_runlength_encode_error_unsupported_dtype,
devices=devices,
)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 6,954 | Python | 35.798942 | 112 | 0.688668 |
NVIDIA/warp/warp/tests/test_print.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import sys
import unittest
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def test_print_kernel():
wp.print(1.0)
wp.print("this is a string")
wp.printf("this is a float %f\n", 457.5)
wp.printf("this is an int %d\n", 123)
def test_print(test, device):
wp.load_module(device=device)
capture = StdOutCapture()
capture.begin()
wp.launch(kernel=test_print_kernel, dim=1, inputs=[], device=device)
wp.synchronize_device(device)
s = capture.end()
    # We skip the win32 comparison for now since the capture is sometimes an empty string
if sys.platform != "win32":
test.assertRegex(
s,
rf"1{os.linesep}"
rf"this is a string{os.linesep}"
rf"this is a float 457\.500000{os.linesep}"
rf"this is an int 123",
)
class TestPrint(unittest.TestCase):
pass
devices = get_test_devices()
add_function_test(TestPrint, "test_print", test_print, devices=devices, check_output=False)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 1,536 | Python | 27.999999 | 91 | 0.686198 |
NVIDIA/warp/warp/tests/test_lerp.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
from dataclasses import dataclass
from typing import Any
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
@dataclass
class TestData:
a: Any
b: Any
t: float
expected: Any
expected_adj_a: Any = None
expected_adj_b: Any = None
expected_adj_t: float = None
def check_backwards(self):
return self.expected_adj_a is not None and self.expected_adj_b is not None and self.expected_adj_t is not None
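# Test cases per value type: operands a and b, interpolation parameter t, the expected
# wp.lerp() result, and (for the scalar cases) the expected adjoints used to verify the backward pass.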
TEST_DATA = {
wp.float32: (
TestData(
a=1.0,
b=5.0,
t=0.75,
expected=4.0,
expected_adj_a=0.25,
expected_adj_b=0.75,
expected_adj_t=4.0,
),
TestData(
a=-2.0,
b=5.0,
t=0.25,
expected=-0.25,
expected_adj_a=0.75,
expected_adj_b=0.25,
expected_adj_t=7.0,
),
TestData(
a=1.23,
b=2.34,
t=0.5,
expected=1.785,
expected_adj_a=0.5,
expected_adj_b=0.5,
expected_adj_t=1.11,
),
),
wp.vec2: (
TestData(
a=[1, 2],
b=[3, 4],
t=0.5,
expected=[2, 3],
),
),
wp.vec3: (
TestData(
a=[1, 2, 3],
b=[3, 4, 5],
t=0.5,
expected=[2, 3, 4],
),
),
wp.vec4: (
TestData(
a=[1, 2, 3, 4],
b=[3, 4, 5, 6],
t=0.5,
expected=[2, 3, 4, 5],
),
),
wp.mat22: (
TestData(
a=[[1, 2], [2, 1]],
b=[[3, 4], [4, 3]],
t=0.5,
expected=[[2, 3], [3, 2]],
),
),
wp.mat33: (
TestData(
a=[[1, 2, 3], [3, 1, 2], [2, 3, 1]],
b=[[3, 4, 5], [5, 3, 4], [4, 5, 3]],
t=0.5,
expected=[[2, 3, 4], [4, 2, 3], [3, 4, 2]],
),
),
wp.mat44: (
TestData(
a=[[1, 2, 3, 4], [4, 1, 2, 3], [3, 4, 1, 2], [2, 3, 4, 1]],
b=[[3, 4, 5, 6], [6, 3, 4, 5], [5, 6, 3, 4], [4, 5, 6, 3]],
t=0.5,
expected=[[2, 3, 4, 5], [5, 2, 3, 4], [4, 5, 2, 3], [3, 4, 5, 2]],
),
),
wp.quat: (
TestData(
a=[1, 2, 3, 4],
b=[3, 4, 5, 6],
t=0.5,
expected=[2, 3, 4, 5],
),
),
wp.transform: (
TestData(
a=[1, 2, 3, 4, 5, 6, 7],
b=[3, 4, 5, 6, 7, 8, 9],
t=0.5,
expected=[2, 3, 4, 5, 6, 7, 8],
),
),
wp.spatial_vector: (
TestData(
a=[1, 2, 3, 4, 5, 6],
b=[3, 4, 5, 6, 7, 8],
t=0.5,
expected=[2, 3, 4, 5, 6, 7],
),
),
wp.spatial_matrix: (
TestData(
a=[
[1, 2, 3, 4, 5, 6],
[6, 1, 2, 3, 4, 5],
[5, 6, 1, 2, 3, 4],
[4, 5, 6, 1, 2, 3],
[3, 4, 5, 6, 1, 2],
[2, 3, 4, 5, 6, 1],
],
b=[
[3, 4, 5, 6, 7, 8],
[8, 3, 4, 5, 6, 7],
[7, 8, 3, 4, 5, 6],
[6, 7, 8, 3, 4, 5],
[5, 6, 7, 8, 3, 4],
[4, 5, 6, 7, 8, 3],
],
t=0.5,
expected=[
[2, 3, 4, 5, 6, 7],
[7, 2, 3, 4, 5, 6],
[6, 7, 2, 3, 4, 5],
[5, 6, 7, 2, 3, 4],
[4, 5, 6, 7, 2, 3],
[3, 4, 5, 6, 7, 2],
],
),
),
}
def test_lerp(test, device):
def make_kernel_fn(data_type):
def fn(
a: wp.array(dtype=data_type),
b: wp.array(dtype=data_type),
t: wp.array(dtype=float),
out: wp.array(dtype=data_type),
):
out[0] = wp.lerp(a[0], b[0], t[0])
return fn
for data_type in TEST_DATA:
kernel_fn = make_kernel_fn(data_type)
kernel = wp.Kernel(func=kernel_fn, key=f"test_lerp_{data_type.__name__}_kernel")
with test.subTest(data_type=data_type):
for test_data in TEST_DATA[data_type]:
a = wp.array([test_data.a], dtype=data_type, device=device, requires_grad=True)
b = wp.array([test_data.b], dtype=data_type, device=device, requires_grad=True)
t = wp.array([test_data.t], dtype=float, device=device, requires_grad=True)
out = wp.array(
[0] * wp.types.type_length(data_type), dtype=data_type, device=device, requires_grad=True
)
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[a, b, t, out], device=device)
assert_np_equal(out.numpy(), np.array([test_data.expected]), tol=1e-6)
if test_data.check_backwards():
tape.backward(out)
assert_np_equal(tape.gradients[a].numpy(), np.array([test_data.expected_adj_a]), tol=1e-6)
assert_np_equal(tape.gradients[b].numpy(), np.array([test_data.expected_adj_b]), tol=1e-6)
assert_np_equal(tape.gradients[t].numpy(), np.array([test_data.expected_adj_t]), tol=1e-6)
devices = get_test_devices()
class TestLerp(unittest.TestCase):
pass
add_function_test(TestLerp, "test_lerp", test_lerp, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 6,038 | Python | 26.701835 | 118 | 0.424644 |
NVIDIA/warp/warp/tests/test_special_values.py | # Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import unittest
import warp as wp
from warp.tests.unittest_utils import *
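# Cache dynamically created kernels keyed by function name and dtype suffix so each specialization is built only once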
kernel_cache = {}
def getkernel(func, suffix=""):
key = func.__name__ + "_" + suffix
if key not in kernel_cache:
kernel_cache[key] = wp.Kernel(func=func, key=key)
return kernel_cache[key]
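# Each test below creates its kernel via getkernel() with a dtype-specific suffix; when called with
# register_kernels=True it only declares the kernel (so the module can be compiled up front) and skips execution.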
def test_infinity_scalar(test, device, dtype, register_kernels=False):
def check_infinity(outputs: wp.array(dtype=dtype), bool_outputs: wp.array(dtype=wp.bool)):
outputs[0] = dtype(wp.inf)
outputs[1] = dtype(-wp.inf)
outputs[2] = dtype(2.0 * wp.inf)
outputs[3] = dtype(-2.0 * wp.inf)
outputs[4] = dtype(2.0 / 0.0)
outputs[5] = dtype(-2.0 / 0.0)
outputs[6] = wp.log(dtype(0))
outputs[7] = wp.exp(dtype(800))
# Fill out bool outputs
bool_outputs[0] = wp.isinf(dtype(wp.inf))
bool_outputs[1] = wp.isfinite(dtype(wp.inf))
bool_outputs[2] = wp.isinf(dtype(-wp.inf))
bool_outputs[3] = wp.isfinite(dtype(-wp.inf))
bool_outputs[4] = wp.isinf(dtype(0))
bool_outputs[5] = wp.isinf(wp.exp(dtype(800)))
kernel = getkernel(check_infinity, suffix=dtype.__name__)
if register_kernels:
return
outputs = wp.empty(8, dtype=dtype, device=device)
outputs_bool = wp.empty(6, dtype=wp.bool, device=device)
wp.launch(kernel, dim=1, inputs=[], outputs=[outputs, outputs_bool], device=device)
outputs_cpu = outputs.to("cpu").list()
test.assertEqual(outputs_cpu[0], math.inf)
test.assertEqual(outputs_cpu[1], -math.inf)
test.assertEqual(outputs_cpu[2], math.inf)
test.assertEqual(outputs_cpu[3], -math.inf)
test.assertEqual(outputs_cpu[4], math.inf)
test.assertEqual(outputs_cpu[5], -math.inf)
test.assertEqual(outputs_cpu[6], -math.inf)
test.assertEqual(outputs_cpu[7], math.inf)
outputs_bool_cpu = outputs_bool.to("cpu").list()
test.assertTrue(outputs_bool_cpu[0], "wp.isinf(wp.inf) is not True")
test.assertFalse(outputs_bool_cpu[1], "wp.isfinite(wp.inf) is not False")
test.assertTrue(outputs_bool_cpu[2], "wp.isinf(-wp.inf) is not True")
test.assertFalse(outputs_bool_cpu[3], "wp.isfinite(-wp.inf) is not False")
test.assertFalse(outputs_bool_cpu[4], "wp.isinf(0) is not False")
test.assertTrue(outputs_bool_cpu[5], "wp.isinf(wp.exp(800)) is not True")
def test_nan_scalar(test, device, dtype, register_kernels=False):
def check_nan(outputs: wp.array(dtype=dtype), bool_outputs: wp.array(dtype=wp.bool)):
outputs[0] = dtype(wp.nan)
outputs[1] = dtype(-wp.nan)
outputs[2] = dtype(2.0 * wp.nan)
outputs[3] = dtype(2.0 + wp.nan)
outputs[4] = dtype(0.0 / 0.0)
outputs[5] = wp.sqrt(dtype(-1))
outputs[6] = wp.log(dtype(-1))
outputs[7] = dtype(wp.inf) - dtype(wp.inf)
# Fill out bool outputs
bool_outputs[0] = dtype(wp.nan) == dtype(wp.nan)
bool_outputs[1] = dtype(wp.nan) != dtype(wp.nan)
bool_outputs[2] = dtype(wp.nan) == dtype(1)
bool_outputs[3] = dtype(wp.nan) != dtype(1)
bool_outputs[4] = wp.isnan(wp.nan)
bool_outputs[5] = wp.isnan(dtype(0.0))
bool_outputs[6] = wp.isnan(dtype(wp.inf))
bool_outputs[7] = dtype(wp.nan) > dtype(1)
bool_outputs[8] = dtype(wp.nan) >= dtype(1)
bool_outputs[9] = dtype(wp.nan) < dtype(1)
bool_outputs[10] = dtype(wp.nan) <= dtype(1)
bool_outputs[11] = dtype(wp.nan) > dtype(wp.nan)
bool_outputs[12] = dtype(wp.nan) >= dtype(wp.nan)
bool_outputs[13] = dtype(wp.nan) < dtype(wp.nan)
bool_outputs[14] = dtype(wp.nan) <= dtype(wp.nan)
bool_outputs[15] = wp.isfinite(dtype(wp.nan))
bool_outputs[16] = wp.isinf(dtype(wp.nan))
kernel = getkernel(check_nan, suffix=dtype.__name__)
if register_kernels:
return
outputs = wp.empty(8, dtype=dtype, device=device)
outputs_bool = wp.empty(17, dtype=wp.bool, device=device)
wp.launch(kernel, dim=1, inputs=[], outputs=[outputs, outputs_bool], device=device)
outputs_cpu = outputs.to("cpu").list()
test.assertTrue(math.isnan(outputs_cpu[0]), "wp.nan is not NaN")
test.assertTrue(math.isnan(outputs_cpu[1]), "-wp.nan is not NaN")
test.assertTrue(math.isnan(outputs_cpu[2]), "2.0*wp.nan is not NaN")
test.assertTrue(math.isnan(outputs_cpu[3]), "2.0+wp.nan is not NaN ")
test.assertTrue(math.isnan(outputs_cpu[4]), "0.0/0.0 is not NaN")
test.assertTrue(math.isnan(outputs_cpu[5]), "Sqrt of a negative number is not NaN")
test.assertTrue(math.isnan(outputs_cpu[6]), "Log of a negative number is not NaN")
test.assertTrue(math.isnan(outputs_cpu[7]), "Subtracting infinity from infinity is not NaN")
outputs_bool_cpu = outputs_bool.to("cpu").list()
test.assertFalse(outputs_bool_cpu[0], "wp.nan == wp.nan is not False")
test.assertTrue(outputs_bool_cpu[1], "wp.nan != wp.nan is not True")
test.assertFalse(outputs_bool_cpu[2], "wp.nan == 1 is not False")
test.assertTrue(outputs_bool_cpu[3], "wp.nan != 1 is not True")
test.assertTrue(outputs_bool_cpu[4], "isnan(wp.nan) is not True")
test.assertFalse(outputs_bool_cpu[5], "isnan(0.0) is not False")
test.assertFalse(outputs_bool_cpu[6], "isnan(wp.inf) is not False")
test.assertFalse(outputs_bool_cpu[7], "wp.nan > 1 is not False")
test.assertFalse(outputs_bool_cpu[8], "wp.nan >= 1 is not False")
test.assertFalse(outputs_bool_cpu[9], "wp.nan < 1 is not False")
test.assertFalse(outputs_bool_cpu[10], "wp.nan <= 1 is not False")
test.assertFalse(outputs_bool_cpu[11], "wp.nan > wp.nan is not False")
test.assertFalse(outputs_bool_cpu[12], "wp.nan >= wp.nan is not False")
test.assertFalse(outputs_bool_cpu[13], "wp.nan < wp.nan is not False")
test.assertFalse(outputs_bool_cpu[14], "wp.nan <= wp.nan is not False")
test.assertFalse(outputs_bool_cpu[15], "wp.isfinite(wp.nan) is not False")
test.assertFalse(outputs_bool_cpu[16], "wp.isinf(wp.nan) is not False")
def test_is_special_vec(test, device, dtype, register_kernels=False):
vector_type = wp.types.vector(5, dtype)
def check_special_vec(bool_outputs: wp.array(dtype=wp.bool)):
zeros_vector = vector_type()
bool_outputs[0] = wp.isfinite(zeros_vector)
bool_outputs[1] = wp.isinf(zeros_vector)
bool_outputs[2] = wp.isnan(zeros_vector)
nan_vector = vector_type()
nan_vector[0] = dtype(wp.NAN)
bool_outputs[3] = wp.isfinite(nan_vector)
bool_outputs[4] = wp.isinf(nan_vector)
bool_outputs[5] = wp.isnan(nan_vector)
inf_vector = vector_type()
inf_vector[0] = dtype(wp.inf)
bool_outputs[6] = wp.isfinite(inf_vector)
bool_outputs[7] = wp.isinf(inf_vector)
bool_outputs[8] = wp.isnan(inf_vector)
kernel = getkernel(check_special_vec, suffix=dtype.__name__)
if register_kernels:
return
outputs_bool = wp.empty(9, dtype=wp.bool, device=device)
wp.launch(kernel, dim=1, inputs=[outputs_bool], device=device)
outputs_bool_cpu = outputs_bool.to("cpu").list()
test.assertTrue(outputs_bool_cpu[0], "wp.isfinite(zeros_vector) is not True")
test.assertFalse(outputs_bool_cpu[1], "wp.isinf(zeros_vector) is not False")
test.assertFalse(outputs_bool_cpu[2], "wp.isnan(zeros_vector) is not False")
test.assertFalse(outputs_bool_cpu[3], "wp.isfinite(nan_vector) is not False")
test.assertFalse(outputs_bool_cpu[4], "wp.isinf(nan_vector) is not False")
test.assertTrue(outputs_bool_cpu[5], "wp.isnan(nan_vector) is not True")
test.assertFalse(outputs_bool_cpu[6], "wp.isfinite(inf_vector) is not False")
test.assertTrue(outputs_bool_cpu[7], "wp.isinf(inf_vector) is not True")
test.assertFalse(outputs_bool_cpu[8], "wp.isnan(inf_vector) is not False")
def test_is_special_mat(test, device, dtype, register_kernels=False):
mat_type = wp.types.matrix((5, 5), dtype)
def check_special_mat(bool_outputs: wp.array(dtype=wp.bool)):
zeros_mat = mat_type()
bool_outputs[0] = wp.isfinite(zeros_mat)
bool_outputs[1] = wp.isinf(zeros_mat)
bool_outputs[2] = wp.isnan(zeros_mat)
nan_mat = mat_type()
nan_mat[0, 0] = dtype(wp.NAN)
bool_outputs[3] = wp.isfinite(nan_mat)
bool_outputs[4] = wp.isinf(nan_mat)
bool_outputs[5] = wp.isnan(nan_mat)
inf_mat = mat_type()
inf_mat[0, 0] = dtype(wp.inf)
bool_outputs[6] = wp.isfinite(inf_mat)
bool_outputs[7] = wp.isinf(inf_mat)
bool_outputs[8] = wp.isnan(inf_mat)
kernel = getkernel(check_special_mat, suffix=dtype.__name__)
if register_kernels:
return
outputs_bool = wp.empty(9, dtype=wp.bool, device=device)
wp.launch(kernel, dim=1, inputs=[outputs_bool], device=device)
outputs_bool_cpu = outputs_bool.to("cpu").list()
test.assertTrue(outputs_bool_cpu[0], "wp.isfinite(zeros_mat) is not True")
test.assertFalse(outputs_bool_cpu[1], "wp.isinf(zeros_mat) is not False")
test.assertFalse(outputs_bool_cpu[2], "wp.isnan(zeros_mat) is not False")
test.assertFalse(outputs_bool_cpu[3], "wp.isfinite(nan_mat) is not False")
test.assertFalse(outputs_bool_cpu[4], "wp.isinf(nan_mat) is not False")
test.assertTrue(outputs_bool_cpu[5], "wp.isnan(nan_mat) is not True")
test.assertFalse(outputs_bool_cpu[6], "wp.isfinite(inf_mat) is not False")
test.assertTrue(outputs_bool_cpu[7], "wp.isinf(inf_mat) is not True")
test.assertFalse(outputs_bool_cpu[8], "wp.isnan(inf_mat) is not False")
def test_is_special_quat(test, device, dtype, register_kernels=False):
quat_type = wp.types.quaternion(dtype)
def check_special_quat(bool_outputs: wp.array(dtype=wp.bool)):
zeros_quat = quat_type()
bool_outputs[0] = wp.isfinite(zeros_quat)
bool_outputs[1] = wp.isinf(zeros_quat)
bool_outputs[2] = wp.isnan(zeros_quat)
nan_quat = quat_type(dtype(wp.NAN), dtype(0), dtype(0), dtype(0))
bool_outputs[3] = wp.isfinite(nan_quat)
bool_outputs[4] = wp.isinf(nan_quat)
bool_outputs[5] = wp.isnan(nan_quat)
inf_quat = quat_type(dtype(wp.INF), dtype(0), dtype(0), dtype(0))
bool_outputs[6] = wp.isfinite(inf_quat)
bool_outputs[7] = wp.isinf(inf_quat)
bool_outputs[8] = wp.isnan(inf_quat)
kernel = getkernel(check_special_quat, suffix=dtype.__name__)
if register_kernels:
return
outputs_bool = wp.empty(9, dtype=wp.bool, device=device)
wp.launch(kernel, dim=1, inputs=[outputs_bool], device=device)
outputs_bool_cpu = outputs_bool.to("cpu").list()
test.assertTrue(outputs_bool_cpu[0], "wp.isfinite(zeros_quat) is not True")
test.assertFalse(outputs_bool_cpu[1], "wp.isinf(zeros_quat) is not False")
test.assertFalse(outputs_bool_cpu[2], "wp.isnan(zeros_quat) is not False")
test.assertFalse(outputs_bool_cpu[3], "wp.isfinite(nan_quat) is not False")
test.assertFalse(outputs_bool_cpu[4], "wp.isinf(nan_quat) is not False")
test.assertTrue(outputs_bool_cpu[5], "wp.isnan(nan_quat) is not True")
test.assertFalse(outputs_bool_cpu[6], "wp.isfinite(inf_quat) is not False")
test.assertTrue(outputs_bool_cpu[7], "wp.isinf(inf_quat) is not True")
test.assertFalse(outputs_bool_cpu[8], "wp.isnan(inf_quat) is not False")
def test_is_special_int(test, device, dtype, register_kernels=False):
vector_type = wp.types.vector(5, dtype)
matrix_type = wp.types.matrix((5, 5), dtype)
quat_type = wp.types.quaternion(dtype)
def check_is_special_int(bool_outputs: wp.array(dtype=wp.bool)):
bool_outputs[0] = wp.isfinite(dtype(0))
bool_outputs[1] = wp.isnan(dtype(0))
bool_outputs[2] = wp.isinf(dtype(0))
bool_outputs[3] = wp.isfinite(vector_type())
bool_outputs[4] = wp.isnan(vector_type())
bool_outputs[5] = wp.isinf(vector_type())
bool_outputs[6] = wp.isfinite(matrix_type())
bool_outputs[7] = wp.isnan(matrix_type())
bool_outputs[8] = wp.isinf(matrix_type())
bool_outputs[9] = wp.isfinite(quat_type())
bool_outputs[10] = wp.isnan(quat_type())
bool_outputs[11] = wp.isinf(quat_type())
kernel = getkernel(check_is_special_int, suffix=dtype.__name__)
if register_kernels:
return
outputs_bool = wp.empty(12, dtype=wp.bool, device=device)
wp.launch(kernel, dim=1, inputs=[outputs_bool], device=device)
outputs_bool_cpu = outputs_bool.to("cpu").list()
    test.assertTrue(outputs_bool_cpu[0], "wp.isfinite(0) is not True")
    test.assertFalse(outputs_bool_cpu[1], "wp.isnan(0) is not False")
    test.assertFalse(outputs_bool_cpu[2], "wp.isinf(0) is not False")
    test.assertTrue(outputs_bool_cpu[3], "wp.isfinite(vec) is not True")
    test.assertFalse(outputs_bool_cpu[4], "wp.isnan(vec) is not False")
    test.assertFalse(outputs_bool_cpu[5], "wp.isinf(vec) is not False")
    test.assertTrue(outputs_bool_cpu[6], "wp.isfinite(matrix) is not True")
    test.assertFalse(outputs_bool_cpu[7], "wp.isnan(matrix) is not False")
    test.assertFalse(outputs_bool_cpu[8], "wp.isinf(matrix) is not False")
    test.assertTrue(outputs_bool_cpu[9], "wp.isfinite(quat) is not True")
    test.assertFalse(outputs_bool_cpu[10], "wp.isnan(quat) is not False")
    test.assertFalse(outputs_bool_cpu[11], "wp.isinf(quat) is not False")
devices = get_test_devices()
class TestSpecialValues(unittest.TestCase):
pass
for dtype in [wp.float16, wp.float32, wp.float64]:
add_function_test_register_kernel(
TestSpecialValues, f"test_infinity_{dtype.__name__}", test_infinity_scalar, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestSpecialValues, f"test_nan_{dtype.__name__}", test_nan_scalar, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestSpecialValues, f"test_is_special_vec_{dtype.__name__}", test_is_special_vec, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestSpecialValues, f"test_is_special_mat_{dtype.__name__}", test_is_special_mat, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestSpecialValues, f"test_is_special_quat_{dtype.__name__}", test_is_special_quat, devices=devices, dtype=dtype
)
# Ensure functions like wp.isfinite work on integer types
for dtype in wp.types.int_types:
add_function_test_register_kernel(
TestSpecialValues, f"test_is_special_int_{dtype.__name__}", test_is_special_int, devices=devices, dtype=dtype
)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=False)
| 15,163 | Python | 40.774105 | 119 | 0.65429 |
NVIDIA/warp/warp/tests/test_model.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.sim import ModelBuilder
from warp.tests.unittest_utils import *
class TestModel(unittest.TestCase):
def test_add_triangles(self):
rng = np.random.default_rng(123)
pts = np.array(
[
[-0.00585869, 0.34189449, -1.17415233],
[-1.894547, 0.1788074, 0.9251329],
[-1.26141048, 0.16140787, 0.08823282],
[-0.08609255, -0.82722546, 0.65995427],
[0.78827592, -1.77375711, -0.55582718],
]
)
tris = np.array([[0, 3, 4], [0, 2, 3], [2, 1, 3], [1, 4, 3]])
builder1 = ModelBuilder()
builder2 = ModelBuilder()
for pt in pts:
builder1.add_particle(wp.vec3(pt), wp.vec3(), 1.0)
builder2.add_particle(wp.vec3(pt), wp.vec3(), 1.0)
# test add_triangle(s) with default arguments:
areas = builder2.add_triangles(tris[:, 0], tris[:, 1], tris[:, 2])
for i, t in enumerate(tris):
area = builder1.add_triangle(t[0], t[1], t[2])
self.assertAlmostEqual(area, areas[i], places=6)
# test add_triangle(s) with non default arguments:
tri_ke = rng.standard_normal(size=pts.shape[0])
tri_ka = rng.standard_normal(size=pts.shape[0])
tri_kd = rng.standard_normal(size=pts.shape[0])
tri_drag = rng.standard_normal(size=pts.shape[0])
tri_lift = rng.standard_normal(size=pts.shape[0])
for i, t in enumerate(tris):
builder1.add_triangle(
t[0],
t[1],
t[2],
tri_ke[i],
tri_ka[i],
tri_kd[i],
tri_drag[i],
tri_lift[i],
)
builder2.add_triangles(tris[:, 0], tris[:, 1], tris[:, 2], tri_ke, tri_ka, tri_kd, tri_drag, tri_lift)
assert_np_equal(np.array(builder1.tri_indices), np.array(builder2.tri_indices))
assert_np_equal(np.array(builder1.tri_poses), np.array(builder2.tri_poses), tol=1.0e-6)
assert_np_equal(np.array(builder1.tri_activations), np.array(builder2.tri_activations))
assert_np_equal(np.array(builder1.tri_materials), np.array(builder2.tri_materials))
def test_add_edges(self):
rng = np.random.default_rng(123)
pts = np.array(
[
[-0.00585869, 0.34189449, -1.17415233],
[-1.894547, 0.1788074, 0.9251329],
[-1.26141048, 0.16140787, 0.08823282],
[-0.08609255, -0.82722546, 0.65995427],
[0.78827592, -1.77375711, -0.55582718],
]
)
edges = np.array([[0, 4, 3, 1], [3, 2, 4, 1]])
builder1 = ModelBuilder()
builder2 = ModelBuilder()
for pt in pts:
builder1.add_particle(wp.vec3(pt), wp.vec3(), 1.0)
builder2.add_particle(wp.vec3(pt), wp.vec3(), 1.0)
# test defaults:
for i in range(2):
builder1.add_edge(edges[i, 0], edges[i, 1], edges[i, 2], edges[i, 3])
builder2.add_edges(edges[:, 0], edges[:, 1], edges[:, 2], edges[:, 3])
# test non defaults:
rest = rng.standard_normal(size=2)
edge_ke = rng.standard_normal(size=2)
edge_kd = rng.standard_normal(size=2)
for i in range(2):
builder1.add_edge(edges[i, 0], edges[i, 1], edges[i, 2], edges[i, 3], rest[i], edge_ke[i], edge_kd[i])
builder2.add_edges(edges[:, 0], edges[:, 1], edges[:, 2], edges[:, 3], rest, edge_ke, edge_kd)
assert_np_equal(np.array(builder1.edge_indices), np.array(builder2.edge_indices))
assert_np_equal(np.array(builder1.edge_rest_angle), np.array(builder2.edge_rest_angle), tol=1.0e-4)
assert_np_equal(np.array(builder1.edge_bending_properties), np.array(builder2.edge_bending_properties))
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 4,409 | Python | 39.458715 | 114 | 0.576321 |
NVIDIA/warp/warp/tests/test_modules_lite.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import warp as wp
from warp.tests.unittest_utils import *
devices = get_test_devices()
class TestModuleLite(unittest.TestCase):
def test_module_lite_load(self):
# Load current module
wp.load_module()
# Load named module
wp.load_module(wp.config)
        # Load named module recursively
wp.load_module(wp.config, recursive=True)
def test_module_lite_options(self):
wp.set_module_options({"max_unroll": 8})
module_options = wp.get_module_options()
self.assertIsInstance(module_options, dict)
self.assertEqual(module_options["max_unroll"], 8)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 1,158 | Python | 30.324324 | 76 | 0.709845 |
NVIDIA/warp/warp/tests/aux_test_conditional_unequal_types_kernels.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""This file defines a kernel that fails on codegen.py"""
import warp as wp
@wp.kernel
def unequal_types_kernel():
x = wp.int32(10)
y = 10
z = True
# Throws a TypeError
if x == y == z:
pass
| 646 | Python | 28.40909 | 76 | 0.733746 |
NVIDIA/warp/warp/tests/test_rand.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def test_kernel(
kernel_seed: int,
int_a: wp.array(dtype=int),
int_ab: wp.array(dtype=int),
float_01: wp.array(dtype=float),
float_ab: wp.array(dtype=float),
):
tid = wp.tid()
state = wp.rand_init(kernel_seed, tid)
int_a[tid] = wp.randi(state)
int_ab[tid] = wp.randi(state, 0, 100)
float_01[tid] = wp.randf(state)
float_ab[tid] = wp.randf(state, 0.0, 100.0)
def test_rand(test, device):
N = 10
int_a_device = wp.zeros(N, dtype=int, device=device)
int_a_host = wp.zeros(N, dtype=int, device="cpu")
int_ab_device = wp.zeros(N, dtype=int, device=device)
int_ab_host = wp.zeros(N, dtype=int, device="cpu")
float_01_device = wp.zeros(N, dtype=float, device=device)
float_01_host = wp.zeros(N, dtype=float, device="cpu")
float_ab_device = wp.zeros(N, dtype=float, device=device)
float_ab_host = wp.zeros(N, dtype=float, device="cpu")
seed = 42
wp.launch(
kernel=test_kernel,
dim=N,
inputs=[seed, int_a_device, int_ab_device, float_01_device, float_ab_device],
outputs=[],
device=device,
)
wp.copy(int_a_host, int_a_device)
wp.copy(int_ab_host, int_ab_device)
wp.copy(float_01_host, float_01_device)
wp.copy(float_ab_host, float_ab_device)
wp.synchronize_device(device)
int_a = int_a_host.numpy()
int_ab = int_ab_host.numpy()
float_01 = float_01_host.numpy()
float_ab = float_ab_host.numpy()
int_a_true = np.array(
[
-575632308,
59537738,
1898992239,
442961864,
-1069147335,
-478445524,
1803659809,
2122909397,
-1888556360,
334603718,
]
)
int_ab_true = np.array([46, 58, 46, 83, 85, 39, 72, 99, 18, 41])
float_01_true = np.array(
[
0.72961855,
0.86200964,
0.28770837,
0.8187722,
0.186335,
0.6101239,
0.56432086,
0.70428324,
0.64812654,
0.27679986,
]
)
float_ab_true = np.array(
[96.04259, 73.33809, 63.601555, 38.647305, 71.813896, 64.65809, 77.79791, 46.579605, 94.614456, 91.921814]
)
test.assertTrue((int_a == int_a_true).all())
test.assertTrue((int_ab == int_ab_true).all())
err = np.max(np.abs(float_01 - float_01_true))
test.assertTrue(err < 1e-04)
err = np.max(np.abs(float_ab - float_ab_true))
test.assertTrue(err < 1e-04)
@wp.kernel
def sample_cdf_kernel(kernel_seed: int, cdf: wp.array(dtype=float), samples: wp.array(dtype=int)):
tid = wp.tid()
state = wp.rand_init(kernel_seed, tid)
samples[tid] = wp.sample_cdf(state, cdf)
def test_sample_cdf(test, device):
seed = 42
cdf = np.arange(0.0, 1.0, 0.01, dtype=float)
cdf = cdf * cdf
cdf = wp.array(cdf, dtype=float, device=device)
num_samples = 1000
samples = wp.zeros(num_samples, dtype=int, device=device)
wp.launch(kernel=sample_cdf_kernel, dim=num_samples, inputs=[seed, cdf, samples], device=device)
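    # Note: no assertions here; this test only verifies that CDF sampling runs (see the manual histogram check below)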
# histogram should be linear
# plt.hist(samples.numpy())
# plt.show()
@wp.kernel
def sampling_kernel(
kernel_seed: int,
triangle_samples: wp.array(dtype=wp.vec2),
square_samples: wp.array(dtype=wp.vec2),
ring_samples: wp.array(dtype=wp.vec2),
disk_samples: wp.array(dtype=wp.vec2),
sphere_surface_samples: wp.array(dtype=wp.vec3),
sphere_samples: wp.array(dtype=wp.vec3),
hemisphere_surface_samples: wp.array(dtype=wp.vec3),
hemisphere_samples: wp.array(dtype=wp.vec3),
cube_samples: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
state = wp.rand_init(kernel_seed, tid)
triangle_samples[tid] = wp.sample_triangle(state)
ring_samples[tid] = wp.sample_unit_ring(state)
disk_samples[tid] = wp.sample_unit_disk(state)
sphere_surface_samples[tid] = wp.sample_unit_sphere_surface(state)
sphere_samples[tid] = wp.sample_unit_sphere(state)
hemisphere_surface_samples[tid] = wp.sample_unit_hemisphere_surface(state)
hemisphere_samples[tid] = wp.sample_unit_hemisphere(state)
square_samples[tid] = wp.sample_unit_square(state)
cube_samples[tid] = wp.sample_unit_cube(state)
def test_sampling_methods(test, device):
seed = 42
num_samples = 100
triangle_samples = wp.zeros(num_samples, dtype=wp.vec2, device=device)
square_samples = wp.zeros(num_samples, dtype=wp.vec2, device=device)
ring_samples = wp.zeros(num_samples, dtype=wp.vec2, device=device)
disk_samples = wp.zeros(num_samples, dtype=wp.vec2, device=device)
sphere_surface_samples = wp.zeros(num_samples, dtype=wp.vec3, device=device)
sphere_samples = wp.zeros(num_samples, dtype=wp.vec3, device=device)
hemisphere_surface_samples = wp.zeros(num_samples, dtype=wp.vec3, device=device)
hemisphere_samples = wp.zeros(num_samples, dtype=wp.vec3, device=device)
cube_samples = wp.zeros(num_samples, dtype=wp.vec3, device=device)
wp.launch(
kernel=sampling_kernel,
dim=num_samples,
inputs=[
seed,
triangle_samples,
square_samples,
ring_samples,
disk_samples,
sphere_surface_samples,
sphere_samples,
hemisphere_surface_samples,
hemisphere_samples,
cube_samples,
],
device=device,
)
# bounds check
test.assertTrue((triangle_samples.numpy()[:, 0] <= 1.0).all())
test.assertTrue((triangle_samples.numpy()[:, 0] >= 0.0).all())
test.assertTrue((triangle_samples.numpy()[:, 1] >= 0.0).all())
    test.assertTrue((triangle_samples.numpy()[:, 1] <= 1.0).all())
test.assertTrue((square_samples.numpy()[:, 0] >= -0.5).all())
    test.assertTrue((square_samples.numpy()[:, 0] <= 0.5).all())
test.assertTrue((square_samples.numpy()[:, 1] >= -0.5).all())
test.assertTrue((square_samples.numpy()[:, 1] <= 0.5).all())
test.assertTrue((cube_samples.numpy()[:, 0] >= -0.5).all())
test.assertTrue((cube_samples.numpy()[:, 0] <= 0.5).all())
test.assertTrue((cube_samples.numpy()[:, 1] >= -0.5).all())
test.assertTrue((cube_samples.numpy()[:, 1] <= 0.5).all())
test.assertTrue((cube_samples.numpy()[:, 2] >= -0.5).all())
test.assertTrue((cube_samples.numpy()[:, 2] <= 0.5).all())
test.assertTrue((hemisphere_surface_samples.numpy()[:, 2] >= 0.0).all())
test.assertTrue((hemisphere_samples.numpy()[:, 2] >= 0.0).all())
    test.assertTrue((np.linalg.norm(ring_samples.numpy(), axis=1) <= 1.0 + 1e-6).all())
    test.assertTrue((np.linalg.norm(disk_samples.numpy(), axis=1) <= 1.0 + 1e-6).all())
    test.assertTrue((np.linalg.norm(sphere_surface_samples.numpy(), axis=1) <= 1.0 + 1e-6).all())
    test.assertTrue((np.linalg.norm(sphere_samples.numpy(), axis=1) <= 1.0 + 1e-6).all())
    test.assertTrue((np.linalg.norm(hemisphere_surface_samples.numpy(), axis=1) <= 1.0 + 1e-6).all())
    test.assertTrue((np.linalg.norm(hemisphere_samples.numpy(), axis=1) <= 1.0 + 1e-6).all())
@wp.kernel
def sample_poisson_kernel(
kernel_seed: int, poisson_samples_low: wp.array(dtype=wp.uint32), poisson_samples_high: wp.array(dtype=wp.uint32)
):
tid = wp.tid()
state = wp.rand_init(kernel_seed, tid)
x = wp.poisson(state, 3.0)
y = wp.poisson(state, 42.0)
poisson_samples_low[tid] = x
poisson_samples_high[tid] = y
def test_poisson(test, device):
seed = 13
N = 20000
poisson_low = wp.zeros(N, dtype=wp.uint32, device=device)
poisson_high = wp.zeros(N, dtype=wp.uint32, device=device)
wp.launch(kernel=sample_poisson_kernel, dim=N, inputs=[seed, poisson_low, poisson_high], device=device)
# bins = np.arange(100)
# _ = plt.hist(poisson_high.numpy(), bins)
# plt.show()
rng = np.random.default_rng(seed)
np_poisson_low = rng.poisson(lam=3.0, size=N)
np_poisson_high = rng.poisson(lam=42.0, size=N)
poisson_low_mean = np.mean(poisson_low.numpy())
np_poisson_low_mean = np.mean(np_poisson_low)
poisson_high_mean = np.mean(poisson_high.numpy())
np_poisson_high_mean = np.mean(np_poisson_high)
poisson_low_std = np.std(poisson_low.numpy())
np_poisson_low_std = np.std(np_poisson_low)
poisson_high_std = np.std(poisson_high.numpy())
np_poisson_high_std = np.std(np_poisson_high)
# compare basic distribution characteristics
test.assertTrue(np.abs(poisson_low_mean - np_poisson_low_mean) <= 5e-1)
test.assertTrue(np.abs(poisson_high_mean - np_poisson_high_mean) <= 5e-1)
test.assertTrue(np.abs(poisson_low_std - np_poisson_low_std) <= 2e-1)
test.assertTrue(np.abs(poisson_high_std - np_poisson_high_std) <= 2e-1)
devices = get_test_devices()
class TestRand(unittest.TestCase):
pass
add_function_test(TestRand, "test_rand", test_rand, devices=devices)
add_function_test(TestRand, "test_sample_cdf", test_sample_cdf, devices=devices)
add_function_test(TestRand, "test_sampling_methods", test_sampling_methods, devices=devices)
add_function_test(TestRand, "test_poisson", test_poisson, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 9,760 | Python | 33.369718 | 117 | 0.636373 |
NVIDIA/warp/warp/tests/disabled_kinematics.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import unittest
import warp as wp
import warp.sim
from warp.tests.unittest_utils import *
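# Builds a ModelBuilder containing num_envs copies of the NVIDIA ant articulation,
# each with a nonzero base pose and joint state.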
def build_ant(num_envs):
builder = wp.sim.ModelBuilder()
for i in range(num_envs):
wp.sim.parse_mjcf(
os.path.join(os.path.dirname(__file__), "../../examples/assets/nv_ant.xml"),
builder,
up_axis="y",
)
coord_count = 15
dof_count = 14
coord_start = i * coord_count
dof_start = i * dof_count
# base
builder.joint_q[coord_start : coord_start + 3] = [i * 2.0, 0.70, 0.0]
builder.joint_q[coord_start + 3 : coord_start + 7] = wp.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi * 0.5)
# joints
builder.joint_q[coord_start + 7 : coord_start + coord_count] = [0.0, 1.0, 0.0, -1.0, 0.0, -1.0, 0.0, 1.0]
builder.joint_qd[dof_start + 6 : dof_start + dof_count] = [1.0, 1.0, 1.0, -1.0, 1.0, -1.0, 1.0, 1.0]
return builder
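# Builds a chain that exercises every joint type (fixed, revolute, universal, ball, compound,
# prismatic, D6 in several configurations, free, and distance), assigning nonzero joint
# coordinates and velocities where applicable.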
def build_complex_joint_mechanism(chain_length):
builder = wp.sim.ModelBuilder()
com0 = wp.vec3(1.0, 2.0, 3.0)
com1 = wp.vec3(4.0, 5.0, 6.0)
com2 = wp.vec3(7.0, 8.0, 9.0)
ax0 = wp.normalize(wp.vec3(-1.0, 2.0, 3.0))
ax1 = wp.normalize(wp.vec3(4.0, -1.0, 2.0))
ax2 = wp.normalize(wp.vec3(-3.0, 4.0, -1.0))
# declare some transforms with nonzero translation and orientation
tf0 = wp.transform(wp.vec3(1.0, 2.0, 3.0), wp.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi * 0.25))
tf1 = wp.transform(wp.vec3(4.0, 5.0, 6.0), wp.quat_from_axis_angle((0.0, 1.0, 0.0), math.pi * 0.5))
tf2 = wp.transform(wp.vec3(7.0, 8.0, 9.0), wp.quat_from_axis_angle((0.0, 0.0, 1.0), math.pi * 0.75))
parent = -1
for _i in range(chain_length):
b0 = builder.add_body(com=com0)
builder.add_joint_fixed(parent=parent, child=b0, parent_xform=tf1, child_xform=tf0)
assert builder.articulation_count == 1
b1 = builder.add_body(com=com1)
builder.add_joint_revolute(parent=b0, child=b1, parent_xform=tf1, child_xform=tf2, axis=ax1)
builder.joint_q[-1] = 0.3
builder.joint_qd[-1] = 1.0
b2 = builder.add_body(com=com2)
builder.add_joint_universal(parent=b1, child=b2, parent_xform=tf2, child_xform=tf0, axis_0=ax0, axis_1=ax1)
builder.joint_q[-2:] = [0.3, 0.5]
builder.joint_qd[-2:] = [1.0, -1.0]
b3 = builder.add_body(com=com0)
builder.add_joint_ball(parent=b2, child=b3, parent_xform=tf0, child_xform=tf1)
builder.joint_q[-4:] = list(wp.quat_from_axis_angle(ax0, 0.7))
builder.joint_qd[-3:] = [1.0, -0.6, 1.5]
b4 = builder.add_body(com=com1)
builder.add_joint_compound(
parent=b3,
child=b4,
parent_xform=tf2,
child_xform=tf1,
axis_0=(0, 0, 1),
axis_1=(1, 0, 0),
axis_2=(0, 1, 0),
)
builder.joint_q[-3:] = [0.3, 0.5, 0.27]
builder.joint_qd[-3:] = [1.23, -1.0, 0.5]
b5 = builder.add_body(com=com2)
builder.add_joint_prismatic(
parent=b4,
child=b5,
parent_xform=tf2,
child_xform=tf0,
axis=ax0,
)
builder.joint_q[-1] = 0.92
builder.joint_qd[-1] = -0.63
b6 = builder.add_body(com=com0)
builder.add_joint_d6(
parent=b5,
child=b6,
parent_xform=tf0,
child_xform=tf2,
linear_axes=[ax0, ax1, wp.cross(ax0, ax1)],
angular_axes=[ax1, ax2, wp.cross(ax1, ax2)],
)
builder.joint_q[-6:] = [0.3, 0.5, 0.7, 0.9, 1.1, 1.3]
builder.joint_qd[-6:] = [1.0, -1.0, 0.5, 0.8, -0.3, 0.1]
b7 = builder.add_body(com=com1)
builder.add_joint_free(
parent=b6,
child=b7,
parent_xform=tf1,
child_xform=tf2,
)
builder.joint_q[-7:] = [0.5, -0.9, 1.4] + list(wp.quat_rpy(0.3, -0.5, 0.7))
builder.joint_qd[-6:] = [1.0, -1.0, 0.5, 0.8, -0.3, 0.1]
b8 = builder.add_body(com=com2)
builder.add_joint_distance(
parent=b7,
child=b8,
parent_xform=tf1,
child_xform=tf2,
)
builder.joint_q[-7:] = [-0.3, -0.7, 0.2] + list(wp.quat_rpy(0.1, 0.1, 0.4))
builder.joint_qd[-6:] = [-0.34, 0.5, -0.6, -0.4, 0.2, 0.1]
# D6 joint that behaves like a fixed joint
b9 = builder.add_body(com=com0)
builder.add_joint_d6(
parent=b8,
child=b9,
parent_xform=tf0,
child_xform=tf2,
linear_axes=[],
angular_axes=[],
)
b10 = builder.add_body(com=com0)
builder.add_joint_d6(
parent=b9,
child=b10,
parent_xform=tf1,
child_xform=tf2,
linear_axes=[ax1],
angular_axes=[ax2, ax0],
)
builder.joint_q[-3:] = [0.3, 0.5, 0.7]
builder.joint_qd[-3:] = [1.0, -1.0, 0.5]
b11 = builder.add_body(com=com1)
builder.add_joint_d6(
parent=b10,
child=b11,
parent_xform=tf1,
child_xform=tf2,
linear_axes=[ax1, ax0, wp.cross(ax1, ax0)],
angular_axes=[],
)
builder.joint_q[-3:] = [0.3, 0.5, 0.7]
builder.joint_qd[-3:] = [1.0, -1.0, 0.5]
b12 = builder.add_body(com=com2)
builder.add_joint_d6(
parent=b11,
child=b12,
parent_xform=tf1,
child_xform=tf2,
linear_axes=[],
angular_axes=[ax1, ax2, wp.cross(ax1, ax2)],
)
builder.joint_q[-3:] = [0.3, 0.5, 0.7]
builder.joint_qd[-3:] = [1.0, -1.0, 0.5]
parent = b12
return builder
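# Round-trip check: evaluate forward kinematics from the joint coordinates, then recover
# them with inverse kinematics from the resulting body state and compare against the originals.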
def check_fk_ik(builder, device):
model = builder.finalize(device)
state = model.state()
q_fk = model.joint_q.numpy()
qd_fk = model.joint_qd.numpy()
wp.sim.eval_fk(model, model.joint_q, model.joint_qd, None, state)
q_ik = wp.zeros_like(model.joint_q)
qd_ik = wp.zeros_like(model.joint_qd)
wp.sim.eval_ik(model, state, q_ik, qd_ik)
# adjust numpy print settings
# np.set_printoptions(precision=4, floatmode="fixed", suppress=True)
# print("q:")
# print(np.array(q_fk))
# print(q_ik.numpy())
# print("qd:")
# print(np.array(qd_fk))
# print(qd_ik.numpy())
assert_np_equal(q_ik.numpy(), q_fk, tol=1e-4)
assert_np_equal(qd_ik.numpy(), qd_fk, tol=1e-4)
def test_fk_ik_ant(test, device):
builder = build_ant(3)
check_fk_ik(builder, device)
def test_fk_ik_complex_joint_mechanism(test, device):
builder = build_complex_joint_mechanism(2)
check_fk_ik(builder, device)
devices = get_test_devices()
class TestKinematics(unittest.TestCase):
pass
add_function_test(TestKinematics, "test_fk_ik_ant", test_fk_ik_ant, devices=devices)
add_function_test(
TestKinematics, "test_fk_ik_complex_joint_mechanism", test_fk_ik_complex_joint_mechanism, devices=devices
)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=False)
| 7,602 | Python | 30.945378 | 117 | 0.551697 |
NVIDIA/warp/warp/tests/test_array_reduce.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
from warp.utils import array_inner, array_sum
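# Test factories: each returns a test closure specialized for the given element dtype,
# comparing Warp's array_sum/array_inner reductions against NumPy references.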
def make_test_array_sum(dtype):
N = 1000
def test_array_sum(test, device):
rng = np.random.default_rng(123)
cols = wp.types.type_length(dtype)
values_np = rng.random(size=(N, cols))
values = wp.array(values_np, device=device, dtype=dtype)
vsum = array_sum(values)
ref_vsum = values_np.sum(axis=0)
assert_np_equal(vsum / N, ref_vsum / N, 0.0001)
return test_array_sum
def make_test_array_sum_axis(dtype):
I = 5
J = 10
K = 2
N = I * J * K
def test_array_sum(test, device):
rng = np.random.default_rng(123)
values_np = rng.random(size=(I, J, K))
values = wp.array(values_np, shape=(I, J, K), device=device, dtype=dtype)
for axis in range(3):
vsum = array_sum(values, axis=axis)
ref_vsum = values_np.sum(axis=axis)
assert_np_equal(vsum.numpy() / N, ref_vsum / N, 0.0001)
return test_array_sum
def test_array_sum_empty(test, device):
values = wp.array([], device=device, dtype=wp.vec2)
assert_np_equal(array_sum(values), np.zeros(2))
values = wp.array([], shape=(0, 3), device=device, dtype=float)
assert_np_equal(array_sum(values, axis=0).numpy(), np.zeros((1, 3)))
def make_test_array_inner(dtype):
N = 1000
def test_array_inner(test, device):
rng = np.random.default_rng(123)
cols = wp.types.type_length(dtype)
a_np = rng.random(size=(N, cols))
b_np = rng.random(size=(N, cols))
a = wp.array(a_np, device=device, dtype=dtype)
b = wp.array(b_np, device=device, dtype=dtype)
ab = array_inner(a, b)
ref_ab = np.dot(a_np.flatten(), b_np.flatten())
test.assertAlmostEqual(ab / N, ref_ab / N, places=5)
return test_array_inner
def make_test_array_inner_axis(dtype):
I = 5
J = 10
K = 2
N = I * J * K
def test_array_inner(test, device):
rng = np.random.default_rng(123)
a_np = rng.random(size=(I, J, K))
b_np = rng.random(size=(I, J, K))
a = wp.array(a_np, shape=(I, J, K), device=device, dtype=dtype)
b = wp.array(b_np, shape=(I, J, K), device=device, dtype=dtype)
ab = array_inner(a, b, axis=0)
ref_ab = np.einsum(a_np, [0, 1, 2], b_np, [0, 1, 2], [1, 2])
assert_np_equal(ab.numpy() / N, ref_ab / N, 0.0001)
ab = array_inner(a, b, axis=1)
ref_ab = np.einsum(a_np, [0, 1, 2], b_np, [0, 1, 2], [0, 2])
assert_np_equal(ab.numpy() / N, ref_ab / N, 0.0001)
ab = array_inner(a, b, axis=2)
ref_ab = np.einsum(a_np, [0, 1, 2], b_np, [0, 1, 2], [0, 1])
assert_np_equal(ab.numpy() / N, ref_ab / N, 0.0001)
return test_array_inner
def test_array_inner_empty(test, device):
values = wp.array([], device=device, dtype=wp.vec2)
test.assertEqual(array_inner(values, values), 0.0)
values = wp.array([], shape=(0, 3), device=device, dtype=float)
assert_np_equal(array_inner(values, values, axis=0).numpy(), np.zeros((1, 3)))
devices = get_test_devices()
class TestArrayReduce(unittest.TestCase):
pass
add_function_test(TestArrayReduce, "test_array_sum_double", make_test_array_sum(wp.float64), devices=devices)
add_function_test(TestArrayReduce, "test_array_sum_vec3", make_test_array_sum(wp.vec3), devices=devices)
add_function_test(TestArrayReduce, "test_array_sum_axis_float", make_test_array_sum_axis(wp.float32), devices=devices)
add_function_test(TestArrayReduce, "test_array_sum_empty", test_array_sum_empty, devices=devices)
add_function_test(TestArrayReduce, "test_array_inner_double", make_test_array_inner(wp.float64), devices=devices)
add_function_test(TestArrayReduce, "test_array_inner_vec3", make_test_array_inner(wp.vec3), devices=devices)
add_function_test(
TestArrayReduce, "test_array_inner_axis_float", make_test_array_inner_axis(wp.float32), devices=devices
)
add_function_test(TestArrayReduce, "test_array_inner_empty", test_array_inner_empty, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 4,684 | Python | 30.442953 | 118 | 0.639197 |
NVIDIA/warp/warp/tests/test_operators.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import warp as wp
from warp.tests.unittest_utils import *
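# Note: built-ins such as expect_eq, vec3, and mat22 are referenced without the wp. prefix
# inside these kernels; they are available unqualified within Warp kernel code.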
@wp.kernel
def test_operators_scalar_float():
a = 1.0
b = 2.0
c = a * b
d = a + b
e = a / b
f = a - b
g = b**8.0
h = 10.0 // 3.0
expect_eq(c, 2.0)
expect_eq(d, 3.0)
expect_eq(e, 0.5)
expect_eq(f, -1.0)
expect_eq(g, 256.0)
expect_eq(h, 3.0)
@wp.kernel
def test_operators_scalar_int():
a = 1
b = 2
c = a * b
d = a + b
e = a / b
f = a - b
# g = b**8 # integer pow not implemented
h = 10 // 3
i = 10 % 3
j = 2 << 3
k = 16 >> 1
expect_eq(c, 2)
expect_eq(d, 3)
expect_eq(e, 0)
expect_eq(f, -1)
# expect_eq(g, 256)
expect_eq(h, 3)
expect_eq(i, 1)
expect_eq(j, 16)
expect_eq(k, 8)
f0 = wp.uint32(1 << 0)
f1 = wp.uint32(1 << 3)
expect_eq(f0 | f1, f0 + f1)
expect_eq(f0 & f1, wp.uint32(0))
l = wp.uint8(0)
for n in range(8):
l |= wp.uint8(1 << n)
expect_eq(l, ~wp.uint8(0))
@wp.kernel
def test_operators_vector_index():
v = wp.vec4(1.0, 2.0, 3.0, 4.0)
expect_eq(v[0], 1.0)
expect_eq(v[1], 2.0)
expect_eq(v[2], 3.0)
expect_eq(v[3], 4.0)
@wp.kernel
def test_operators_matrix_index():
m22 = wp.mat22(1.0, 2.0, 3.0, 4.0)
expect_eq(m22[0, 0], 1.0)
expect_eq(m22[0, 1], 2.0)
expect_eq(m22[1, 0], 3.0)
expect_eq(m22[1, 1], 4.0)
@wp.kernel
def test_operators_vec3():
v = vec3(1.0, 2.0, 3.0)
r0 = v * 3.0
r1 = 3.0 * v
expect_eq(r0, vec3(3.0, 6.0, 9.0))
expect_eq(r1, vec3(3.0, 6.0, 9.0))
col0 = vec3(1.0, 0.0, 0.0)
col1 = vec3(0.0, 2.0, 0.0)
col2 = vec3(0.0, 0.0, 3.0)
m = mat33(col0, col1, col2)
expect_eq(m * vec3(1.0, 0.0, 0.0), col0)
expect_eq(m * vec3(0.0, 1.0, 0.0), col1)
expect_eq(m * vec3(0.0, 0.0, 1.0), col2)
two = vec3(1.0) * 2.0
expect_eq(two, vec3(2.0, 2.0, 2.0))
@wp.kernel
def test_operators_vec4():
v = vec4(1.0, 2.0, 3.0, 4.0)
r0 = v * 3.0
r1 = 3.0 * v
expect_eq(r0, vec4(3.0, 6.0, 9.0, 12.0))
expect_eq(r1, vec4(3.0, 6.0, 9.0, 12.0))
col0 = vec4(1.0, 0.0, 0.0, 0.0)
col1 = vec4(0.0, 2.0, 0.0, 0.0)
col2 = vec4(0.0, 0.0, 3.0, 0.0)
col3 = vec4(0.0, 0.0, 0.0, 4.0)
m = mat44(col0, col1, col2, col3)
expect_eq(m * vec4(1.0, 0.0, 0.0, 0.0), col0)
expect_eq(m * vec4(0.0, 1.0, 0.0, 0.0), col1)
expect_eq(m * vec4(0.0, 0.0, 1.0, 0.0), col2)
expect_eq(m * vec4(0.0, 0.0, 0.0, 1.0), col3)
two = vec4(1.0) * 2.0
expect_eq(two, vec4(2.0, 2.0, 2.0, 2.0))
@wp.kernel
def test_operators_mat22():
m = mat22(1.0, 2.0, 3.0, 4.0)
r = mat22(3.0, 6.0, 9.0, 12.0)
r0 = m * 3.0
r1 = 3.0 * m
expect_eq(r0, r)
expect_eq(r1, r)
expect_eq(r0[0, 0], 3.0)
expect_eq(r0[0, 1], 6.0)
expect_eq(r0[1, 0], 9.0)
expect_eq(r0[1, 1], 12.0)
expect_eq(r0[0], wp.vec2(3.0, 6.0))
expect_eq(r0[1], wp.vec2(9.0, 12.0))
@wp.kernel
def test_operators_mat33():
m = mat33(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0)
r = mat33(3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0, 27.0)
r0 = m * 3.0
r1 = 3.0 * m
expect_eq(r0, r)
expect_eq(r1, r)
expect_eq(r0[0, 0], 3.0)
expect_eq(r0[0, 1], 6.0)
expect_eq(r0[0, 2], 9.0)
expect_eq(r0[1, 0], 12.0)
expect_eq(r0[1, 1], 15.0)
expect_eq(r0[1, 2], 18.0)
expect_eq(r0[2, 0], 21.0)
expect_eq(r0[2, 1], 24.0)
expect_eq(r0[2, 2], 27.0)
expect_eq(r0[0], wp.vec3(3.0, 6.0, 9.0))
expect_eq(r0[1], wp.vec3(12.0, 15.0, 18.0))
expect_eq(r0[2], wp.vec3(21.0, 24.0, 27.0))
@wp.kernel
def test_operators_mat44():
m = mat44(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0)
r = mat44(3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0, 27.0, 30.0, 33.0, 36.0, 39.0, 42.0, 45.0, 48.0)
r0 = m * 3.0
r1 = 3.0 * m
expect_eq(r0, r)
expect_eq(r1, r)
expect_eq(r0[0, 0], 3.0)
expect_eq(r0[0, 1], 6.0)
expect_eq(r0[0, 2], 9.0)
expect_eq(r0[0, 3], 12.0)
expect_eq(r0[1, 0], 15.0)
expect_eq(r0[1, 1], 18.0)
expect_eq(r0[1, 2], 21.0)
expect_eq(r0[1, 3], 24.0)
expect_eq(r0[2, 0], 27.0)
expect_eq(r0[2, 1], 30.0)
expect_eq(r0[2, 2], 33.0)
expect_eq(r0[2, 3], 36.0)
expect_eq(r0[3, 0], 39.0)
expect_eq(r0[3, 1], 42.0)
expect_eq(r0[3, 2], 45.0)
expect_eq(r0[3, 3], 48.0)
expect_eq(r0[0], wp.vec4(3.0, 6.0, 9.0, 12.0))
expect_eq(r0[1], wp.vec4(15.0, 18.0, 21.0, 24.0))
expect_eq(r0[2], wp.vec4(27.0, 30.0, 33.0, 36.0))
expect_eq(r0[3], wp.vec4(39.0, 42.0, 45.0, 48.0))
devices = get_test_devices()
class TestOperators(unittest.TestCase):
pass
add_kernel_test(TestOperators, test_operators_scalar_float, dim=1, devices=devices)
add_kernel_test(TestOperators, test_operators_scalar_int, dim=1, devices=devices)
add_kernel_test(TestOperators, test_operators_matrix_index, dim=1, devices=devices)
add_kernel_test(TestOperators, test_operators_vector_index, dim=1, devices=devices)
add_kernel_test(TestOperators, test_operators_vec3, dim=1, devices=devices)
add_kernel_test(TestOperators, test_operators_vec4, dim=1, devices=devices)
add_kernel_test(TestOperators, test_operators_mat22, dim=1, devices=devices)
add_kernel_test(TestOperators, test_operators_mat33, dim=1, devices=devices)
add_kernel_test(TestOperators, test_operators_mat44, dim=1, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 5,972 | Python | 22.987952 | 106 | 0.55576 |
NVIDIA/warp/warp/tests/aux_test_reference_reference.py | # This file is used to test reloading module references.
import warp as wp
@wp.func
def more_magic():
return 2.0
| 120 | Python | 12.444443 | 56 | 0.708333 |
NVIDIA/warp/warp/tests/test_codegen.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import sys
import unittest
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def test_rename():
a = 0
b = 1
a = b
a = 2
wp.expect_eq(a, 2)
wp.expect_eq(b, 1)
@wp.kernel
def test_inplace():
a = 1.0
a += 2.0
wp.expect_eq(a, 3.0)
@wp.kernel
def test_constant(c: float):
a = 0.0
a = c + 1.0
wp.expect_eq(a, 2.0)
@wp.kernel
def test_dynamic_for_rename(n: int):
f0 = int(0.0)
f1 = int(1.0)
for _i in range(0, n):
f = f0 + f1
f0 = f1
f1 = f
wp.expect_eq(f1, 89)
@wp.kernel
def test_dynamic_for_inplace(n: int):
a = float(0.0)
for _i in range(0, n):
a += 1.0
wp.expect_eq(a, float(n))
@wp.kernel
def test_reassign():
f0 = 1.0
f1 = f0
f1 = f1 + 2.0
wp.expect_eq(f1, 3.0)
wp.expect_eq(f0, 1.0)
@wp.kernel
def test_dynamic_reassign(n: int):
f0 = wp.vec3()
f1 = f0
for _i in range(0, n):
f1 = f1 - wp.vec3(2.0, 0.0, 0.0)
wp.expect_eq(f1, wp.vec3(-4.0, 0.0, 0.0))
wp.expect_eq(f0, wp.vec3())
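# count iterations of range() loops with literal bounds: range(n), range(start, end), and range(start, end, step)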
@wp.kernel
def test_range_static_sum(result: wp.array(dtype=int)):
a = int(0)
for _i in range(10):
a = a + 1
b = int(0)
for _i in range(0, 10):
b = b + 1
c = int(0)
for _i in range(0, 20, 2):
c = c + 1
result[0] = a
result[1] = b
result[2] = c
@wp.kernel
def test_range_dynamic_sum(start: int, end: int, step: int, result: wp.array(dtype=int)):
a = int(0)
for _i in range(end):
a = a + 1
b = int(0)
for _i in range(start, end):
b = b + 1
c = int(0)
for _i in range(start, end * step, step):
c = c + 1
d = int(0)
for _i in range(end * step, start, -step):
d = d + 1
result[0] = a
result[1] = b
result[2] = c
result[3] = d
@wp.kernel
def test_range_dynamic(start: int, end: int, step: int, result: wp.array(dtype=int)):
output = int(0)
for i in range(start, end, step):
result[output] = i
output += 1
@wp.kernel
def test_range_dynamic_nested(n: int):
sum1 = float(0.0)
sum2 = float(0.0)
sum3 = float(0.0)
for _i in range(n):
sum1 = sum1 + 1.0
sum3 = sum3 + 1.0
for _j in range(n):
sum2 = sum2 + 1.0
sum3 = sum3 + 1.0
sum3 = sum3 + 1.0
wp.expect_eq(sum1, float(n))
wp.expect_eq(sum2, float(n * n))
wp.expect_eq(sum3, float(n * n + 2 * n))
@wp.kernel
def test_while(n: int):
i = int(0)
while i < n:
i = i + 1
wp.expect_eq(i, n)
@wp.kernel
def test_pass(n: int):
i = int(0)
while i < n:
if False:
pass
else:
i = i + 1
wp.expect_eq(i, n)
@wp.kernel
def test_break(n: int):
a = int(0)
for _i in range(0, n):
if a == 5:
break
a += 1
wp.expect_eq(a, 5)
@wp.kernel
def test_break_early(n: int):
a = int(0)
for i in range(0, n):
if i > 5:
a = 1
break
wp.expect_eq(a, 1)
@wp.kernel
def test_break_unroll():
a = int(0)
for i in range(0, 10):
if i > 5:
a = i
break
wp.expect_eq(a, 6)
@wp.kernel
def test_break_while():
a = int(0)
while a < 10:
if a > 5:
break
a += 1
wp.expect_eq(a, 6)
@wp.kernel
def test_break_multiple(n: int):
a = int(0)
for i in range(0, n):
if i == 6:
a = 1
break
if i == 5:
a = 2
break
if i == 7:
a = 3
break
wp.expect_eq(a, 2)
@wp.kernel
def test_continue(n: int):
a = int(0)
for i in range(0, n):
if i == 5:
continue
a += 1
wp.expect_eq(a, n - 1)
@wp.kernel
def test_continue_unroll():
a = int(0)
for i in range(0, 10):
if i == 5:
continue
a += 1
wp.expect_eq(a, 9)
lower = wp.constant(-3)
upper = wp.constant(3)
step = wp.constant(2)
# test unrolling of loops with constant-size params;
# we can't easily verify whether unrolling has occurred,
# so just check correctness at this stage
@wp.kernel
def test_range_constant():
s = 0
for i in range(upper):
s += i
# sum [0, 3)
wp.expect_eq(s, 3)
s = 0
for i in range(lower, upper):
s += i
# sum [-3, 3)
wp.expect_eq(s, -3)
s = 0
for i in range(lower, upper, step):
s += i
# sum [-3, 3)
wp.expect_eq(s, -3)
N = wp.constant(3)
# test a dynamic loop nested between loops expected to be unrolled.
@wp.kernel
def test_range_constant_dynamic_nested(m: int):
s = int(0)
for _i in range(N):
for _k in range(m):
for _j in range(N):
s += 1
wp.expect_eq(s, N * m * N)
@wp.kernel
def test_range_expression():
idx = 1
batch_size = 100
a = wp.float(0.0)
c = wp.float(1.0)
# constant expression with a function
for _i in range(4 * idx, wp.min(4 * idx + 4, batch_size)):
a += c
for _i in range(4 * idx, min(4 * idx + 4, batch_size)):
a += c
tid = wp.tid()
# dynamic expression with a function
for _i in range(4 * idx, wp.min(4 * idx, tid + 1000)):
a += c
for _i in range(4 * idx, min(4 * idx, tid + 1000)):
a += c
wp.expect_eq(a, 8.0)
def test_unresolved_func(test, device):
# a kernel with an unresolved function must be in a separate module; otherwise the current module would fail to load
from warp.tests.aux_test_unresolved_func import unresolved_func_kernel
# ensure that an appropriate exception is raised when the bad module gets loaded
with test.assertRaisesRegex(RuntimeError, "Could not find function wp.missing_func"):
wp.launch(unresolved_func_kernel, dim=1, inputs=[], device=device)
# remove all references to the bad module so that subsequent calls to wp.force_load()
# won't try to load it unless we explicitly re-import it again
del wp.context.user_modules["warp.tests.aux_test_unresolved_func"]
del sys.modules["warp.tests.aux_test_unresolved_func"]
def test_unresolved_symbol(test, device):
# a kernel with an unresolved symbol must be in a separate module; otherwise the current module would fail to load
from warp.tests.aux_test_unresolved_symbol import unresolved_symbol_kernel
# ensure that an appropriate exception is raised when the bad module gets loaded
with test.assertRaisesRegex(KeyError, "Referencing undefined symbol: missing_symbol"):
wp.launch(unresolved_symbol_kernel, dim=1, inputs=[], device=device)
# remove all references to the bad module so that subsequent calls to wp.force_load()
# won't try to load it unless we explicitly re-import it again
del wp.context.user_modules["warp.tests.aux_test_unresolved_symbol"]
del sys.modules["warp.tests.aux_test_unresolved_symbol"]
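# kernels must not reference module-level variables unless wrapped in wp.constant(); each launch below should raise a RuntimeError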
def test_error_global_var(test, device):
arr = wp.array(
(1.0, 2.0, 3.0),
dtype=float,
device=device,
)
def kernel_1_fn(
out: wp.array(dtype=float),
):
out[0] = arr[0]
def kernel_2_fn(
out: wp.array(dtype=float),
):
out[0] = arr
def kernel_3_fn(
out: wp.array(dtype=float),
):
out[0] = wp.lower_bound(arr, 2.0)
out = wp.empty_like(arr)
kernel = wp.Kernel(func=kernel_1_fn)
with test.assertRaisesRegex(
RuntimeError,
r"Cannot reference a global variable from a kernel unless `wp.constant\(\)` is being used",
):
wp.launch(kernel, dim=out.shape, inputs=(), outputs=(out,))
kernel = wp.Kernel(func=kernel_2_fn)
with test.assertRaisesRegex(
RuntimeError,
r"Cannot reference a global variable from a kernel unless `wp.constant\(\)` is being used",
):
wp.launch(kernel, dim=out.shape, inputs=(), outputs=(out,))
kernel = wp.Kernel(func=kernel_3_fn)
with test.assertRaisesRegex(
RuntimeError,
r"Cannot reference a global variable from a kernel unless `wp.constant\(\)` is being used",
):
wp.launch(kernel, dim=out.shape, inputs=(), outputs=(out,))
class TestCodeGen(unittest.TestCase):
pass
devices = get_test_devices()
add_kernel_test(TestCodeGen, name="test_inplace", kernel=test_inplace, dim=1, devices=devices)
add_kernel_test(TestCodeGen, name="test_rename", kernel=test_rename, dim=1, devices=devices)
add_kernel_test(TestCodeGen, name="test_constant", kernel=test_constant, inputs=[1.0], dim=1, devices=devices)
add_kernel_test(
TestCodeGen, name="test_dynamic_for_rename", kernel=test_dynamic_for_rename, inputs=[10], dim=1, devices=devices
)
add_kernel_test(
TestCodeGen,
name="test_dynamic_for_inplace",
kernel=test_dynamic_for_inplace,
inputs=[10],
dim=1,
devices=devices,
)
add_kernel_test(TestCodeGen, name="test_reassign", kernel=test_reassign, dim=1, devices=devices)
add_kernel_test(
TestCodeGen, name="test_dynamic_reassign", kernel=test_dynamic_reassign, inputs=[2], dim=1, devices=devices
)
add_kernel_test(
TestCodeGen,
name="test_range_dynamic_forward",
kernel=test_range_dynamic,
dim=1,
inputs=[0, 4, 1],
expect=[0, 1, 2, 3],
devices=devices,
)
add_kernel_test(
TestCodeGen,
name="test_range_dynamic_reverse",
kernel=test_range_dynamic,
dim=1,
inputs=[4, 0, -1],
expect=[4, 3, 2, 1],
devices=devices,
)
add_kernel_test(
TestCodeGen,
name="test_range_dynamic_forward_step",
kernel=test_range_dynamic,
dim=1,
inputs=[0, 8, 2],
expect=[0, 2, 4, 6],
devices=devices,
)
add_kernel_test(
TestCodeGen,
name="test_range_dynamic_reverse_step",
kernel=test_range_dynamic,
dim=1,
inputs=[8, 0, -2],
expect=[8, 6, 4, 2],
devices=devices,
)
add_kernel_test(
TestCodeGen,
name="test_range_static_sum",
kernel=test_range_static_sum,
dim=1,
expect=[10, 10, 10],
devices=devices,
)
add_kernel_test(
TestCodeGen,
name="test_range_dynamic_sum",
kernel=test_range_dynamic_sum,
dim=1,
inputs=[0, 10, 2],
expect=[10, 10, 10, 10],
devices=devices,
)
add_kernel_test(
TestCodeGen,
name="test_range_dynamic_sum_zero",
kernel=test_range_dynamic_sum,
dim=1,
inputs=[0, 0, 1],
expect=[0, 0, 0, 0],
devices=devices,
)
add_kernel_test(TestCodeGen, name="test_range_constant", kernel=test_range_constant, dim=1, devices=devices)
add_kernel_test(
TestCodeGen,
name="test_range_constant_dynamic_nested",
kernel=test_range_constant_dynamic_nested,
dim=1,
inputs=[10],
devices=devices,
)
add_kernel_test(
TestCodeGen,
name="test_range_dynamic_nested",
kernel=test_range_dynamic_nested,
dim=1,
inputs=[4],
devices=devices,
)
add_kernel_test(
TestCodeGen,
name="test_range_expression",
kernel=test_range_expression,
dim=1,
devices=devices,
)
add_kernel_test(TestCodeGen, name="test_while_zero", kernel=test_while, dim=1, inputs=[0], devices=devices)
add_kernel_test(TestCodeGen, name="test_while_positive", kernel=test_while, dim=1, inputs=[16], devices=devices)
add_kernel_test(TestCodeGen, name="test_pass", kernel=test_pass, dim=1, inputs=[16], devices=devices)
add_kernel_test(TestCodeGen, name="test_break", kernel=test_break, dim=1, inputs=[10], devices=devices)
add_kernel_test(TestCodeGen, name="test_break_early", kernel=test_break_early, dim=1, inputs=[10], devices=devices)
add_kernel_test(TestCodeGen, name="test_break_unroll", kernel=test_break_unroll, dim=1, devices=devices)
add_kernel_test(TestCodeGen, name="test_break_while", kernel=test_break_while, dim=1, devices=devices)
add_kernel_test(
TestCodeGen, name="test_break_multiple", kernel=test_break_multiple, dim=1, inputs=[10], devices=devices
)
add_kernel_test(TestCodeGen, name="test_continue", kernel=test_continue, dim=1, inputs=[10], devices=devices)
add_kernel_test(TestCodeGen, name="test_continue_unroll", kernel=test_continue_unroll, dim=1, devices=devices)
add_function_test(TestCodeGen, func=test_unresolved_func, name="test_unresolved_func", devices=devices)
add_function_test(TestCodeGen, func=test_unresolved_symbol, name="test_unresolved_symbol", devices=devices)
add_function_test(TestCodeGen, func=test_error_global_var, name="test_error_global_var", devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=True)
| 12,967 | Python | 22.033748 | 116 | 0.608391 |
NVIDIA/warp/warp/tests/test_fabricarray.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import unittest
from typing import Any
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
# dtypes used to test fabric arrays
_fabric_types = [
*wp.types.scalar_types,
*[wp.types.vector(2, T) for T in wp.types.scalar_types],
*[wp.types.vector(3, T) for T in wp.types.scalar_types],
*[wp.types.vector(4, T) for T in wp.types.scalar_types],
*[wp.types.matrix((2, 2), T) for T in wp.types.scalar_types],
*[wp.types.matrix((3, 3), T) for T in wp.types.scalar_types],
*[wp.types.matrix((4, 4), T) for T in wp.types.scalar_types],
*[wp.types.quaternion(T) for T in wp.types.float_types],
]
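# maps a Warp dtype to the Fabric attribute type descriptor expected by the array interfaces below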
def _warp_type_to_fabric(dtype, is_array=False):
scalar_map = {
wp.bool: "b",
wp.int8: "i1",
wp.int16: "i2",
wp.int32: "i4",
wp.int64: "i8",
wp.uint8: "u1",
wp.uint16: "u2",
wp.uint32: "u4",
wp.uint64: "u8",
wp.float16: "f2",
wp.float32: "f4",
wp.float64: "f8",
}
if hasattr(dtype, "_wp_scalar_type_"):
type_str = scalar_map[dtype._wp_scalar_type_]
if len(dtype._shape_) == 1:
role = "vector"
else:
role = "matrix"
else:
type_str = scalar_map[dtype]
role = ""
if is_array:
array_depth = 1
else:
array_depth = 0
return (True, type_str, dtype._length_, array_depth, role)
# returns a fabric array interface constructed from a regular array
def _create_fabric_array_interface(data: wp.array, attrib: str, bucket_sizes: list = None, copy=False):
assert isinstance(data, wp.array)
assert data.ndim == 1
assert isinstance(attrib, str)
if copy:
data = wp.clone(data)
if bucket_sizes is not None:
assert hasattr(bucket_sizes, "__len__")
# verify total size
total_size = 0
for bucket_size in bucket_sizes:
total_size += bucket_size
if total_size != data.size:
raise RuntimeError("Bucket sizes don't add up to the size of data array")
elif data.size > 0:
rng = np.random.default_rng(123)
# generate random bucket sizes
bucket_min = 1
bucket_max = math.ceil(0.5 * data.size)
total_size = data.size
size_remaining = total_size
bucket_sizes = []
while size_remaining >= bucket_max:
bucket_size = rng.integers(bucket_min, high=bucket_max, dtype=int)
bucket_sizes.append(bucket_size)
size_remaining -= bucket_size
if size_remaining > 0:
bucket_sizes.append(size_remaining)
else:
# empty data array
bucket_sizes = []
dtype_size = wp.types.type_size_in_bytes(data.dtype)
p = int(data.ptr) if data.ptr else 0
pointers = []
counts = []
for bucket_size in bucket_sizes:
pointers.append(p)
counts.append(bucket_size)
p += bucket_size * dtype_size
attrib_info = {}
attrib_info["type"] = _warp_type_to_fabric(data.dtype)
attrib_info["access"] = 2 # ReadWrite
attrib_info["pointers"] = pointers
attrib_info["counts"] = counts
iface = {}
iface["version"] = 1
iface["device"] = str(data.device)
iface["attribs"] = {attrib: attrib_info}
iface["_ref"] = data # backref to keep the array alive
return iface
# returns a fabric array-of-arrays interface constructed from a list of regular arrays
def _create_fabric_array_array_interface(data: list, attrib: str, bucket_sizes: list = None):
# data should be a list of arrays
assert isinstance(data, list)
num_arrays = len(data)
assert num_arrays > 0
device = data[0].device
dtype = data[0].dtype
assert isinstance(attrib, str)
if bucket_sizes is not None:
assert hasattr(bucket_sizes, "__len__")
# verify total size
total_size = 0
for bucket_size in bucket_sizes:
total_size += bucket_size
if total_size != num_arrays:
raise RuntimeError("Bucket sizes don't add up to the number of given arrays")
else:
rng = np.random.default_rng(123)
# generate random bucket sizes
bucket_min = 1
bucket_max = math.ceil(0.5 * num_arrays)
total_size = num_arrays
size_remaining = total_size
bucket_sizes = []
while size_remaining >= bucket_max:
bucket_size = rng.integers(bucket_min, high=bucket_max, dtype=int)
bucket_sizes.append(bucket_size)
size_remaining -= bucket_size
if size_remaining > 0:
bucket_sizes.append(size_remaining)
# initialize array of pointers to arrays and their lengths
_array_pointers = []
_array_lengths = []
for i in range(num_arrays):
_array_pointers.append(data[i].ptr)
_array_lengths.append(data[i].size)
array_pointers = wp.array(_array_pointers, dtype=wp.uint64, device=device)
pointer_size = wp.types.type_size_in_bytes(array_pointers.dtype)
lengths = wp.array(_array_lengths, dtype=wp.uint64, device=device)
length_size = wp.types.type_size_in_bytes(lengths.dtype)
p_pointers = int(array_pointers.ptr)
p_lengths = int(lengths.ptr)
pointers = []
counts = []
array_lengths = []
for bucket_size in bucket_sizes:
pointers.append(p_pointers)
counts.append(bucket_size)
array_lengths.append(p_lengths)
p_pointers += bucket_size * pointer_size
p_lengths += bucket_size * length_size
attrib_info = {}
attrib_info["type"] = _warp_type_to_fabric(dtype, is_array=True)
attrib_info["access"] = 2 # ReadWrite
attrib_info["pointers"] = pointers
attrib_info["counts"] = counts
attrib_info["array_lengths"] = array_lengths
iface = {}
iface["version"] = 1
iface["device"] = str(device)
iface["attribs"] = {attrib: attrib_info}
iface["_ref"] = data # backref to keep the data arrays alive
iface["_ref_pointers"] = array_pointers # backref to keep the array pointers alive
iface["_ref_lengths"] = lengths # backref to keep the lengths array alive
return iface
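# kernel validating fabric array reads, in-place writes, and atomic adds against a reference array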
@wp.kernel
def fa_kernel(a: wp.fabricarray(dtype=float), expected: wp.array(dtype=float)):
i = wp.tid()
wp.expect_eq(a[i], expected[i])
a[i] = 2.0 * a[i]
wp.atomic_add(a, i, 1.0)
wp.expect_eq(a[i], 2.0 * expected[i] + 1.0)
@wp.kernel
def fa_kernel_indexed(a: wp.indexedfabricarray(dtype=float), expected: wp.indexedarray(dtype=float)):
i = wp.tid()
wp.expect_eq(a[i], expected[i])
a[i] = 2.0 * a[i]
wp.atomic_add(a, i, 1.0)
wp.expect_eq(a[i], 2.0 * expected[i] + 1.0)
def test_fabricarray_kernel(test, device):
data = wp.array(data=np.arange(100, dtype=np.float32), device=device)
iface = _create_fabric_array_interface(data, "foo", copy=True)
fa = wp.fabricarray(data=iface, attrib="foo")
test.assertEqual(fa.dtype, data.dtype)
test.assertEqual(fa.ndim, 1)
test.assertEqual(fa.shape, data.shape)
test.assertEqual(fa.size, data.size)
wp.launch(fa_kernel, dim=fa.size, inputs=[fa, data], device=device)
# reset data
wp.copy(fa, data)
# test indexed
indices = wp.array(data=np.arange(1, data.size, 2, dtype=np.int32), device=device)
ifa = fa[indices]
idata = data[indices]
test.assertEqual(ifa.dtype, idata.dtype)
test.assertEqual(ifa.ndim, 1)
test.assertEqual(ifa.shape, idata.shape)
test.assertEqual(ifa.size, idata.size)
wp.launch(fa_kernel_indexed, dim=ifa.size, inputs=[ifa, idata], device=device)
wp.synchronize_device(device)
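# generic-dtype kernels that double each element; explicit overloads for all fabric types are added at the end of this file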
@wp.kernel
def fa_generic_dtype_kernel(a: wp.fabricarray(dtype=Any), b: wp.fabricarray(dtype=Any)):
i = wp.tid()
b[i] = a[i] + a[i]
@wp.kernel
def fa_generic_dtype_kernel_indexed(a: wp.indexedfabricarray(dtype=Any), b: wp.indexedfabricarray(dtype=Any)):
i = wp.tid()
b[i] = a[i] + a[i]
def test_fabricarray_generic_dtype(test, device):
for T in _fabric_types:
if hasattr(T, "_wp_scalar_type_"):
nptype = wp.types.warp_type_to_np_dtype[T._wp_scalar_type_]
else:
nptype = wp.types.warp_type_to_np_dtype[T]
data = wp.array(data=np.arange(10, dtype=nptype), device=device)
data_iface = _create_fabric_array_interface(data, "foo", copy=True)
fa = wp.fabricarray(data=data_iface, attrib="foo")
result = wp.zeros_like(data)
result_iface = _create_fabric_array_interface(result, "foo", copy=True)
fb = wp.fabricarray(data=result_iface, attrib="foo")
test.assertEqual(fa.dtype, fb.dtype)
test.assertEqual(fa.ndim, fb.ndim)
test.assertEqual(fa.shape, fb.shape)
test.assertEqual(fa.size, fb.size)
wp.launch(fa_generic_dtype_kernel, dim=fa.size, inputs=[fa, fb], device=device)
assert_np_equal(fb.numpy(), 2 * fa.numpy())
# reset data
wp.copy(fa, data)
wp.copy(fb, result)
# test indexed
indices = wp.array(data=np.arange(1, data.size, 2, dtype=np.int32), device=device)
ifa = fa[indices]
ifb = fb[indices]
test.assertEqual(ifa.dtype, ifb.dtype)
test.assertEqual(ifa.ndim, ifb.ndim)
test.assertEqual(ifa.shape, ifb.shape)
test.assertEqual(ifa.size, ifb.size)
wp.launch(fa_generic_dtype_kernel_indexed, dim=ifa.size, inputs=[ifa, ifb], device=device)
assert_np_equal(ifb.numpy(), 2 * ifa.numpy())
@wp.kernel
def fa_generic_array_kernel(a: Any, b: Any):
i = wp.tid()
b[i] = a[i] + a[i]
def test_fabricarray_generic_array(test, device):
for T in _fabric_types:
if hasattr(T, "_wp_scalar_type_"):
nptype = wp.types.warp_type_to_np_dtype[T._wp_scalar_type_]
else:
nptype = wp.types.warp_type_to_np_dtype[T]
data = wp.array(data=np.arange(100, dtype=nptype), device=device)
data_iface = _create_fabric_array_interface(data, "foo", copy=True)
fa = wp.fabricarray(data=data_iface, attrib="foo")
result = wp.zeros_like(data)
result_iface = _create_fabric_array_interface(result, "foo", copy=True)
fb = wp.fabricarray(data=result_iface, attrib="foo")
test.assertEqual(fa.dtype, fb.dtype)
test.assertEqual(fa.ndim, fb.ndim)
test.assertEqual(fa.shape, fb.shape)
test.assertEqual(fa.size, fb.size)
wp.launch(fa_generic_array_kernel, dim=fa.size, inputs=[fa, fb], device=device)
assert_np_equal(fb.numpy(), 2 * fa.numpy())
# reset data
wp.copy(fa, data)
wp.copy(fb, result)
# test indexed
indices = wp.array(data=np.arange(1, data.size, 2, dtype=np.int32), device=device)
ifa = fa[indices]
ifb = fb[indices]
test.assertEqual(ifa.dtype, ifb.dtype)
test.assertEqual(ifa.ndim, ifb.ndim)
test.assertEqual(ifa.shape, ifb.shape)
test.assertEqual(ifa.size, ifb.size)
wp.launch(fa_generic_array_kernel, dim=ifa.size, inputs=[ifa, ifb], device=device)
assert_np_equal(ifb.numpy(), 2 * ifa.numpy())
def test_fabricarray_empty(test, device):
# Test whether common operations work with empty (zero-sized) fabric arrays
# and indexed fabric arrays without throwing exceptions.
def test_empty_ops(nrows, ncols, wptype, nptype):
# scalar, vector, or matrix
if ncols > 0:
if nrows > 0:
wptype = wp.types.matrix((nrows, ncols), wptype)
else:
wptype = wp.types.vector(ncols, wptype)
dtype_shape = wptype._shape_
else:
dtype_shape = ()
fill_value = wptype(42)
# create an empty data array
data = wp.empty(0, dtype=wptype, device=device)
iface = _create_fabric_array_interface(data, "foo", copy=True)
fa = wp.fabricarray(data=iface, attrib="foo")
test.assertEqual(fa.size, 0)
test.assertEqual(fa.shape, (0,))
# all of these methods should succeed with zero-sized arrays
fa.zero_()
fa.fill_(fill_value)
fb = fa.contiguous()
fb = wp.empty_like(fa)
fb = wp.zeros_like(fa)
fb = wp.full_like(fa, fill_value)
fb = wp.clone(fa)
wp.copy(fa, fb)
fa.assign(fb)
na = fa.numpy()
test.assertEqual(na.size, 0)
test.assertEqual(na.shape, (0, *dtype_shape))
test.assertEqual(na.dtype, nptype)
test.assertEqual(fa.list(), [])
# test indexed
# create a zero-sized array of indices
indices = wp.empty(0, dtype=int, device=device)
ifa = fa[indices]
test.assertEqual(ifa.size, 0)
test.assertEqual(ifa.shape, (0,))
# all of these methods should succeed with zero-sized arrays
ifa.zero_()
ifa.fill_(fill_value)
ifb = ifa.contiguous()
ifb = wp.empty_like(ifa)
ifb = wp.zeros_like(ifa)
ifb = wp.full_like(ifa, fill_value)
ifb = wp.clone(ifa)
wp.copy(ifa, ifb)
ifa.assign(ifb)
na = ifa.numpy()
test.assertEqual(na.size, 0)
test.assertEqual(na.shape, (0, *dtype_shape))
test.assertEqual(na.dtype, nptype)
test.assertEqual(ifa.list(), [])
# test with scalars, vectors, and matrices
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
# scalars
test_empty_ops(0, 0, wptype, nptype)
for ncols in [2, 3, 4, 5]:
# vectors
test_empty_ops(0, ncols, wptype, nptype)
# square matrices (the Fabric interface only supports square matrices right now)
test_empty_ops(ncols, ncols, wptype, nptype)
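# test filling a scalar fabric array with int, float, and Warp scalar values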
def test_fabricarray_fill_scalar(test, device):
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
# create a data array
data = wp.zeros(100, dtype=wptype, device=device)
iface = _create_fabric_array_interface(data, "foo", copy=True)
fa = wp.fabricarray(data=iface, attrib="foo")
assert_np_equal(fa.numpy(), np.zeros(fa.shape, dtype=nptype))
# fill with int value
fill_value = 42
fa.fill_(fill_value)
assert_np_equal(fa.numpy(), np.full(fa.shape, fill_value, dtype=nptype))
fa.zero_()
assert_np_equal(fa.numpy(), np.zeros(fa.shape, dtype=nptype))
if wptype in wp.types.float_types:
# fill with float value
fill_value = 13.37
fa.fill_(fill_value)
assert_np_equal(fa.numpy(), np.full(fa.shape, fill_value, dtype=nptype))
# fill with Warp scalar value
fill_value = wptype(17)
fa.fill_(fill_value)
assert_np_equal(fa.numpy(), np.full(fa.shape, fill_value.value, dtype=nptype))
# reset data
wp.copy(fa, data)
# test indexed
indices1 = wp.array(data=np.arange(1, data.size, 2, dtype=np.int32), device=device)
ifa = fa[indices1]
# ensure that the other indices remain unchanged
indices2 = wp.array(data=np.arange(0, data.size, 2, dtype=np.int32), device=device)
ifb = fa[indices2]
assert_np_equal(ifa.numpy(), np.zeros(ifa.shape, dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros(ifb.shape, dtype=nptype))
# fill with int value
fill_value = 42
ifa.fill_(fill_value)
assert_np_equal(ifa.numpy(), np.full(ifa.shape, fill_value, dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros(ifb.shape, dtype=nptype))
ifa.zero_()
assert_np_equal(ifa.numpy(), np.zeros(ifa.shape, dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros(ifb.shape, dtype=nptype))
if wptype in wp.types.float_types:
# fill with float value
fill_value = 13.37
ifa.fill_(fill_value)
assert_np_equal(ifa.numpy(), np.full(ifa.shape, fill_value, dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros(ifb.shape, dtype=nptype))
# fill with Warp scalar value
fill_value = wptype(17)
ifa.fill_(fill_value)
assert_np_equal(ifa.numpy(), np.full(ifa.shape, fill_value.value, dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros(ifb.shape, dtype=nptype))
def test_fabricarray_fill_vector(test, device):
# test filling a vector array with scalar or vector values (vec_type, list, or numpy array)
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
# vector types
vector_types = [
wp.types.vector(2, wptype),
wp.types.vector(3, wptype),
wp.types.vector(4, wptype),
wp.types.vector(5, wptype),
]
for vec_type in vector_types:
vec_len = vec_type._length_
data = wp.zeros(100, dtype=vec_type, device=device)
iface = _create_fabric_array_interface(data, "foo", copy=True)
fa = wp.fabricarray(data=iface, attrib="foo")
assert_np_equal(fa.numpy(), np.zeros((*fa.shape, vec_len), dtype=nptype))
# fill with int scalar
fill_value = 42
fa.fill_(fill_value)
assert_np_equal(fa.numpy(), np.full((*fa.shape, vec_len), fill_value, dtype=nptype))
# test zeroing
fa.zero_()
assert_np_equal(fa.numpy(), np.zeros((*fa.shape, vec_len), dtype=nptype))
# vector values can be passed as a list, numpy array, or Warp vector instance
fill_list = [17, 42, 99, 101, 127][:vec_len]
fill_arr = np.array(fill_list, dtype=nptype)
fill_vec = vec_type(fill_list)
expected = np.tile(fill_arr, fa.size).reshape((*fa.shape, vec_len))
# fill with list of vector length
fa.fill_(fill_list)
assert_np_equal(fa.numpy(), expected)
# clear
fa.zero_()
# fill with numpy array of vector length
fa.fill_(fill_arr)
assert_np_equal(fa.numpy(), expected)
# clear
fa.zero_()
# fill with vec instance
fa.fill_(fill_vec)
assert_np_equal(fa.numpy(), expected)
if wptype in wp.types.float_types:
# fill with float scalar
fill_value = 13.37
fa.fill_(fill_value)
assert_np_equal(fa.numpy(), np.full((*fa.shape, vec_len), fill_value, dtype=nptype))
# fill with float list of vector length
fill_list = [-2.5, -1.25, 1.25, 2.5, 5.0][:vec_len]
fa.fill_(fill_list)
expected = np.tile(np.array(fill_list, dtype=nptype), fa.size).reshape((*fa.shape, vec_len))
assert_np_equal(fa.numpy(), expected)
# reset data
wp.copy(fa, data)
# test indexed
indices1 = wp.array(data=np.arange(1, data.size, 2, dtype=np.int32), device=device)
ifa = fa[indices1]
# ensure that the other indices remain unchanged
indices2 = wp.array(data=np.arange(0, data.size, 2, dtype=np.int32), device=device)
ifb = fa[indices2]
assert_np_equal(ifa.numpy(), np.zeros((*ifa.shape, vec_len), dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, vec_len), dtype=nptype))
# fill with int scalar
fill_value = 42
ifa.fill_(fill_value)
assert_np_equal(ifa.numpy(), np.full((*ifa.shape, vec_len), fill_value, dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, vec_len), dtype=nptype))
# test zeroing
ifa.zero_()
assert_np_equal(ifa.numpy(), np.zeros((*ifa.shape, vec_len), dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, vec_len), dtype=nptype))
# vector values can be passed as a list, numpy array, or Warp vector instance
fill_list = [17, 42, 99, 101, 127][:vec_len]
fill_arr = np.array(fill_list, dtype=nptype)
fill_vec = vec_type(fill_list)
expected = np.tile(fill_arr, ifa.size).reshape((*ifa.shape, vec_len))
# fill with list of vector length
ifa.fill_(fill_list)
assert_np_equal(ifa.numpy(), expected)
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, vec_len), dtype=nptype))
# clear
ifa.zero_()
# fill with numpy array of vector length
ifa.fill_(fill_arr)
assert_np_equal(ifa.numpy(), expected)
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, vec_len), dtype=nptype))
# clear
ifa.zero_()
# fill with vec instance
ifa.fill_(fill_vec)
assert_np_equal(ifa.numpy(), expected)
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, vec_len), dtype=nptype))
if wptype in wp.types.float_types:
# fill with float scalar
fill_value = 13.37
ifa.fill_(fill_value)
assert_np_equal(ifa.numpy(), np.full((*ifa.shape, vec_len), fill_value, dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, vec_len), dtype=nptype))
# fill with float list of vector length
fill_list = [-2.5, -1.25, 1.25, 2.5, 5.0][:vec_len]
ifa.fill_(fill_list)
expected = np.tile(np.array(fill_list, dtype=nptype), ifa.size).reshape((*ifa.shape, vec_len))
assert_np_equal(ifa.numpy(), expected)
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, vec_len), dtype=nptype))
def test_fabricarray_fill_matrix(test, device):
# test filling a matrix array with scalar or matrix values (mat_type, nested list, or 2d numpy array)
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
# matrix types
matrix_types = [
# square matrices only
wp.types.matrix((2, 2), wptype),
wp.types.matrix((3, 3), wptype),
wp.types.matrix((4, 4), wptype),
wp.types.matrix((5, 5), wptype),
]
for mat_type in matrix_types:
mat_len = mat_type._length_
mat_shape = mat_type._shape_
data = wp.zeros(100, dtype=mat_type, device=device)
iface = _create_fabric_array_interface(data, "foo", copy=True)
fa = wp.fabricarray(data=iface, attrib="foo")
assert_np_equal(fa.numpy(), np.zeros((*fa.shape, *mat_shape), dtype=nptype))
# fill with scalar
fill_value = 42
fa.fill_(fill_value)
assert_np_equal(fa.numpy(), np.full((*fa.shape, *mat_shape), fill_value, dtype=nptype))
# test zeroing
fa.zero_()
assert_np_equal(fa.numpy(), np.zeros((*fa.shape, *mat_shape), dtype=nptype))
# matrix values can be passed as a 1d numpy array, 2d numpy array, flat list, nested list, or Warp matrix instance
if wptype != wp.bool:
fill_arr1 = np.arange(mat_len, dtype=nptype)
else:
fill_arr1 = np.ones(mat_len, dtype=nptype)
fill_arr2 = fill_arr1.reshape(mat_shape)
fill_list1 = list(fill_arr1)
fill_list2 = [list(row) for row in fill_arr2]
fill_mat = mat_type(fill_arr1)
expected = np.tile(fill_arr1, fa.size).reshape((*fa.shape, *mat_shape))
# fill with 1d numpy array
fa.fill_(fill_arr1)
assert_np_equal(fa.numpy(), expected)
# clear
fa.zero_()
# fill with 2d numpy array
fa.fill_(fill_arr2)
assert_np_equal(fa.numpy(), expected)
# clear
fa.zero_()
# fill with flat list
fa.fill_(fill_list1)
assert_np_equal(fa.numpy(), expected)
# clear
fa.zero_()
# fill with nested list
fa.fill_(fill_list2)
assert_np_equal(fa.numpy(), expected)
# clear
fa.zero_()
# fill with mat instance
fa.fill_(fill_mat)
assert_np_equal(fa.numpy(), expected)
# reset data
wp.copy(fa, data)
# test indexed
indices1 = wp.array(data=np.arange(1, data.size, 2, dtype=np.int32), device=device)
ifa = fa[indices1]
# ensure that the other indices remain unchanged
indices2 = wp.array(data=np.arange(0, data.size, 2, dtype=np.int32), device=device)
ifb = fa[indices2]
assert_np_equal(ifa.numpy(), np.zeros((*ifa.shape, *mat_shape), dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, *mat_shape), dtype=nptype))
# fill with scalar
fill_value = 42
ifa.fill_(fill_value)
assert_np_equal(ifa.numpy(), np.full((*ifa.shape, *mat_shape), fill_value, dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, *mat_shape), dtype=nptype))
# test zeroing
ifa.zero_()
assert_np_equal(ifa.numpy(), np.zeros((*ifa.shape, *mat_shape), dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, *mat_shape), dtype=nptype))
# matrix values can be passed as a 1d numpy array, 2d numpy array, flat list, nested list, or Warp matrix instance
if wptype != wp.bool:
fill_arr1 = np.arange(mat_len, dtype=nptype)
else:
fill_arr1 = np.ones(mat_len, dtype=nptype)
fill_arr2 = fill_arr1.reshape(mat_shape)
fill_list1 = list(fill_arr1)
fill_list2 = [list(row) for row in fill_arr2]
fill_mat = mat_type(fill_arr1)
expected = np.tile(fill_arr1, ifa.size).reshape((*ifa.shape, *mat_shape))
# fill with 1d numpy array
ifa.fill_(fill_arr1)
assert_np_equal(ifa.numpy(), expected)
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, *mat_shape), dtype=nptype))
# clear
ifa.zero_()
# fill with 2d numpy array
ifa.fill_(fill_arr2)
assert_np_equal(ifa.numpy(), expected)
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, *mat_shape), dtype=nptype))
# clear
ifa.zero_()
# fill with flat list
ifa.fill_(fill_list1)
assert_np_equal(ifa.numpy(), expected)
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, *mat_shape), dtype=nptype))
# clear
ifa.zero_()
# fill with nested list
ifa.fill_(fill_list2)
assert_np_equal(ifa.numpy(), expected)
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, *mat_shape), dtype=nptype))
# clear
ifa.zero_()
# fill with mat instance
ifa.fill_(fill_mat)
assert_np_equal(ifa.numpy(), expected)
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, *mat_shape), dtype=nptype))
@wp.kernel
def fa_generic_sums_kernel(a: wp.fabricarrayarray(dtype=Any), sums: wp.array(dtype=Any)):
i = wp.tid()
# get sub-array using wp::view()
row = a[i]
# get sub-array length
count = row.shape[0]
# compute sub-array sum
for j in range(count):
sums[i] = sums[i] + row[j]
@wp.kernel
def fa_generic_sums_kernel_indexed(a: wp.indexedfabricarrayarray(dtype=Any), sums: wp.array(dtype=Any)):
i = wp.tid()
# get sub-array using wp::view()
row = a[i]
# get sub-array length
count = row.shape[0]
# compute sub-array sum
for j in range(count):
sums[i] = sums[i] + row[j]
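# test fabric arrays of arrays: per-row sums computed in a kernel are compared against numpy sums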
def test_fabricarrayarray(test, device):
for T in _fabric_types:
if hasattr(T, "_wp_scalar_type_"):
nptype = wp.types.warp_type_to_np_dtype[T._wp_scalar_type_]
else:
nptype = wp.types.warp_type_to_np_dtype[T]
n = 100
min_length = 1
max_length = 10
arrays = []
expected_sums = []
expected_sums_indexed = []
# generate data arrays
length = min_length
for i in range(n):
if length > max_length:
length = min_length
na = np.arange(1, length + 1, dtype=nptype)
arrays.append(wp.array(data=na, device=device))
expected_sums.append(na.sum())
# every second index
if i % 2 == 0:
expected_sums_indexed.append(na.sum())
length += 1
data_iface = _create_fabric_array_array_interface(arrays, "foo")
fa = wp.fabricarrayarray(data=data_iface, attrib="foo")
sums = wp.zeros_like(fa)
test.assertEqual(fa.dtype, sums.dtype)
test.assertEqual(fa.ndim, 2)
test.assertEqual(sums.ndim, 1)
test.assertEqual(fa.shape, sums.shape)
test.assertEqual(fa.size, sums.size)
wp.launch(fa_generic_sums_kernel, dim=fa.size, inputs=[fa, sums], device=device)
assert_np_equal(sums.numpy(), np.array(expected_sums, dtype=nptype))
# test indexed
indices = wp.array(data=np.arange(0, n, 2, dtype=np.int32), device=device)
ifa = fa[indices]
sums = wp.zeros_like(ifa)
test.assertEqual(ifa.dtype, sums.dtype)
test.assertEqual(ifa.ndim, 2)
test.assertEqual(sums.ndim, 1)
test.assertEqual(ifa.shape, sums.shape)
test.assertEqual(ifa.size, sums.size)
wp.launch(fa_generic_sums_kernel_indexed, dim=ifa.size, inputs=[ifa, sums], device=device)
assert_np_equal(sums.numpy(), np.array(expected_sums_indexed, dtype=nptype))
# explicit kernel overloads
for T in _fabric_types:
wp.overload(fa_generic_dtype_kernel, [wp.fabricarray(dtype=T), wp.fabricarray(dtype=T)])
wp.overload(fa_generic_dtype_kernel_indexed, [wp.indexedfabricarray(dtype=T), wp.indexedfabricarray(dtype=T)])
wp.overload(fa_generic_array_kernel, [wp.fabricarray(dtype=T), wp.fabricarray(dtype=T)])
wp.overload(fa_generic_array_kernel, [wp.indexedfabricarray(dtype=T), wp.indexedfabricarray(dtype=T)])
wp.overload(fa_generic_sums_kernel, [wp.fabricarrayarray(dtype=T), wp.array(dtype=T)])
wp.overload(fa_generic_sums_kernel_indexed, [wp.indexedfabricarrayarray(dtype=T), wp.array(dtype=T)])
devices = get_test_devices()
class TestFabricArray(unittest.TestCase):
pass
# fabric arrays
add_function_test(TestFabricArray, "test_fabricarray_kernel", test_fabricarray_kernel, devices=devices)
add_function_test(TestFabricArray, "test_fabricarray_empty", test_fabricarray_empty, devices=devices)
add_function_test(TestFabricArray, "test_fabricarray_generic_dtype", test_fabricarray_generic_dtype, devices=devices)
add_function_test(TestFabricArray, "test_fabricarray_generic_array", test_fabricarray_generic_array, devices=devices)
add_function_test(TestFabricArray, "test_fabricarray_fill_scalar", test_fabricarray_fill_scalar, devices=devices)
add_function_test(TestFabricArray, "test_fabricarray_fill_vector", test_fabricarray_fill_vector, devices=devices)
add_function_test(TestFabricArray, "test_fabricarray_fill_matrix", test_fabricarray_fill_matrix, devices=devices)
# fabric arrays of arrays
add_function_test(TestFabricArray, "test_fabricarrayarray", test_fabricarrayarray, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 32,138 | Python | 32.724029 | 126 | 0.591792 |
NVIDIA/warp/warp/tests/test_jax.py | # Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import unittest
from typing import Any
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
# basic kernel with one input and output
@wp.kernel
def triple_kernel(input: wp.array(dtype=float), output: wp.array(dtype=float)):
tid = wp.tid()
output[tid] = 3.0 * input[tid]
# generic kernel with one scalar input and output
@wp.kernel
def triple_kernel_scalar(input: wp.array(dtype=Any), output: wp.array(dtype=Any)):
tid = wp.tid()
output[tid] = input.dtype(3) * input[tid]
# generic kernel with one vector/matrix input and output
@wp.kernel
def triple_kernel_vecmat(input: wp.array(dtype=Any), output: wp.array(dtype=Any)):
tid = wp.tid()
output[tid] = input.dtype.dtype(3) * input[tid]
# kernel with multiple inputs and outputs
@wp.kernel
def multiarg_kernel(
# inputs
a: wp.array(dtype=float),
b: wp.array(dtype=float),
c: wp.array(dtype=float),
# outputs
ab: wp.array(dtype=float),
bc: wp.array(dtype=float),
):
tid = wp.tid()
ab[tid] = a[tid] + b[tid]
bc[tid] = b[tid] + c[tid]
# various types for testing
scalar_types = wp.types.scalar_types
vector_types = []
matrix_types = []
for dim in [2, 3, 4]:
for T in scalar_types:
vector_types.append(wp.vec(dim, T))
matrix_types.append(wp.mat((dim, dim), T))
# explicitly overload generic kernels to avoid module reloading during tests
for T in scalar_types:
wp.overload(triple_kernel_scalar, [wp.array(dtype=T), wp.array(dtype=T)])
for T in [*vector_types, *matrix_types]:
wp.overload(triple_kernel_vecmat, [wp.array(dtype=T), wp.array(dtype=T)])
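# returns the installed Jax version as a tuple, or (0, 0, 0) if Jax is not available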
def _jax_version():
try:
import jax
return jax.__version_info__
except ImportError:
return (0, 0, 0)
def test_dtype_from_jax(test, device):
import jax.numpy as jp
def test_conversions(jax_type, warp_type):
test.assertEqual(wp.dtype_from_jax(jax_type), warp_type)
test.assertEqual(wp.dtype_from_jax(jp.dtype(jax_type)), warp_type)
test_conversions(jp.float16, wp.float16)
test_conversions(jp.float32, wp.float32)
test_conversions(jp.float64, wp.float64)
test_conversions(jp.int8, wp.int8)
test_conversions(jp.int16, wp.int16)
test_conversions(jp.int32, wp.int32)
test_conversions(jp.int64, wp.int64)
test_conversions(jp.uint8, wp.uint8)
test_conversions(jp.uint16, wp.uint16)
test_conversions(jp.uint32, wp.uint32)
test_conversions(jp.uint64, wp.uint64)
test_conversions(jp.bool_, wp.bool)
def test_dtype_to_jax(test, device):
import jax.numpy as jp
def test_conversions(warp_type, jax_type):
test.assertEqual(wp.dtype_to_jax(warp_type), jax_type)
test_conversions(wp.float16, jp.float16)
test_conversions(wp.float32, jp.float32)
test_conversions(wp.float64, jp.float64)
test_conversions(wp.int8, jp.int8)
test_conversions(wp.int16, jp.int16)
test_conversions(wp.int32, jp.int32)
test_conversions(wp.int64, jp.int64)
test_conversions(wp.uint8, jp.uint8)
test_conversions(wp.uint16, jp.uint16)
test_conversions(wp.uint32, jp.uint32)
test_conversions(wp.uint64, jp.uint64)
test_conversions(wp.bool, jp.bool_)
def test_device_conversion(test, device):
jax_device = wp.device_to_jax(device)
warp_device = wp.device_from_jax(jax_device)
test.assertEqual(warp_device, device)
@unittest.skipUnless(_jax_version() >= (0, 4, 25), "Jax version too old")
def test_jax_kernel_basic(test, device):
import jax.numpy as jp
from warp.jax_experimental import jax_kernel
n = 64
jax_triple = jax_kernel(triple_kernel)
@jax.jit
def f():
x = jp.arange(n, dtype=jp.float32)
return jax_triple(x)
# run on the given device
with jax.default_device(wp.device_to_jax(device)):
y = f()
result = np.asarray(y).reshape((n,))
expected = 3 * np.arange(n, dtype=np.float32)
assert_np_equal(result, expected)
@unittest.skipUnless(_jax_version() >= (0, 4, 25), "Jax version too old")
def test_jax_kernel_scalar(test, device):
import jax.numpy as jp
from warp.jax_experimental import jax_kernel
n = 64
for T in scalar_types:
jp_dtype = wp.dtype_to_jax(T)
np_dtype = wp.dtype_to_numpy(T)
with test.subTest(msg=T.__name__):
# get the concrete overload
kernel_instance = triple_kernel_scalar.add_overload([wp.array(dtype=T), wp.array(dtype=T)])
jax_triple = jax_kernel(kernel_instance)
@jax.jit
def f(jax_triple=jax_triple, jp_dtype=jp_dtype):
x = jp.arange(n, dtype=jp_dtype)
return jax_triple(x)
# run on the given device
with jax.default_device(wp.device_to_jax(device)):
y = f()
result = np.asarray(y).reshape((n,))
expected = 3 * np.arange(n, dtype=np_dtype)
assert_np_equal(result, expected)
@unittest.skipUnless(_jax_version() >= (0, 4, 25), "Jax version too old")
def test_jax_kernel_vecmat(test, device):
import jax.numpy as jp
from warp.jax_experimental import jax_kernel
for T in [*vector_types, *matrix_types]:
jp_dtype = wp.dtype_to_jax(T._wp_scalar_type_)
np_dtype = wp.dtype_to_numpy(T._wp_scalar_type_)
n = 64 // T._length_
scalar_shape = (n, *T._shape_)
scalar_len = n * T._length_
with test.subTest(msg=T.__name__):
# get the concrete overload
kernel_instance = triple_kernel_vecmat.add_overload([wp.array(dtype=T), wp.array(dtype=T)])
jax_triple = jax_kernel(kernel_instance)
@jax.jit
def f(jax_triple=jax_triple, jp_dtype=jp_dtype, scalar_len=scalar_len, scalar_shape=scalar_shape):
x = jp.arange(scalar_len, dtype=jp_dtype).reshape(scalar_shape)
return jax_triple(x)
# run on the given device
with jax.default_device(wp.device_to_jax(device)):
y = f()
result = np.asarray(y).reshape(scalar_shape)
expected = 3 * np.arange(scalar_len, dtype=np_dtype).reshape(scalar_shape)
assert_np_equal(result, expected)
@unittest.skipUnless(_jax_version() >= (0, 4, 25), "Jax version too old")
def test_jax_kernel_multiarg(test, device):
import jax.numpy as jp
from warp.jax_experimental import jax_kernel
n = 64
jax_multiarg = jax_kernel(multiarg_kernel)
@jax.jit
def f():
a = jp.full(n, 1, dtype=jp.float32)
b = jp.full(n, 2, dtype=jp.float32)
c = jp.full(n, 3, dtype=jp.float32)
return jax_multiarg(a, b, c)
# run on the given device
with jax.default_device(wp.device_to_jax(device)):
x, y = f()
result_x, result_y = np.asarray(x), np.asarray(y)
expected_x = np.full(n, 3, dtype=np.float32)
expected_y = np.full(n, 5, dtype=np.float32)
assert_np_equal(result_x, expected_x)
assert_np_equal(result_y, expected_y)
class TestJax(unittest.TestCase):
pass
# try adding Jax tests if Jax is installed correctly
try:
# prevent Jax from gobbling up GPU memory
os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "false"
os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.dlpack
# NOTE: we must enable 64-bit types in Jax to test the full gamut of types
jax.config.update("jax_enable_x64", True)
# check which Warp devices work with Jax
# CUDA devices may fail if Jax cannot find a CUDA Toolkit
test_devices = get_test_devices()
jax_compatible_devices = []
jax_compatible_cuda_devices = []
for d in test_devices:
try:
with jax.default_device(wp.device_to_jax(d)):
j = jax.numpy.arange(10, dtype=jax.numpy.float32)
j += 1
jax_compatible_devices.append(d)
if d.is_cuda:
jax_compatible_cuda_devices.append(d)
except Exception as e:
print(f"Skipping Jax DLPack tests on device '{d}' due to exception: {e}")
add_function_test(TestJax, "test_dtype_from_jax", test_dtype_from_jax, devices=None)
add_function_test(TestJax, "test_dtype_to_jax", test_dtype_to_jax, devices=None)
if jax_compatible_devices:
add_function_test(TestJax, "test_device_conversion", test_device_conversion, devices=jax_compatible_devices)
if jax_compatible_cuda_devices:
add_function_test(TestJax, "test_jax_kernel_basic", test_jax_kernel_basic, devices=jax_compatible_cuda_devices)
add_function_test(
TestJax, "test_jax_kernel_scalar", test_jax_kernel_scalar, devices=jax_compatible_cuda_devices
)
add_function_test(
TestJax, "test_jax_kernel_vecmat", test_jax_kernel_vecmat, devices=jax_compatible_cuda_devices
)
add_function_test(
TestJax, "test_jax_kernel_multiarg", test_jax_kernel_multiarg, devices=jax_compatible_cuda_devices
)
except Exception as e:
print(f"Skipping Jax tests due to exception: {e}")
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 9,634 | Python | 30.486928 | 119 | 0.647914 |
NVIDIA/warp/warp/tests/test_pinned.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
def test_pinned(test: unittest.TestCase, device):
assert wp.get_device(device).is_cuda, "Test device must be a CUDA device"
n = 1024 * 1024
ones = np.ones(n, dtype=np.float32)
# pageable host arrays for synchronous transfers
a_pageable1 = wp.array(ones, dtype=float, device="cpu")
a_pageable2 = wp.zeros_like(a_pageable1)
test.assertFalse(a_pageable1.pinned)
test.assertFalse(a_pageable2.pinned)
# pinned host arrays for asynchronous transfers
a_pinned1 = wp.array(ones, dtype=float, device="cpu", pinned=True)
a_pinned2 = wp.zeros_like(a_pinned1)
test.assertTrue(a_pinned1.pinned)
test.assertTrue(a_pinned2.pinned)
# device array
a_device = wp.zeros(n, dtype=float, device=device)
test.assertFalse(a_device.pinned)
wp.synchronize_device(device)
with wp.ScopedTimer("Synchronous copy", print=False) as pageable_timer:
wp.copy(a_device, a_pageable1)
wp.copy(a_pageable2, a_device)
wp.synchronize_device(device)
with wp.ScopedTimer("Asynchronous copy", print=False) as pinned_timer:
wp.copy(a_device, a_pinned1)
wp.copy(a_pinned2, a_device)
wp.synchronize_device(device)
# ensure correct results
assert_np_equal(a_pageable2.numpy(), ones)
assert_np_equal(a_pinned2.numpy(), ones)
# ensure that launching asynchronous transfers took less CPU time
test.assertTrue(pinned_timer.elapsed < pageable_timer.elapsed, "Pinned transfers did not take less CPU time")
devices = get_selected_cuda_test_devices()
class TestPinned(unittest.TestCase):
pass
add_function_test(TestPinned, "test_pinned", test_pinned, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 2,290 | Python | 28.753246 | 113 | 0.722271 |
NVIDIA/warp/warp/tests/test_adam.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
import warp.optim
import warp.sim
from warp.tests.unittest_utils import *
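# objective: sum of squared parameters; the minimum is at the origin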
@wp.kernel
def objective(params: wp.array(dtype=float), score: wp.array(dtype=float)):
tid = wp.tid()
U = params[tid] * params[tid]
wp.atomic_add(score, 0, U)
# This test is inspired by https://machinelearningmastery.com/adam-optimization-from-scratch/
def test_adam_solve_float(test, device):
with wp.ScopedDevice(device):
params_start = np.array([0.1, 0.2], dtype=float)
score = wp.zeros(1, dtype=float, requires_grad=True)
params = wp.array(params_start, dtype=float, requires_grad=True)
tape = wp.Tape()
opt = warp.optim.Adam([params], lr=0.02, betas=(0.8, 0.999))
def gradient_func():
tape.reset()
score.zero_()
with tape:
wp.launch(kernel=objective, dim=len(params), inputs=[params, score])
tape.backward(score)
return [tape.gradients[params]]
niters = 100
opt.reset_internal_state()
for _ in range(niters):
opt.step(gradient_func())
result = params.numpy()
# optimum is at the origin, so the result should be close to it in all N dimensions.
tol = 1e-5
for r in result:
test.assertLessEqual(r, tol)
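# objective: squared norm of each vec3 parameter; the minimum is at the origin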
@wp.kernel
def objective_vec3(params: wp.array(dtype=wp.vec3), score: wp.array(dtype=float)):
tid = wp.tid()
U = wp.dot(params[tid], params[tid])
wp.atomic_add(score, 0, U)
# This test is inspired by https://machinelearningmastery.com/adam-optimization-from-scratch/
def test_adam_solve_vec3(test, device):
with wp.ScopedDevice(device):
params_start = np.array([[0.1, 0.2, -0.1]], dtype=float)
score = wp.zeros(1, dtype=float, requires_grad=True)
params = wp.array(params_start, dtype=wp.vec3, requires_grad=True)
tape = wp.Tape()
opt = warp.optim.Adam([params], lr=0.02, betas=(0.8, 0.999))
def gradient_func():
tape.reset()
score.zero_()
with tape:
wp.launch(kernel=objective_vec3, dim=len(params), inputs=[params, score])
tape.backward(score)
return [tape.gradients[params]]
niters = 100
opt.reset_internal_state()
for _ in range(niters):
opt.step(gradient_func())
result = params.numpy()
tol = 1e-5
# optimum is at the origin, so the result should be close to it in all N dimensions.
for r in result:
for v in r:
test.assertLessEqual(v, tol)
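# objective: sum of squared norms of two vec3 parameter sets; the minimum is at the origin for both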
@wp.kernel
def objective_two_inputs_vec3(
params1: wp.array(dtype=wp.vec3), params2: wp.array(dtype=wp.vec3), score: wp.array(dtype=float)
):
tid = wp.tid()
U = wp.dot(params1[tid], params1[tid])
V = wp.dot(params2[tid], params2[tid])
wp.atomic_add(score, 0, U + V)
# This test is inspired by https://machinelearningmastery.com/adam-optimization-from-scratch/
def test_adam_solve_two_inputs(test, device):
with wp.ScopedDevice(device):
params_start1 = np.array([[0.1, 0.2, -0.1]], dtype=float)
params_start2 = np.array([[0.2, 0.1, 0.1]], dtype=float)
score = wp.zeros(1, dtype=float, requires_grad=True)
params1 = wp.array(params_start1, dtype=wp.vec3, requires_grad=True)
params2 = wp.array(params_start2, dtype=wp.vec3, requires_grad=True)
tape = wp.Tape()
opt = warp.optim.Adam([params1, params2], lr=0.02, betas=(0.8, 0.999))
def gradient_func():
tape.reset()
score.zero_()
with tape:
wp.launch(kernel=objective_two_inputs_vec3, dim=len(params1), inputs=[params1, params2, score])
tape.backward(score)
return [tape.gradients[params1], tape.gradients[params2]]
niters = 100
opt.reset_internal_state()
for _ in range(niters):
opt.step(gradient_func())
result = params1.numpy()
tol = 1e-5
# optimum is at the origin, so the result should be close to it in all N dimensions.
for r in result:
for v in r:
test.assertLessEqual(v, tol)
result = params2.numpy()
tol = 1e-5
# optimum is at the origin, so the result should be close to it in all N dimensions.
for r in result:
for v in r:
test.assertLessEqual(v, tol)
devices = get_test_devices()
class TestAdam(unittest.TestCase):
pass
add_function_test(TestAdam, "test_adam_solve_float", test_adam_solve_float, devices=devices)
add_function_test(TestAdam, "test_adam_solve_vec3", test_adam_solve_vec3, devices=devices)
add_function_test(TestAdam, "test_adam_solve_two_inputs", test_adam_solve_two_inputs, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 5,367 | Python | 33.410256 | 111 | 0.626048 |
NVIDIA/warp/warp/tests/test_mat_lite.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import warp as wp
from warp.tests.unittest_utils import *
mat32d = wp.mat(shape=(3, 2), dtype=wp.float64)
@wp.kernel
def test_matrix_constructor_value_func():
a = wp.mat22()
b = wp.matrix(a, shape=(2, 2))
c = mat32d()
d = mat32d(c, shape=(3, 2))
e = mat32d(wp.float64(1.0), wp.float64(2.0), wp.float64(1.0), wp.float64(2.0), wp.float64(1.0), wp.float64(2.0))
f = mat32d(
wp.vec3d(wp.float64(1.0), wp.float64(2.0), wp.float64(3.0)),
wp.vec3d(wp.float64(1.0), wp.float64(2.0), wp.float64(3.0)),
)
# Test matrix constructors using explicit type (float16)
# note that these tests are specifically not using generics / closure
# args to create kernels dynamically (like the rest of this file)
# as those use different code paths to resolve arg types, which
# has led to regressions.
@wp.kernel
def test_constructors_explicit_precision():
# construction for custom matrix types
eye = wp.identity(dtype=wp.float16, n=2)
zeros = wp.matrix(shape=(2, 2), dtype=wp.float16)
custom = wp.matrix(wp.float16(0.0), wp.float16(1.0), wp.float16(2.0), wp.float16(3.0), shape=(2, 2))
for i in range(2):
for j in range(2):
if i == j:
wp.expect_eq(eye[i, j], wp.float16(1.0))
else:
wp.expect_eq(eye[i, j], wp.float16(0.0))
wp.expect_eq(zeros[i, j], wp.float16(0.0))
wp.expect_eq(custom[i, j], wp.float16(i) * wp.float16(2.0) + wp.float16(j))
# Same as above but with a default (float/int) type
# which tests some different code paths that
# need to ensure types are correctly canonicalized
# during codegen
@wp.kernel
def test_constructors_default_precision():
# construction for default (float) matrix types
eye = wp.identity(dtype=float, n=2)
zeros = wp.matrix(shape=(2, 2), dtype=float)
custom = wp.matrix(0.0, 1.0, 2.0, 3.0, shape=(2, 2))
for i in range(2):
for j in range(2):
if i == j:
wp.expect_eq(eye[i, j], 1.0)
else:
wp.expect_eq(eye[i, j], 0.0)
wp.expect_eq(zeros[i, j], 0.0)
wp.expect_eq(custom[i, j], float(i) * 2.0 + float(j))
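# Note on the expected values above: wp.matrix() fills components in row-major order,
# so wp.matrix(0.0, 1.0, 2.0, 3.0, shape=(2, 2)) is [[0, 1], [2, 3]] and element (i, j)
# equals 2*i + j, which is what the expect_eq checks in both kernels encode.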
@wp.kernel
def test_matrix_mutation(expected: wp.types.matrix(shape=(10, 3), dtype=float)):
m = wp.matrix(shape=(10, 3), dtype=float)
# test direct element indexing
m[0, 0] = 1.0
m[0, 1] = 2.0
m[0, 2] = 3.0
# The nested indexing (matrix->vector->scalar) below does not
# currently modify m because m[0] returns the row vector by
# value rather than by reference; this differs from NumPy,
# which always returns a view. It is not clear how we could
# support that behavior as well as auto-diff.
# m[0][1] = 2.0
# m[0][2] = 3.0
# test setting rows
for i in range(1, 10):
m[i] = m[i - 1] + wp.vec3(1.0, 2.0, 3.0)
wp.expect_eq(m, expected)
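# A possible workaround for the by-value row semantics described above (sketch only, not
# exercised by this test): read the row out, modify the copy, then assign the whole row
# back, e.g. row = m[0]; row[1] = 2.0; m[0] = row.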
devices = get_test_devices()
class TestMatLite(unittest.TestCase):
pass
add_kernel_test(TestMatLite, test_matrix_constructor_value_func, dim=1, devices=devices)
add_kernel_test(TestMatLite, test_constructors_explicit_precision, dim=1, devices=devices)
add_kernel_test(TestMatLite, test_constructors_default_precision, dim=1, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=True)
| 3,783 | Python | 32.192982 | 116 | 0.646048 |
NVIDIA/warp/warp/tests/test_quat.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
import warp.sim
from warp.tests.unittest_utils import *
np_float_types = [np.float32, np.float64, np.float16]
kernel_cache = {}
def getkernel(func, suffix=""):
key = func.__name__ + "_" + suffix
if key not in kernel_cache:
kernel_cache[key] = wp.Kernel(func=func, key=key)
return kernel_cache[key]
def get_select_kernel(dtype):
def output_select_kernel_fn(
input: wp.array(dtype=dtype),
index: int,
out: wp.array(dtype=dtype),
):
out[0] = input[index]
return getkernel(output_select_kernel_fn, suffix=dtype.__name__)
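# getkernel() builds and caches one wp.Kernel per (function, dtype suffix) pair so the
# dtype-parameterized closures below are only compiled once per type; get_select_kernel()
# returns a small kernel that copies a single element into a length-1 array, which the
# tests use as a scalar loss when back-propagating through individual output components.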
############################################################
def test_constructors(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec3 = wp.types.vector(length=3, dtype=wptype)
quat = wp.types.quaternion(dtype=wptype)
def check_component_constructor(
input: wp.array(dtype=wptype),
q: wp.array(dtype=wptype),
):
qresult = quat(input[0], input[1], input[2], input[3])
# multiply the output by 2 so we've got something to backpropagate:
q[0] = wptype(2) * qresult[0]
q[1] = wptype(2) * qresult[1]
q[2] = wptype(2) * qresult[2]
q[3] = wptype(2) * qresult[3]
def check_vector_constructor(
input: wp.array(dtype=wptype),
q: wp.array(dtype=wptype),
):
qresult = quat(vec3(input[0], input[1], input[2]), input[3])
# multiply the output by 2 so we've got something to backpropagate:
q[0] = wptype(2) * qresult[0]
q[1] = wptype(2) * qresult[1]
q[2] = wptype(2) * qresult[2]
q[3] = wptype(2) * qresult[3]
kernel = getkernel(check_component_constructor, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
vec_kernel = getkernel(check_vector_constructor, suffix=dtype.__name__)
if register_kernels:
return
input = wp.array(rng.standard_normal(size=4).astype(dtype), requires_grad=True, device=device)
output = wp.zeros_like(input)
wp.launch(kernel, dim=1, inputs=[input], outputs=[output], device=device)
assert_np_equal(output.numpy(), 2 * input.numpy(), tol=tol)
for i in range(4):
cmp = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[input], outputs=[output], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[output, i], outputs=[cmp], device=device)
tape.backward(loss=cmp)
expectedgrads = np.zeros(len(input))
expectedgrads[i] = 2
assert_np_equal(tape.gradients[input].numpy(), expectedgrads)
tape.zero()
input = wp.array(rng.standard_normal(size=4).astype(dtype), requires_grad=True, device=device)
output = wp.zeros_like(input)
wp.launch(vec_kernel, dim=1, inputs=[input], outputs=[output], device=device)
assert_np_equal(output.numpy(), 2 * input.numpy(), tol=tol)
for i in range(4):
cmp = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(vec_kernel, dim=1, inputs=[input], outputs=[output], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[output, i], outputs=[cmp], device=device)
tape.backward(loss=cmp)
expectedgrads = np.zeros(len(input))
expectedgrads[i] = 2
assert_np_equal(tape.gradients[input].numpy(), expectedgrads)
tape.zero()
def test_casting_constructors(test, device, dtype, register_kernels=False):
np_type = np.dtype(dtype)
wp_type = wp.types.np_dtype_to_warp_type[np_type]
quat = wp.types.quaternion(dtype=wp_type)
np16 = np.dtype(np.float16)
wp16 = wp.types.np_dtype_to_warp_type[np16]
np32 = np.dtype(np.float32)
wp32 = wp.types.np_dtype_to_warp_type[np32]
np64 = np.dtype(np.float64)
wp64 = wp.types.np_dtype_to_warp_type[np64]
def cast_float16(a: wp.array(dtype=wp_type, ndim=2), b: wp.array(dtype=wp16, ndim=2)):
tid = wp.tid()
q1 = quat(a[tid, 0], a[tid, 1], a[tid, 2], a[tid, 3])
q2 = wp.quaternion(q1, dtype=wp16)
b[tid, 0] = q2[0]
b[tid, 1] = q2[1]
b[tid, 2] = q2[2]
b[tid, 3] = q2[3]
def cast_float32(a: wp.array(dtype=wp_type, ndim=2), b: wp.array(dtype=wp32, ndim=2)):
tid = wp.tid()
q1 = quat(a[tid, 0], a[tid, 1], a[tid, 2], a[tid, 3])
q2 = wp.quaternion(q1, dtype=wp32)
b[tid, 0] = q2[0]
b[tid, 1] = q2[1]
b[tid, 2] = q2[2]
b[tid, 3] = q2[3]
def cast_float64(a: wp.array(dtype=wp_type, ndim=2), b: wp.array(dtype=wp64, ndim=2)):
tid = wp.tid()
q1 = quat(a[tid, 0], a[tid, 1], a[tid, 2], a[tid, 3])
q2 = wp.quaternion(q1, dtype=wp64)
b[tid, 0] = q2[0]
b[tid, 1] = q2[1]
b[tid, 2] = q2[2]
b[tid, 3] = q2[3]
kernel_16 = getkernel(cast_float16, suffix=dtype.__name__)
kernel_32 = getkernel(cast_float32, suffix=dtype.__name__)
kernel_64 = getkernel(cast_float64, suffix=dtype.__name__)
if register_kernels:
return
# check casting to float 16
a = wp.array(np.ones((1, 4), dtype=np_type), dtype=wp_type, requires_grad=True, device=device)
b = wp.array(np.zeros((1, 4), dtype=np16), dtype=wp16, requires_grad=True, device=device)
b_result = np.ones((1, 4), dtype=np16)
b_grad = wp.array(np.ones((1, 4), dtype=np16), dtype=wp16, device=device)
a_grad = wp.array(np.ones((1, 4), dtype=np_type), dtype=wp_type, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel=kernel_16, dim=1, inputs=[a, b], device=device)
tape.backward(grads={b: b_grad})
out = tape.gradients[a].numpy()
assert_np_equal(b.numpy(), b_result)
assert_np_equal(out, a_grad.numpy())
# check casting to float 32
a = wp.array(np.ones((1, 4), dtype=np_type), dtype=wp_type, requires_grad=True, device=device)
b = wp.array(np.zeros((1, 4), dtype=np32), dtype=wp32, requires_grad=True, device=device)
b_result = np.ones((1, 4), dtype=np32)
b_grad = wp.array(np.ones((1, 4), dtype=np32), dtype=wp32, device=device)
a_grad = wp.array(np.ones((1, 4), dtype=np_type), dtype=wp_type, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel=kernel_32, dim=1, inputs=[a, b], device=device)
tape.backward(grads={b: b_grad})
out = tape.gradients[a].numpy()
assert_np_equal(b.numpy(), b_result)
assert_np_equal(out, a_grad.numpy())
# check casting to float 64
a = wp.array(np.ones((1, 4), dtype=np_type), dtype=wp_type, requires_grad=True, device=device)
b = wp.array(np.zeros((1, 4), dtype=np64), dtype=wp64, requires_grad=True, device=device)
b_result = np.ones((1, 4), dtype=np64)
b_grad = wp.array(np.ones((1, 4), dtype=np64), dtype=wp64, device=device)
a_grad = wp.array(np.ones((1, 4), dtype=np_type), dtype=wp_type, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel=kernel_64, dim=1, inputs=[a, b], device=device)
tape.backward(grads={b: b_grad})
out = tape.gradients[a].numpy()
assert_np_equal(b.numpy(), b_result)
assert_np_equal(out, a_grad.numpy())
def test_inverse(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 2.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_quat_inverse(
input: wp.array(dtype=wptype),
shouldbeidentity: wp.array(dtype=quat),
q: wp.array(dtype=wptype),
):
qread = quat(input[0], input[1], input[2], input[3])
qresult = wp.quat_inverse(qread)
# this inverse should work for normalized quaternions:
shouldbeidentity[0] = wp.normalize(qread) * wp.quat_inverse(wp.normalize(qread))
# multiply the output by 2 so we've got something to backpropagate:
q[0] = wptype(2) * qresult[0]
q[1] = wptype(2) * qresult[1]
q[2] = wptype(2) * qresult[2]
q[3] = wptype(2) * qresult[3]
kernel = getkernel(check_quat_inverse, suffix=dtype.__name__)
if register_kernels:
return
input = wp.array(rng.standard_normal(size=4).astype(dtype), requires_grad=True, device=device)
shouldbeidentity = wp.array(np.zeros((1, 4)), dtype=quat, requires_grad=True, device=device)
output = wp.zeros_like(input)
wp.launch(kernel, dim=1, inputs=[input], outputs=[shouldbeidentity, output], device=device)
assert_np_equal(shouldbeidentity.numpy(), np.array([0, 0, 0, 1]), tol=tol)
for i in range(4):
cmp = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[input], outputs=[shouldbeidentity, output], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[output, i], outputs=[cmp], device=device)
tape.backward(loss=cmp)
expectedgrads = np.zeros(len(input))
expectedgrads[i] = -2 if i != 3 else 2
assert_np_equal(tape.gradients[input].numpy(), expectedgrads)
tape.zero()
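# The expected gradients above follow from wp.quat_inverse() returning the conjugate
# (-x, -y, -z, w): with the factor of 2 applied in the kernel, d(output_i)/d(input_i)
# is -2 for the vector components and +2 for the w component.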
def test_dotproduct(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
def check_quat_dot(
s: wp.array(dtype=quat),
v: wp.array(dtype=quat),
dot: wp.array(dtype=wptype),
):
dot[0] = wptype(2) * wp.dot(v[0], s[0])
dotkernel = getkernel(check_quat_dot, suffix=dtype.__name__)
if register_kernels:
return
s = wp.array(rng.standard_normal(size=4).astype(dtype), dtype=quat, requires_grad=True, device=device)
v = wp.array(rng.standard_normal(size=4).astype(dtype), dtype=quat, requires_grad=True, device=device)
dot = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
dotkernel,
dim=1,
inputs=[
s,
v,
],
outputs=[dot],
device=device,
)
assert_np_equal(dot.numpy()[0], 2.0 * (v.numpy() * s.numpy()).sum(), tol=tol)
tape.backward(loss=dot)
sgrads = tape.gradients[s].numpy()[0]
expected_grads = 2.0 * v.numpy()[0]
assert_np_equal(sgrads, expected_grads, tol=10 * tol)
vgrads = tape.gradients[v].numpy()[0]
expected_grads = 2.0 * s.numpy()[0]
assert_np_equal(vgrads, expected_grads, tol=tol)
def test_length(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-7,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
def check_quat_length(
q: wp.array(dtype=quat),
l: wp.array(dtype=wptype),
l2: wp.array(dtype=wptype),
):
l[0] = wptype(2) * wp.length(q[0])
l2[0] = wptype(2) * wp.length_sq(q[0])
kernel = getkernel(check_quat_length, suffix=dtype.__name__)
if register_kernels:
return
q = wp.array(rng.standard_normal(size=4).astype(dtype), dtype=quat, requires_grad=True, device=device)
l = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
l2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
q,
],
outputs=[l, l2],
device=device,
)
assert_np_equal(l.numpy()[0], 2 * np.linalg.norm(q.numpy()), tol=10 * tol)
assert_np_equal(l2.numpy()[0], 2 * np.linalg.norm(q.numpy()) ** 2, tol=10 * tol)
tape.backward(loss=l)
grad = tape.gradients[q].numpy()[0]
expected_grad = 2 * q.numpy()[0] / np.linalg.norm(q.numpy())
assert_np_equal(grad, expected_grad, tol=10 * tol)
tape.zero()
tape.backward(loss=l2)
grad = tape.gradients[q].numpy()[0]
expected_grad = 4 * q.numpy()[0]
assert_np_equal(grad, expected_grad, tol=10 * tol)
tape.zero()
def test_normalize(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
def check_normalize(
q: wp.array(dtype=quat),
n0: wp.array(dtype=wptype),
n1: wp.array(dtype=wptype),
n2: wp.array(dtype=wptype),
n3: wp.array(dtype=wptype),
):
n = wptype(2) * (wp.normalize(q[0]))
n0[0] = n[0]
n1[0] = n[1]
n2[0] = n[2]
n3[0] = n[3]
def check_normalize_alt(
q: wp.array(dtype=quat),
n0: wp.array(dtype=wptype),
n1: wp.array(dtype=wptype),
n2: wp.array(dtype=wptype),
n3: wp.array(dtype=wptype),
):
n = wptype(2) * (q[0] / wp.length(q[0]))
n0[0] = n[0]
n1[0] = n[1]
n2[0] = n[2]
n3[0] = n[3]
normalize_kernel = getkernel(check_normalize, suffix=dtype.__name__)
normalize_alt_kernel = getkernel(check_normalize_alt, suffix=dtype.__name__)
if register_kernels:
return
# the operations used in check_normalize_alt are already tested above, so just
# make sure the two kernels are giving the same results/gradients
q = wp.array(rng.standard_normal(size=4).astype(dtype), dtype=quat, requires_grad=True, device=device)
n0 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n1 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n0_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n1_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n2_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n3_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
outputs0 = [
n0,
n1,
n2,
n3,
]
tape0 = wp.Tape()
with tape0:
wp.launch(normalize_kernel, dim=1, inputs=[q], outputs=outputs0, device=device)
outputs1 = [
n0_alt,
n1_alt,
n2_alt,
n3_alt,
]
tape1 = wp.Tape()
with tape1:
wp.launch(
normalize_alt_kernel,
dim=1,
inputs=[
q,
],
outputs=outputs1,
device=device,
)
assert_np_equal(n0.numpy()[0], n0_alt.numpy()[0], tol=tol)
assert_np_equal(n1.numpy()[0], n1_alt.numpy()[0], tol=tol)
assert_np_equal(n2.numpy()[0], n2_alt.numpy()[0], tol=tol)
assert_np_equal(n3.numpy()[0], n3_alt.numpy()[0], tol=tol)
for ncmp, ncmpalt in zip(outputs0, outputs1):
tape0.backward(loss=ncmp)
tape1.backward(loss=ncmpalt)
assert_np_equal(tape0.gradients[q].numpy()[0], tape1.gradients[q].numpy()[0], tol=tol)
tape0.zero()
tape1.zero()
def test_addition(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
def check_quat_add(
q: wp.array(dtype=quat),
v: wp.array(dtype=quat),
r0: wp.array(dtype=wptype),
r1: wp.array(dtype=wptype),
r2: wp.array(dtype=wptype),
r3: wp.array(dtype=wptype),
):
result = q[0] + v[0]
r0[0] = wptype(2) * result[0]
r1[0] = wptype(2) * result[1]
r2[0] = wptype(2) * result[2]
r3[0] = wptype(2) * result[3]
kernel = getkernel(check_quat_add, suffix=dtype.__name__)
if register_kernels:
return
q = wp.array(rng.standard_normal(size=4).astype(dtype), dtype=quat, requires_grad=True, device=device)
v = wp.array(rng.standard_normal(size=4).astype(dtype), dtype=quat, requires_grad=True, device=device)
r0 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r1 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
q,
v,
],
outputs=[r0, r1, r2, r3],
device=device,
)
assert_np_equal(r0.numpy()[0], 2 * (v.numpy()[0, 0] + q.numpy()[0, 0]), tol=tol)
assert_np_equal(r1.numpy()[0], 2 * (v.numpy()[0, 1] + q.numpy()[0, 1]), tol=tol)
assert_np_equal(r2.numpy()[0], 2 * (v.numpy()[0, 2] + q.numpy()[0, 2]), tol=tol)
assert_np_equal(r3.numpy()[0], 2 * (v.numpy()[0, 3] + q.numpy()[0, 3]), tol=tol)
for i, l in enumerate([r0, r1, r2, r3]):
tape.backward(loss=l)
qgrads = tape.gradients[q].numpy()[0]
expected_grads = np.zeros_like(qgrads)
expected_grads[i] = 2
assert_np_equal(qgrads, expected_grads, tol=10 * tol)
vgrads = tape.gradients[v].numpy()[0]
assert_np_equal(vgrads, expected_grads, tol=tol)
tape.zero()
def test_subtraction(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
def check_quat_sub(
q: wp.array(dtype=quat),
v: wp.array(dtype=quat),
r0: wp.array(dtype=wptype),
r1: wp.array(dtype=wptype),
r2: wp.array(dtype=wptype),
r3: wp.array(dtype=wptype),
):
result = v[0] - q[0]
r0[0] = wptype(2) * result[0]
r1[0] = wptype(2) * result[1]
r2[0] = wptype(2) * result[2]
r3[0] = wptype(2) * result[3]
kernel = getkernel(check_quat_sub, suffix=dtype.__name__)
if register_kernels:
return
q = wp.array(rng.standard_normal(size=4).astype(dtype), dtype=quat, requires_grad=True, device=device)
v = wp.array(rng.standard_normal(size=4).astype(dtype), dtype=quat, requires_grad=True, device=device)
r0 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r1 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
q,
v,
],
outputs=[r0, r1, r2, r3],
device=device,
)
assert_np_equal(r0.numpy()[0], 2 * (v.numpy()[0, 0] - q.numpy()[0, 0]), tol=tol)
assert_np_equal(r1.numpy()[0], 2 * (v.numpy()[0, 1] - q.numpy()[0, 1]), tol=tol)
assert_np_equal(r2.numpy()[0], 2 * (v.numpy()[0, 2] - q.numpy()[0, 2]), tol=tol)
assert_np_equal(r3.numpy()[0], 2 * (v.numpy()[0, 3] - q.numpy()[0, 3]), tol=tol)
for i, l in enumerate([r0, r1, r2, r3]):
tape.backward(loss=l)
qgrads = tape.gradients[q].numpy()[0]
expected_grads = np.zeros_like(qgrads)
expected_grads[i] = -2
assert_np_equal(qgrads, expected_grads, tol=10 * tol)
vgrads = tape.gradients[v].numpy()[0]
expected_grads[i] = 2
assert_np_equal(vgrads, expected_grads, tol=tol)
tape.zero()
def test_scalar_multiplication(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
def check_quat_scalar_mul(
s: wp.array(dtype=wptype),
q: wp.array(dtype=quat),
l0: wp.array(dtype=wptype),
l1: wp.array(dtype=wptype),
l2: wp.array(dtype=wptype),
l3: wp.array(dtype=wptype),
r0: wp.array(dtype=wptype),
r1: wp.array(dtype=wptype),
r2: wp.array(dtype=wptype),
r3: wp.array(dtype=wptype),
):
lresult = s[0] * q[0]
rresult = q[0] * s[0]
# multiply outputs by 2 so we've got something to backpropagate:
l0[0] = wptype(2) * lresult[0]
l1[0] = wptype(2) * lresult[1]
l2[0] = wptype(2) * lresult[2]
l3[0] = wptype(2) * lresult[3]
r0[0] = wptype(2) * rresult[0]
r1[0] = wptype(2) * rresult[1]
r2[0] = wptype(2) * rresult[2]
r3[0] = wptype(2) * rresult[3]
kernel = getkernel(check_quat_scalar_mul, suffix=dtype.__name__)
if register_kernels:
return
s = wp.array(rng.standard_normal(size=1).astype(dtype), requires_grad=True, device=device)
q = wp.array(rng.standard_normal(size=(1, 4)).astype(dtype), dtype=quat, requires_grad=True, device=device)
l0 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
l1 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
l2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
l3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r0 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r1 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[s, q],
outputs=[
l0,
l1,
l2,
l3,
r0,
r1,
r2,
r3,
],
device=device,
)
assert_np_equal(l0.numpy()[0], 2 * s.numpy()[0] * q.numpy()[0, 0], tol=tol)
assert_np_equal(l1.numpy()[0], 2 * s.numpy()[0] * q.numpy()[0, 1], tol=tol)
assert_np_equal(l2.numpy()[0], 2 * s.numpy()[0] * q.numpy()[0, 2], tol=tol)
assert_np_equal(l3.numpy()[0], 2 * s.numpy()[0] * q.numpy()[0, 3], tol=tol)
assert_np_equal(r0.numpy()[0], 2 * s.numpy()[0] * q.numpy()[0, 0], tol=tol)
assert_np_equal(r1.numpy()[0], 2 * s.numpy()[0] * q.numpy()[0, 1], tol=tol)
assert_np_equal(r2.numpy()[0], 2 * s.numpy()[0] * q.numpy()[0, 2], tol=tol)
assert_np_equal(r3.numpy()[0], 2 * s.numpy()[0] * q.numpy()[0, 3], tol=tol)
if dtype in np_float_types:
for i, outputs in enumerate([(l0, r0), (l1, r1), (l2, r2), (l3, r3)]):
for l in outputs:
tape.backward(loss=l)
sgrad = tape.gradients[s].numpy()[0]
assert_np_equal(sgrad, 2 * q.numpy()[0, i], tol=tol)
allgrads = tape.gradients[q].numpy()[0]
expected_grads = np.zeros_like(allgrads)
expected_grads[i] = s.numpy()[0] * 2
assert_np_equal(allgrads, expected_grads, tol=10 * tol)
tape.zero()
def test_scalar_division(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
def check_quat_scalar_div(
s: wp.array(dtype=wptype),
q: wp.array(dtype=quat),
r0: wp.array(dtype=wptype),
r1: wp.array(dtype=wptype),
r2: wp.array(dtype=wptype),
r3: wp.array(dtype=wptype),
):
result = q[0] / s[0]
# multiply outputs by 2 so we've got something to backpropagate:
r0[0] = wptype(2) * result[0]
r1[0] = wptype(2) * result[1]
r2[0] = wptype(2) * result[2]
r3[0] = wptype(2) * result[3]
kernel = getkernel(check_quat_scalar_div, suffix=dtype.__name__)
if register_kernels:
return
s = wp.array(rng.standard_normal(size=1).astype(dtype), requires_grad=True, device=device)
q = wp.array(rng.standard_normal(size=(1, 4)).astype(dtype), dtype=quat, requires_grad=True, device=device)
r0 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r1 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[s, q],
outputs=[
r0,
r1,
r2,
r3,
],
device=device,
)
assert_np_equal(r0.numpy()[0], 2 * q.numpy()[0, 0] / s.numpy()[0], tol=tol)
assert_np_equal(r1.numpy()[0], 2 * q.numpy()[0, 1] / s.numpy()[0], tol=tol)
assert_np_equal(r2.numpy()[0], 2 * q.numpy()[0, 2] / s.numpy()[0], tol=tol)
assert_np_equal(r3.numpy()[0], 2 * q.numpy()[0, 3] / s.numpy()[0], tol=tol)
if dtype in np_float_types:
for i, r in enumerate([r0, r1, r2, r3]):
tape.backward(loss=r)
sgrad = tape.gradients[s].numpy()[0]
assert_np_equal(sgrad, -2 * q.numpy()[0, i] / (s.numpy()[0] * s.numpy()[0]), tol=tol)
allgrads = tape.gradients[q].numpy()[0]
expected_grads = np.zeros_like(allgrads)
expected_grads[i] = 2 / s.numpy()[0]
assert_np_equal(allgrads, expected_grads, tol=10 * tol)
tape.zero()
def test_quat_multiplication(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
def check_quat_mul(
s: wp.array(dtype=quat),
q: wp.array(dtype=quat),
r0: wp.array(dtype=wptype),
r1: wp.array(dtype=wptype),
r2: wp.array(dtype=wptype),
r3: wp.array(dtype=wptype),
):
result = s[0] * q[0]
# multiply outputs by 2 so we've got something to backpropagate:
r0[0] = wptype(2) * result[0]
r1[0] = wptype(2) * result[1]
r2[0] = wptype(2) * result[2]
r3[0] = wptype(2) * result[3]
kernel = getkernel(check_quat_mul, suffix=dtype.__name__)
if register_kernels:
return
s = wp.array(rng.standard_normal(size=(1, 4)).astype(dtype), dtype=quat, requires_grad=True, device=device)
q = wp.array(rng.standard_normal(size=(1, 4)).astype(dtype), dtype=quat, requires_grad=True, device=device)
r0 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r1 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[s, q],
outputs=[
r0,
r1,
r2,
r3,
],
device=device,
)
a = s.numpy()
b = q.numpy()
assert_np_equal(
r0.numpy()[0], 2 * (a[0, 3] * b[0, 0] + b[0, 3] * a[0, 0] + a[0, 1] * b[0, 2] - b[0, 1] * a[0, 2]), tol=tol
)
assert_np_equal(
r1.numpy()[0], 2 * (a[0, 3] * b[0, 1] + b[0, 3] * a[0, 1] + a[0, 2] * b[0, 0] - b[0, 2] * a[0, 0]), tol=tol
)
assert_np_equal(
r2.numpy()[0], 2 * (a[0, 3] * b[0, 2] + b[0, 3] * a[0, 2] + a[0, 0] * b[0, 1] - b[0, 0] * a[0, 1]), tol=tol
)
assert_np_equal(
r3.numpy()[0], 2 * (a[0, 3] * b[0, 3] - a[0, 0] * b[0, 0] - a[0, 1] * b[0, 1] - a[0, 2] * b[0, 2]), tol=tol
)
tape.backward(loss=r0)
agrad = tape.gradients[s].numpy()[0]
assert_np_equal(agrad, 2 * np.array([b[0, 3], b[0, 2], -b[0, 1], b[0, 0]]), tol=tol)
bgrad = tape.gradients[q].numpy()[0]
assert_np_equal(bgrad, 2 * np.array([a[0, 3], -a[0, 2], a[0, 1], a[0, 0]]), tol=tol)
tape.zero()
tape.backward(loss=r1)
agrad = tape.gradients[s].numpy()[0]
assert_np_equal(agrad, 2 * np.array([-b[0, 2], b[0, 3], b[0, 0], b[0, 1]]), tol=tol)
bgrad = tape.gradients[q].numpy()[0]
assert_np_equal(bgrad, 2 * np.array([a[0, 2], a[0, 3], -a[0, 0], a[0, 1]]), tol=tol)
tape.zero()
tape.backward(loss=r2)
agrad = tape.gradients[s].numpy()[0]
assert_np_equal(agrad, 2 * np.array([b[0, 1], -b[0, 0], b[0, 3], b[0, 2]]), tol=tol)
bgrad = tape.gradients[q].numpy()[0]
assert_np_equal(bgrad, 2 * np.array([-a[0, 1], a[0, 0], a[0, 3], a[0, 2]]), tol=tol)
tape.zero()
tape.backward(loss=r3)
agrad = tape.gradients[s].numpy()[0]
assert_np_equal(agrad, 2 * np.array([-b[0, 0], -b[0, 1], -b[0, 2], b[0, 3]]), tol=tol)
bgrad = tape.gradients[q].numpy()[0]
assert_np_equal(bgrad, 2 * np.array([-a[0, 0], -a[0, 1], -a[0, 2], a[0, 3]]), tol=tol)
tape.zero()
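# For reference, the expected values above are the Hamilton product in warp's (x, y, z, w)
# component order: (s * q).vec = s.w * q.vec + q.w * s.vec + cross(s.vec, q.vec) and
# (s * q).w = s.w * q.w - dot(s.vec, q.vec).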
def test_indexing(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
def check_quat_indexing(
q: wp.array(dtype=quat),
r0: wp.array(dtype=wptype),
r1: wp.array(dtype=wptype),
r2: wp.array(dtype=wptype),
r3: wp.array(dtype=wptype),
):
# multiply outputs by 2 so we've got something to backpropagate:
r0[0] = wptype(2) * q[0][0]
r1[0] = wptype(2) * q[0][1]
r2[0] = wptype(2) * q[0][2]
r3[0] = wptype(2) * q[0][3]
kernel = getkernel(check_quat_indexing, suffix=dtype.__name__)
if register_kernels:
return
q = wp.array(rng.standard_normal(size=(1, 4)).astype(dtype), dtype=quat, requires_grad=True, device=device)
r0 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r1 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[q], outputs=[r0, r1, r2, r3], device=device)
for i, l in enumerate([r0, r1, r2, r3]):
tape.backward(loss=l)
allgrads = tape.gradients[q].numpy()[0]
expected_grads = np.zeros_like(allgrads)
expected_grads[i] = 2
assert_np_equal(allgrads, expected_grads, tol=tol)
tape.zero()
assert_np_equal(r0.numpy()[0], 2.0 * q.numpy()[0, 0], tol=tol)
assert_np_equal(r1.numpy()[0], 2.0 * q.numpy()[0, 1], tol=tol)
assert_np_equal(r2.numpy()[0], 2.0 * q.numpy()[0, 2], tol=tol)
assert_np_equal(r3.numpy()[0], 2.0 * q.numpy()[0, 3], tol=tol)
def test_quat_lerp(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
def check_quat_lerp(
s: wp.array(dtype=quat),
q: wp.array(dtype=quat),
t: wp.array(dtype=wptype),
r0: wp.array(dtype=wptype),
r1: wp.array(dtype=wptype),
r2: wp.array(dtype=wptype),
r3: wp.array(dtype=wptype),
):
result = wp.lerp(s[0], q[0], t[0])
# multiply outputs by 2 so we've got something to backpropagate:
r0[0] = wptype(2) * result[0]
r1[0] = wptype(2) * result[1]
r2[0] = wptype(2) * result[2]
r3[0] = wptype(2) * result[3]
kernel = getkernel(check_quat_lerp, suffix=dtype.__name__)
if register_kernels:
return
s = wp.array(rng.standard_normal(size=(1, 4)).astype(dtype), dtype=quat, requires_grad=True, device=device)
q = wp.array(rng.standard_normal(size=(1, 4)).astype(dtype), dtype=quat, requires_grad=True, device=device)
t = wp.array(rng.uniform(size=1).astype(dtype), dtype=wptype, requires_grad=True, device=device)
r0 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r1 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[s, q, t],
outputs=[
r0,
r1,
r2,
r3,
],
device=device,
)
a = s.numpy()
b = q.numpy()
tt = t.numpy()
assert_np_equal(r0.numpy()[0], 2 * ((1 - tt) * a[0, 0] + tt * b[0, 0]), tol=tol)
assert_np_equal(r1.numpy()[0], 2 * ((1 - tt) * a[0, 1] + tt * b[0, 1]), tol=tol)
assert_np_equal(r2.numpy()[0], 2 * ((1 - tt) * a[0, 2] + tt * b[0, 2]), tol=tol)
assert_np_equal(r3.numpy()[0], 2 * ((1 - tt) * a[0, 3] + tt * b[0, 3]), tol=tol)
for i, l in enumerate([r0, r1, r2, r3]):
tape.backward(loss=l)
agrad = tape.gradients[s].numpy()[0]
bgrad = tape.gradients[q].numpy()[0]
tgrad = tape.gradients[t].numpy()[0]
expected_grads = np.zeros_like(agrad)
expected_grads[i] = 2 * (1 - tt)
assert_np_equal(agrad, expected_grads, tol=tol)
expected_grads[i] = 2 * tt
assert_np_equal(bgrad, expected_grads, tol=tol)
assert_np_equal(tgrad, 2 * (b[0, i] - a[0, i]), tol=tol)
tape.zero()
def test_quat_rotate(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
def check_quat_rotate(
q: wp.array(dtype=quat),
v: wp.array(dtype=vec3),
outputs: wp.array(dtype=wptype),
outputs_inv: wp.array(dtype=wptype),
outputs_manual: wp.array(dtype=wptype),
outputs_inv_manual: wp.array(dtype=wptype),
):
result = wp.quat_rotate(q[0], v[0])
result_inv = wp.quat_rotate_inv(q[0], v[0])
qv = vec3(q[0][0], q[0][1], q[0][2])
qw = q[0][3]
result_manual = v[0] * (wptype(2) * qw * qw - wptype(1))
result_manual += wp.cross(qv, v[0]) * qw * wptype(2)
result_manual += qv * wp.dot(qv, v[0]) * wptype(2)
result_inv_manual = v[0] * (wptype(2) * qw * qw - wptype(1))
result_inv_manual -= wp.cross(qv, v[0]) * qw * wptype(2)
result_inv_manual += qv * wp.dot(qv, v[0]) * wptype(2)
for i in range(3):
# multiply outputs by 2 so we've got something to backpropagate:
outputs[i] = wptype(2) * result[i]
outputs_inv[i] = wptype(2) * result_inv[i]
outputs_manual[i] = wptype(2) * result_manual[i]
outputs_inv_manual[i] = wptype(2) * result_inv_manual[i]
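# The manual expressions above expand the sandwich product q * v * q^-1 for a unit
# quaternion q = (q_v, q_w): rotate(q, v) = v * (2*q_w^2 - 1) + 2*q_w*(q_v x v) + 2*q_v*(q_v . v);
# the inverse rotation only flips the sign of the cross-product term.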
kernel = getkernel(check_quat_rotate, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
q = rng.standard_normal(size=(1, 4))
q /= np.linalg.norm(q)
q = wp.array(q.astype(dtype), dtype=quat, requires_grad=True, device=device)
v = wp.array(0.5 * rng.standard_normal(size=(1, 3)).astype(dtype), dtype=vec3, requires_grad=True, device=device)
# test values against the manually computed result:
outputs = wp.zeros(3, dtype=wptype, requires_grad=True, device=device)
outputs_inv = wp.zeros(3, dtype=wptype, requires_grad=True, device=device)
outputs_manual = wp.zeros(3, dtype=wptype, requires_grad=True, device=device)
outputs_inv_manual = wp.zeros(3, dtype=wptype, requires_grad=True, device=device)
wp.launch(
kernel,
dim=1,
inputs=[q, v],
outputs=[
outputs,
outputs_inv,
outputs_manual,
outputs_inv_manual,
],
device=device,
)
assert_np_equal(outputs.numpy(), outputs_manual.numpy(), tol=tol)
assert_np_equal(outputs_inv.numpy(), outputs_inv_manual.numpy(), tol=tol)
# test gradients against the manually computed result:
for i in range(3):
cmp = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
cmp_inv = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
cmp_manual = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
cmp_inv_manual = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[q, v],
outputs=[
outputs,
outputs_inv,
outputs_manual,
outputs_inv_manual,
],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, i], outputs=[cmp], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs_inv, i], outputs=[cmp_inv], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs_manual, i], outputs=[cmp_manual], device=device)
wp.launch(
output_select_kernel, dim=1, inputs=[outputs_inv_manual, i], outputs=[cmp_inv_manual], device=device
)
tape.backward(loss=cmp)
qgrads = 1.0 * tape.gradients[q].numpy()
vgrads = 1.0 * tape.gradients[v].numpy()
tape.zero()
tape.backward(loss=cmp_inv)
qgrads_inv = 1.0 * tape.gradients[q].numpy()
vgrads_inv = 1.0 * tape.gradients[v].numpy()
tape.zero()
tape.backward(loss=cmp_manual)
qgrads_manual = 1.0 * tape.gradients[q].numpy()
vgrads_manual = 1.0 * tape.gradients[v].numpy()
tape.zero()
tape.backward(loss=cmp_inv_manual)
qgrads_inv_manual = 1.0 * tape.gradients[q].numpy()
vgrads_inv_manual = 1.0 * tape.gradients[v].numpy()
tape.zero()
assert_np_equal(qgrads, qgrads_manual, tol=tol)
assert_np_equal(vgrads, vgrads_manual, tol=tol)
assert_np_equal(qgrads_inv, qgrads_inv_manual, tol=tol)
assert_np_equal(vgrads_inv, vgrads_inv_manual, tol=tol)
def test_quat_to_matrix(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
mat3 = wp.types.matrix(shape=(3, 3), dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
def check_quat_to_matrix(
q: wp.array(dtype=quat),
outputs: wp.array(dtype=wptype),
outputs_manual: wp.array(dtype=wptype),
):
result = wp.quat_to_matrix(q[0])
xaxis = wp.quat_rotate(
q[0],
vec3(
wptype(1),
wptype(0),
wptype(0),
),
)
yaxis = wp.quat_rotate(
q[0],
vec3(
wptype(0),
wptype(1),
wptype(0),
),
)
zaxis = wp.quat_rotate(
q[0],
vec3(
wptype(0),
wptype(0),
wptype(1),
),
)
result_manual = mat3(xaxis, yaxis, zaxis)
idx = 0
for i in range(3):
for j in range(3):
# multiply outputs by 2 so we've got something to backpropagate:
outputs[idx] = wptype(2) * result[i, j]
outputs_manual[idx] = wptype(2) * result_manual[i, j]
idx = idx + 1
kernel = getkernel(check_quat_to_matrix, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
q = rng.standard_normal(size=(1, 4))
q /= np.linalg.norm(q)
q = wp.array(q.astype(dtype), dtype=quat, requires_grad=True, device=device)
# test values against the manually computed result:
outputs = wp.zeros(3 * 3, dtype=wptype, requires_grad=True, device=device)
outputs_manual = wp.zeros(3 * 3, dtype=wptype, requires_grad=True, device=device)
wp.launch(
kernel,
dim=1,
inputs=[q],
outputs=[
outputs,
outputs_manual,
],
device=device,
)
assert_np_equal(outputs.numpy(), outputs_manual.numpy(), tol=tol)
# sanity check: divide by 2 to remove the scale factor applied in the kernel;
# the result should be a rotation matrix
R = 0.5 * outputs.numpy().reshape(3, 3)
assert_np_equal(np.matmul(R, R.T), np.eye(3), tol=tol)
# test gradients against the manually computed result:
idx = 0
for _i in range(3):
for _j in range(3):
cmp = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
cmp_manual = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[q],
outputs=[
outputs,
outputs_manual,
],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, idx], outputs=[cmp], device=device)
wp.launch(
output_select_kernel, dim=1, inputs=[outputs_manual, idx], outputs=[cmp_manual], device=device
)
tape.backward(loss=cmp)
qgrads = 1.0 * tape.gradients[q].numpy()
tape.zero()
tape.backward(loss=cmp_manual)
qgrads_manual = 1.0 * tape.gradients[q].numpy()
tape.zero()
assert_np_equal(qgrads, qgrads_manual, tol=tol)
idx = idx + 1
############################################################
def test_slerp_grad(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
seed = 42
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec3 = wp.types.vector(3, wptype)
quat = wp.types.quaternion(wptype)
def slerp_kernel(
q0: wp.array(dtype=quat),
q1: wp.array(dtype=quat),
t: wp.array(dtype=wptype),
loss: wp.array(dtype=wptype),
index: int,
):
tid = wp.tid()
q = wp.quat_slerp(q0[tid], q1[tid], t[tid])
wp.atomic_add(loss, 0, q[index])
slerp_kernel = getkernel(slerp_kernel, suffix=dtype.__name__)
def slerp_kernel_forward(
q0: wp.array(dtype=quat),
q1: wp.array(dtype=quat),
t: wp.array(dtype=wptype),
loss: wp.array(dtype=wptype),
index: int,
):
tid = wp.tid()
axis = vec3()
angle = wptype(0.0)
wp.quat_to_axis_angle(wp.mul(wp.quat_inverse(q0[tid]), q1[tid]), axis, angle)
q = wp.mul(q0[tid], wp.quat_from_axis_angle(axis, t[tid] * angle))
wp.atomic_add(loss, 0, q[index])
slerp_kernel_forward = getkernel(slerp_kernel_forward, suffix=dtype.__name__)
def quat_sampler_slerp(kernel_seed: int, quats: wp.array(dtype=quat)):
tid = wp.tid()
state = wp.rand_init(kernel_seed, tid)
angle = wp.randf(state, 0.0, 2.0 * 3.1415926535)
dir = wp.sample_unit_sphere_surface(state) * wp.sin(angle * 0.5)
q = quat(wptype(dir[0]), wptype(dir[1]), wptype(dir[2]), wptype(wp.cos(angle * 0.5)))
qn = wp.normalize(q)
quats[tid] = qn
quat_sampler = getkernel(quat_sampler_slerp, suffix=dtype.__name__)
if register_kernels:
return
N = 50
q0 = wp.zeros(N, dtype=quat, device=device, requires_grad=True)
q1 = wp.zeros(N, dtype=quat, device=device, requires_grad=True)
wp.launch(kernel=quat_sampler, dim=N, inputs=[seed, q0], device=device)
wp.launch(kernel=quat_sampler, dim=N, inputs=[seed + 1, q1], device=device)
t = rng.uniform(low=0.0, high=1.0, size=N)
t = wp.array(t, dtype=wptype, device=device, requires_grad=True)
def compute_gradients(kernel, wrt, index):
loss = wp.zeros(1, dtype=wptype, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(kernel=kernel, dim=N, inputs=[q0, q1, t, loss, index], device=device)
tape.backward(loss)
gradients = 1.0 * tape.gradients[wrt].numpy()
tape.zero()
return loss.numpy()[0], gradients
eps = {
np.float16: 2.0e-2,
np.float32: 1.0e-5,
np.float64: 1.0e-8,
}.get(dtype, 0)
# wrt t
# gather gradients from builtin adjoints
xcmp, gradients_x = compute_gradients(slerp_kernel, t, 0)
ycmp, gradients_y = compute_gradients(slerp_kernel, t, 1)
zcmp, gradients_z = compute_gradients(slerp_kernel, t, 2)
wcmp, gradients_w = compute_gradients(slerp_kernel, t, 3)
# gather gradients from autodiff
xcmp_auto, gradients_x_auto = compute_gradients(slerp_kernel_forward, t, 0)
ycmp_auto, gradients_y_auto = compute_gradients(slerp_kernel_forward, t, 1)
zcmp_auto, gradients_z_auto = compute_gradients(slerp_kernel_forward, t, 2)
wcmp_auto, gradients_w_auto = compute_gradients(slerp_kernel_forward, t, 3)
assert_np_equal(gradients_x, gradients_x_auto, tol=eps)
assert_np_equal(gradients_y, gradients_y_auto, tol=eps)
assert_np_equal(gradients_z, gradients_z_auto, tol=eps)
assert_np_equal(gradients_w, gradients_w_auto, tol=eps)
assert_np_equal(xcmp, xcmp_auto, tol=eps)
assert_np_equal(ycmp, ycmp_auto, tol=eps)
assert_np_equal(zcmp, zcmp_auto, tol=eps)
assert_np_equal(wcmp, wcmp_auto, tol=eps)
# wrt q0
# gather gradients from builtin adjoints
xcmp, gradients_x = compute_gradients(slerp_kernel, q0, 0)
ycmp, gradients_y = compute_gradients(slerp_kernel, q0, 1)
zcmp, gradients_z = compute_gradients(slerp_kernel, q0, 2)
wcmp, gradients_w = compute_gradients(slerp_kernel, q0, 3)
# gather gradients from autodiff
xcmp_auto, gradients_x_auto = compute_gradients(slerp_kernel_forward, q0, 0)
ycmp_auto, gradients_y_auto = compute_gradients(slerp_kernel_forward, q0, 1)
zcmp_auto, gradients_z_auto = compute_gradients(slerp_kernel_forward, q0, 2)
wcmp_auto, gradients_w_auto = compute_gradients(slerp_kernel_forward, q0, 3)
assert_np_equal(gradients_x, gradients_x_auto, tol=eps)
assert_np_equal(gradients_y, gradients_y_auto, tol=eps)
assert_np_equal(gradients_z, gradients_z_auto, tol=eps)
assert_np_equal(gradients_w, gradients_w_auto, tol=eps)
assert_np_equal(xcmp, xcmp_auto, tol=eps)
assert_np_equal(ycmp, ycmp_auto, tol=eps)
assert_np_equal(zcmp, zcmp_auto, tol=eps)
assert_np_equal(wcmp, wcmp_auto, tol=eps)
# wrt q1
# gather gradients from builtin adjoints
xcmp, gradients_x = compute_gradients(slerp_kernel, q1, 0)
ycmp, gradients_y = compute_gradients(slerp_kernel, q1, 1)
zcmp, gradients_z = compute_gradients(slerp_kernel, q1, 2)
wcmp, gradients_w = compute_gradients(slerp_kernel, q1, 3)
# gather gradients from autodiff
xcmp_auto, gradients_x_auto = compute_gradients(slerp_kernel_forward, q1, 0)
ycmp_auto, gradients_y_auto = compute_gradients(slerp_kernel_forward, q1, 1)
zcmp_auto, gradients_z_auto = compute_gradients(slerp_kernel_forward, q1, 2)
wcmp_auto, gradients_w_auto = compute_gradients(slerp_kernel_forward, q1, 3)
assert_np_equal(gradients_x, gradients_x_auto, tol=eps)
assert_np_equal(gradients_y, gradients_y_auto, tol=eps)
assert_np_equal(gradients_z, gradients_z_auto, tol=eps)
assert_np_equal(gradients_w, gradients_w_auto, tol=eps)
assert_np_equal(xcmp, xcmp_auto, tol=eps)
assert_np_equal(ycmp, ycmp_auto, tol=eps)
assert_np_equal(zcmp, zcmp_auto, tol=eps)
assert_np_equal(wcmp, wcmp_auto, tol=eps)
############################################################
def test_quat_to_axis_angle_grad(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
seed = 42
num_rand = 50
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec3 = wp.types.vector(3, wptype)
vec4 = wp.types.vector(4, wptype)
quat = wp.types.quaternion(wptype)
def quat_to_axis_angle_kernel(quats: wp.array(dtype=quat), loss: wp.array(dtype=wptype), coord_idx: int):
tid = wp.tid()
axis = vec3()
angle = wptype(0.0)
wp.quat_to_axis_angle(quats[tid], axis, angle)
a = vec4(axis[0], axis[1], axis[2], angle)
wp.atomic_add(loss, 0, a[coord_idx])
quat_to_axis_angle_kernel = getkernel(quat_to_axis_angle_kernel, suffix=dtype.__name__)
def quat_to_axis_angle_kernel_forward(quats: wp.array(dtype=quat), loss: wp.array(dtype=wptype), coord_idx: int):
tid = wp.tid()
q = quats[tid]
axis = vec3()
angle = wptype(0.0)
v = vec3(q[0], q[1], q[2])
if q[3] < wptype(0):
axis = -wp.normalize(v)
else:
axis = wp.normalize(v)
angle = wptype(2) * wp.atan2(wp.length(v), wp.abs(q[3]))
a = vec4(axis[0], axis[1], axis[2], angle)
wp.atomic_add(loss, 0, a[coord_idx])
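# The reference computation above recovers axis = +/- normalize(q.xyz) (flipped when
# q.w < 0 so both members of the double cover map to the same representative) and
# angle = 2 * atan2(|q.xyz|, |q.w|), the usual axis-angle extraction for unit quaternions.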
quat_to_axis_angle_kernel_forward = getkernel(quat_to_axis_angle_kernel_forward, suffix=dtype.__name__)
def quat_sampler(kernel_seed: int, angles: wp.array(dtype=float), quats: wp.array(dtype=quat)):
tid = wp.tid()
state = wp.rand_init(kernel_seed, tid)
angle = angles[tid]
dir = wp.sample_unit_sphere_surface(state) * wp.sin(angle * 0.5)
q = quat(wptype(dir[0]), wptype(dir[1]), wptype(dir[2]), wptype(wp.cos(angle * 0.5)))
qn = wp.normalize(q)
quats[tid] = qn
quat_sampler = getkernel(quat_sampler, suffix=dtype.__name__)
if register_kernels:
return
quats = wp.zeros(num_rand, dtype=quat, device=device, requires_grad=True)
angles = wp.array(
np.linspace(0.0, 2.0 * np.pi, num_rand, endpoint=False, dtype=np.float32), dtype=float, device=device
)
wp.launch(kernel=quat_sampler, dim=num_rand, inputs=[seed, angles, quats], device=device)
edge_cases = np.array(
[(1.0, 0.0, 0.0, 0.0), (0.0, 1.0 / np.sqrt(3), 1.0 / np.sqrt(3), 1.0 / np.sqrt(3)), (0.0, 0.0, 0.0, 0.0)]
)
num_edge = len(edge_cases)
edge_cases = wp.array(edge_cases, dtype=quat, device=device, requires_grad=True)
def compute_gradients(arr, kernel, dim, index):
loss = wp.zeros(1, dtype=wptype, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(kernel=kernel, dim=dim, inputs=[arr, loss, index], device=device)
tape.backward(loss)
gradients = 1.0 * tape.gradients[arr].numpy()
tape.zero()
return loss.numpy()[0], gradients
# gather gradients from builtin adjoints
xcmp, gradients_x = compute_gradients(quats, quat_to_axis_angle_kernel, num_rand, 0)
ycmp, gradients_y = compute_gradients(quats, quat_to_axis_angle_kernel, num_rand, 1)
zcmp, gradients_z = compute_gradients(quats, quat_to_axis_angle_kernel, num_rand, 2)
wcmp, gradients_w = compute_gradients(quats, quat_to_axis_angle_kernel, num_rand, 3)
# gather gradients from autodiff
xcmp_auto, gradients_x_auto = compute_gradients(quats, quat_to_axis_angle_kernel_forward, num_rand, 0)
ycmp_auto, gradients_y_auto = compute_gradients(quats, quat_to_axis_angle_kernel_forward, num_rand, 1)
zcmp_auto, gradients_z_auto = compute_gradients(quats, quat_to_axis_angle_kernel_forward, num_rand, 2)
wcmp_auto, gradients_w_auto = compute_gradients(quats, quat_to_axis_angle_kernel_forward, num_rand, 3)
# edge cases: gather gradients from builtin adjoints
_, edge_gradients_x = compute_gradients(edge_cases, quat_to_axis_angle_kernel, num_edge, 0)
_, edge_gradients_y = compute_gradients(edge_cases, quat_to_axis_angle_kernel, num_edge, 1)
_, edge_gradients_z = compute_gradients(edge_cases, quat_to_axis_angle_kernel, num_edge, 2)
_, edge_gradients_w = compute_gradients(edge_cases, quat_to_axis_angle_kernel, num_edge, 3)
# edge cases: gather gradients from autodiff
_, edge_gradients_x_auto = compute_gradients(edge_cases, quat_to_axis_angle_kernel_forward, num_edge, 0)
_, edge_gradients_y_auto = compute_gradients(edge_cases, quat_to_axis_angle_kernel_forward, num_edge, 1)
_, edge_gradients_z_auto = compute_gradients(edge_cases, quat_to_axis_angle_kernel_forward, num_edge, 2)
_, edge_gradients_w_auto = compute_gradients(edge_cases, quat_to_axis_angle_kernel_forward, num_edge, 3)
eps = {
np.float16: 2.0e-1,
np.float32: 2.0e-4,
np.float64: 2.0e-7,
}.get(dtype, 0)
assert_np_equal(xcmp, xcmp_auto, tol=eps)
assert_np_equal(ycmp, ycmp_auto, tol=eps)
assert_np_equal(zcmp, zcmp_auto, tol=eps)
assert_np_equal(wcmp, wcmp_auto, tol=eps)
assert_np_equal(gradients_x, gradients_x_auto, tol=eps)
assert_np_equal(gradients_y, gradients_y_auto, tol=eps)
assert_np_equal(gradients_z, gradients_z_auto, tol=eps)
assert_np_equal(gradients_w, gradients_w_auto, tol=eps)
assert_np_equal(edge_gradients_x, edge_gradients_x_auto, tol=eps)
assert_np_equal(edge_gradients_y, edge_gradients_y_auto, tol=eps)
assert_np_equal(edge_gradients_z, edge_gradients_z_auto, tol=eps)
assert_np_equal(edge_gradients_w, edge_gradients_w_auto, tol=eps)
############################################################
def test_quat_rpy_grad(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
N = 3
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec3 = wp.types.vector(3, wptype)
quat = wp.types.quaternion(wptype)
def rpy_to_quat_kernel(rpy_arr: wp.array(dtype=vec3), loss: wp.array(dtype=wptype), coord_idx: int):
tid = wp.tid()
rpy = rpy_arr[tid]
roll = rpy[0]
pitch = rpy[1]
yaw = rpy[2]
q = wp.quat_rpy(roll, pitch, yaw)
wp.atomic_add(loss, 0, q[coord_idx])
rpy_to_quat_kernel = getkernel(rpy_to_quat_kernel, suffix=dtype.__name__)
def rpy_to_quat_kernel_forward(rpy_arr: wp.array(dtype=vec3), loss: wp.array(dtype=wptype), coord_idx: int):
tid = wp.tid()
rpy = rpy_arr[tid]
roll = rpy[0]
pitch = rpy[1]
yaw = rpy[2]
cy = wp.cos(yaw * wptype(0.5))
sy = wp.sin(yaw * wptype(0.5))
cr = wp.cos(roll * wptype(0.5))
sr = wp.sin(roll * wptype(0.5))
cp = wp.cos(pitch * wptype(0.5))
sp = wp.sin(pitch * wptype(0.5))
w = cy * cr * cp + sy * sr * sp
x = cy * sr * cp - sy * cr * sp
y = cy * cr * sp + sy * sr * cp
z = sy * cr * cp - cy * sr * sp
q = quat(x, y, z, w)
wp.atomic_add(loss, 0, q[coord_idx])
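# The expansion above corresponds to composing the elementary rotations as
# q = Rz(yaw) * Ry(pitch) * Rx(roll), stored in (x, y, z, w) order; the test below
# asserts that wp.quat_rpy follows the same convention.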
rpy_to_quat_kernel_forward = getkernel(rpy_to_quat_kernel_forward, suffix=dtype.__name__)
if register_kernels:
return
rpy_arr = rng.uniform(low=-np.pi, high=np.pi, size=(N, 3))
rpy_arr = wp.array(rpy_arr, dtype=vec3, device=device, requires_grad=True)
def compute_gradients(kernel, wrt, index):
loss = wp.zeros(1, dtype=wptype, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(kernel=kernel, dim=N, inputs=[wrt, loss, index], device=device)
tape.backward(loss)
gradients = 1.0 * tape.gradients[wrt].numpy()
tape.zero()
return loss.numpy()[0], gradients
# wrt rpy
# gather gradients from builtin adjoints
rcmp, gradients_r = compute_gradients(rpy_to_quat_kernel, rpy_arr, 0)
pcmp, gradients_p = compute_gradients(rpy_to_quat_kernel, rpy_arr, 1)
ycmp, gradients_y = compute_gradients(rpy_to_quat_kernel, rpy_arr, 2)
# gather gradients from autodiff
rcmp_auto, gradients_r_auto = compute_gradients(rpy_to_quat_kernel_forward, rpy_arr, 0)
pcmp_auto, gradients_p_auto = compute_gradients(rpy_to_quat_kernel_forward, rpy_arr, 1)
ycmp_auto, gradients_y_auto = compute_gradients(rpy_to_quat_kernel_forward, rpy_arr, 2)
eps = {
np.float16: 2.0e-2,
np.float32: 1.0e-5,
np.float64: 1.0e-8,
}.get(dtype, 0)
assert_np_equal(rcmp, rcmp_auto, tol=eps)
assert_np_equal(pcmp, pcmp_auto, tol=eps)
assert_np_equal(ycmp, ycmp_auto, tol=eps)
assert_np_equal(gradients_r, gradients_r_auto, tol=eps)
assert_np_equal(gradients_p, gradients_p_auto, tol=eps)
assert_np_equal(gradients_y, gradients_y_auto, tol=eps)
############################################################
def test_quat_from_matrix(test, device, dtype, register_kernels=False):
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat33 = wp.types.matrix((3, 3), wptype)
quat = wp.types.quaternion(wptype)
def quat_from_matrix(m: wp.array2d(dtype=wptype), loss: wp.array(dtype=wptype), idx: int):
tid = wp.tid()
matrix = mat33(
m[tid, 0], m[tid, 1], m[tid, 2], m[tid, 3], m[tid, 4], m[tid, 5], m[tid, 6], m[tid, 7], m[tid, 8]
)
q = wp.quat_from_matrix(matrix)
wp.atomic_add(loss, 0, q[idx])
def quat_from_matrix_forward(mats: wp.array2d(dtype=wptype), loss: wp.array(dtype=wptype), idx: int):
tid = wp.tid()
m = mat33(
mats[tid, 0],
mats[tid, 1],
mats[tid, 2],
mats[tid, 3],
mats[tid, 4],
mats[tid, 5],
mats[tid, 6],
mats[tid, 7],
mats[tid, 8],
)
tr = m[0][0] + m[1][1] + m[2][2]
x = wptype(0)
y = wptype(0)
z = wptype(0)
w = wptype(0)
h = wptype(0)
if tr >= wptype(0):
h = wp.sqrt(tr + wptype(1))
w = wptype(0.5) * h
h = wptype(0.5) / h
x = (m[2][1] - m[1][2]) * h
y = (m[0][2] - m[2][0]) * h
z = (m[1][0] - m[0][1]) * h
else:
max_diag = 0
if m[1][1] > m[0][0]:
max_diag = 1
if m[2][2] > m[max_diag][max_diag]:
max_diag = 2
if max_diag == 0:
h = wp.sqrt((m[0][0] - (m[1][1] + m[2][2])) + wptype(1))
x = wptype(0.5) * h
h = wptype(0.5) / h
y = (m[0][1] + m[1][0]) * h
z = (m[2][0] + m[0][2]) * h
w = (m[2][1] - m[1][2]) * h
elif max_diag == 1:
h = wp.sqrt((m[1][1] - (m[2][2] + m[0][0])) + wptype(1))
y = wptype(0.5) * h
h = wptype(0.5) / h
z = (m[1][2] + m[2][1]) * h
x = (m[0][1] + m[1][0]) * h
w = (m[0][2] - m[2][0]) * h
elif max_diag == 2:
h = wp.sqrt((m[2][2] - (m[0][0] + m[1][1])) + wptype(1))
z = wptype(0.5) * h
h = wptype(0.5) / h
x = (m[2][0] + m[0][2]) * h
y = (m[1][2] + m[2][1]) * h
w = (m[1][0] - m[0][1]) * h
q = wp.normalize(quat(x, y, z, w))
wp.atomic_add(loss, 0, q[idx])
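# The branching above is the standard numerically stable matrix-to-quaternion conversion
# (often attributed to Shepperd): pick the largest of the trace and the diagonal entries so
# that h stays well away from zero, recover the remaining components from off-diagonal sums
# and differences, and normalize at the end to tolerate non-orthonormal input such as the
# last test matrix below.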
quat_from_matrix = getkernel(quat_from_matrix, suffix=dtype.__name__)
quat_from_matrix_forward = getkernel(quat_from_matrix_forward, suffix=dtype.__name__)
if register_kernels:
return
m = np.array(
[
[1.0, 0.0, 0.0, 0.0, 0.5, 0.866, 0.0, -0.866, 0.5],
[0.866, 0.0, 0.25, -0.433, 0.5, 0.75, -0.25, -0.866, 0.433],
[0.866, -0.433, 0.25, 0.0, 0.5, 0.866, -0.5, -0.75, 0.433],
[-1.2, -1.6, -2.3, 0.25, -0.6, -0.33, 3.2, -1.0, -2.2],
]
)
m = wp.array2d(m, dtype=wptype, device=device, requires_grad=True)
N = m.shape[0]
def compute_gradients(kernel, wrt, index):
loss = wp.zeros(1, dtype=wptype, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(kernel=kernel, dim=N, inputs=[m, loss, index], device=device)
tape.backward(loss)
gradients = 1.0 * tape.gradients[wrt].numpy()
tape.zero()
return loss.numpy()[0], gradients
# gather gradients from builtin adjoints
cmpx, gradients_x = compute_gradients(quat_from_matrix, m, 0)
cmpy, gradients_y = compute_gradients(quat_from_matrix, m, 1)
cmpz, gradients_z = compute_gradients(quat_from_matrix, m, 2)
cmpw, gradients_w = compute_gradients(quat_from_matrix, m, 3)
# gather gradients from autodiff
cmpx_auto, gradients_x_auto = compute_gradients(quat_from_matrix_forward, m, 0)
cmpy_auto, gradients_y_auto = compute_gradients(quat_from_matrix_forward, m, 1)
cmpz_auto, gradients_z_auto = compute_gradients(quat_from_matrix_forward, m, 2)
cmpw_auto, gradients_w_auto = compute_gradients(quat_from_matrix_forward, m, 3)
# compare
eps = {
np.float16: 2.0e-2,
np.float32: 1.0e-5,
np.float64: 1.0e-8,
}.get(dtype, 0)
assert_np_equal(cmpx, cmpx_auto, tol=eps)
assert_np_equal(cmpy, cmpy_auto, tol=eps)
assert_np_equal(cmpz, cmpz_auto, tol=eps)
assert_np_equal(cmpw, cmpw_auto, tol=eps)
assert_np_equal(gradients_x, gradients_x_auto, tol=eps)
assert_np_equal(gradients_y, gradients_y_auto, tol=eps)
assert_np_equal(gradients_z, gradients_z_auto, tol=eps)
assert_np_equal(gradients_w, gradients_w_auto, tol=eps)
def test_quat_identity(test, device, dtype, register_kernels=False):
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
def quat_identity_test(output: wp.array(dtype=wptype)):
q = wp.quat_identity(dtype=wptype)
output[0] = q[0]
output[1] = q[1]
output[2] = q[2]
output[3] = q[3]
def quat_identity_test_default(output: wp.array(dtype=wp.float32)):
q = wp.quat_identity()
output[0] = q[0]
output[1] = q[1]
output[2] = q[2]
output[3] = q[3]
quat_identity_kernel = getkernel(quat_identity_test, suffix=dtype.__name__)
quat_identity_default_kernel = getkernel(quat_identity_test_default, suffix=np.float32.__name__)
if register_kernels:
return
output = wp.zeros(4, dtype=wptype, device=device)
wp.launch(quat_identity_kernel, dim=1, inputs=[], outputs=[output], device=device)
expected = np.zeros_like(output.numpy())
expected[3] = 1
assert_np_equal(output.numpy(), expected)
# let's just test that it defaults to float32:
output = wp.zeros(4, dtype=wp.float32, device=device)
wp.launch(quat_identity_default_kernel, dim=1, inputs=[], outputs=[output], device=device)
expected = np.zeros_like(output.numpy())
expected[3] = 1
assert_np_equal(output.numpy(), expected)
############################################################
def test_quat_euler_conversion(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
N = 3
rpy_arr = rng.uniform(low=-np.pi, high=np.pi, size=(N, 3))
quats_from_euler = [list(wp.sim.quat_from_euler(wp.vec3(*rpy), 0, 1, 2)) for rpy in rpy_arr]
quats_from_rpy = [list(wp.quat_rpy(rpy[0], rpy[1], rpy[2])) for rpy in rpy_arr]
assert_np_equal(np.array(quats_from_euler), np.array(quats_from_rpy), tol=1e-4)
def test_anon_type_instance(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
def quat_create_test(input: wp.array(dtype=wptype), output: wp.array(dtype=wptype)):
# component constructor:
q = wp.quaternion(input[0], input[1], input[2], input[3])
output[0] = wptype(2) * q[0]
output[1] = wptype(2) * q[1]
output[2] = wptype(2) * q[2]
output[3] = wptype(2) * q[3]
# vector / scalar constructor:
q2 = wp.quaternion(wp.vector(input[4], input[5], input[6]), input[7])
output[4] = wptype(2) * q2[0]
output[5] = wptype(2) * q2[1]
output[6] = wptype(2) * q2[2]
output[7] = wptype(2) * q2[3]
quat_create_kernel = getkernel(quat_create_test, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
input = wp.array(rng.standard_normal(size=8).astype(dtype), requires_grad=True, device=device)
output = wp.zeros(8, dtype=wptype, requires_grad=True, device=device)
wp.launch(quat_create_kernel, dim=1, inputs=[input], outputs=[output], device=device)
assert_np_equal(output.numpy(), 2 * input.numpy())
for i in range(len(input)):
cmp = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(quat_create_kernel, dim=1, inputs=[input], outputs=[output], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[output, i], outputs=[cmp], device=device)
tape.backward(loss=cmp)
expectedgrads = np.zeros(len(input))
expectedgrads[i] = 2
assert_np_equal(tape.gradients[input].numpy(), expectedgrads)
tape.zero()
# Same as above but with a default (float) type
# which tests some different code paths that
# need to ensure types are correctly canonicalized
# during codegen
@wp.kernel
def test_constructor_default():
qzero = wp.quat()
wp.expect_eq(qzero[0], 0.0)
wp.expect_eq(qzero[1], 0.0)
wp.expect_eq(qzero[2], 0.0)
wp.expect_eq(qzero[3], 0.0)
qval = wp.quat(1.0, 2.0, 3.0, 4.0)
wp.expect_eq(qval[0], 1.0)
wp.expect_eq(qval[1], 2.0)
wp.expect_eq(qval[2], 3.0)
wp.expect_eq(qval[3], 4.0)
qeye = wp.quat_identity()
wp.expect_eq(qeye[0], 0.0)
wp.expect_eq(qeye[1], 0.0)
wp.expect_eq(qeye[2], 0.0)
wp.expect_eq(qeye[3], 1.0)
def test_py_arithmetic_ops(test, device, dtype):
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
def make_quat(*args):
if wptype in wp.types.int_types:
# Cast to the correct integer type to simulate wrapping.
return tuple(wptype._type_(x).value for x in args)
return args
quat_cls = wp.types.quaternion(wptype)
v = quat_cls(1, -2, 3, -4)
test.assertSequenceEqual(+v, make_quat(1, -2, 3, -4))
test.assertSequenceEqual(-v, make_quat(-1, 2, -3, 4))
test.assertSequenceEqual(v + quat_cls(5, 5, 5, 5), make_quat(6, 3, 8, 1))
test.assertSequenceEqual(v - quat_cls(5, 5, 5, 5), make_quat(-4, -7, -2, -9))
v = quat_cls(2, 4, 6, 8)
test.assertSequenceEqual(v * wptype(2), make_quat(4, 8, 12, 16))
test.assertSequenceEqual(wptype(2) * v, make_quat(4, 8, 12, 16))
test.assertSequenceEqual(v / wptype(2), make_quat(1, 2, 3, 4))
test.assertSequenceEqual(wptype(24) / v, make_quat(12, 6, 4, 3))
devices = get_test_devices()
class TestQuat(unittest.TestCase):
pass
add_kernel_test(TestQuat, test_constructor_default, dim=1, devices=devices)
for dtype in np_float_types:
add_function_test_register_kernel(
TestQuat, f"test_constructors_{dtype.__name__}", test_constructors, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat,
f"test_casting_constructors_{dtype.__name__}",
test_casting_constructors,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestQuat, f"test_anon_type_instance_{dtype.__name__}", test_anon_type_instance, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_inverse_{dtype.__name__}", test_inverse, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_quat_identity_{dtype.__name__}", test_quat_identity, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_dotproduct_{dtype.__name__}", test_dotproduct, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_length_{dtype.__name__}", test_length, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_normalize_{dtype.__name__}", test_normalize, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_addition_{dtype.__name__}", test_addition, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_subtraction_{dtype.__name__}", test_subtraction, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat,
f"test_scalar_multiplication_{dtype.__name__}",
test_scalar_multiplication,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestQuat, f"test_scalar_division_{dtype.__name__}", test_scalar_division, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat,
f"test_quat_multiplication_{dtype.__name__}",
test_quat_multiplication,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestQuat, f"test_indexing_{dtype.__name__}", test_indexing, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_quat_lerp_{dtype.__name__}", test_quat_lerp, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat,
f"test_quat_to_axis_angle_grad_{dtype.__name__}",
test_quat_to_axis_angle_grad,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestQuat, f"test_slerp_grad_{dtype.__name__}", test_slerp_grad, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_quat_rpy_grad_{dtype.__name__}", test_quat_rpy_grad, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_quat_from_matrix_{dtype.__name__}", test_quat_from_matrix, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_quat_rotate_{dtype.__name__}", test_quat_rotate, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_quat_to_matrix_{dtype.__name__}", test_quat_to_matrix, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat,
f"test_quat_euler_conversion_{dtype.__name__}",
test_quat_euler_conversion,
devices=devices,
dtype=dtype,
)
add_function_test(
TestQuat, f"test_py_arithmetic_ops_{dtype.__name__}", test_py_arithmetic_ops, devices=None, dtype=dtype
)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 73,068 | Python | 34.045084 | 117 | 0.58061 |
NVIDIA/warp/warp/tests/test_async.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
from warp.utils import check_iommu
class Capturable:
def __init__(self, use_graph=True, stream=None):
self.use_graph = use_graph
self.stream = stream
def __enter__(self):
if self.use_graph:
wp.capture_begin(stream=self.stream)
def __exit__(self, exc_type, exc_value, traceback):
if self.use_graph:
try:
# need to call capture_end() to terminate the CUDA stream capture
graph = wp.capture_end(stream=self.stream)
except Exception:
# capture_end() will raise if there was an error during capture, but we squash it here
# if we already had an exception so that the original exception percolates to the caller
if exc_type is None:
raise
else:
# capture can succeed despite some errors during capture (e.g. cudaInvalidValue during copy)
# but if we had an exception during capture, don't launch the graph
if exc_type is None:
wp.capture_launch(graph, stream=self.stream)
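# Usage sketch (mirrors the tests below): with use_graph=False the body simply runs
# eagerly; with use_graph=True it is recorded into a CUDA graph on exit via
# capture_end() and then replayed once with capture_launch(), so the same assertions
# hold in both modes, e.g.:
#
#   with wp.ScopedDevice("cuda:0"):
#       a = wp.zeros(100, dtype=float)
#       with Capturable(use_graph=True):
#           wp.launch(inc, dim=a.size, inputs=[a])
#       assert_np_equal(a.numpy(), np.ones(100, dtype=np.float32))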
@wp.kernel
def inc(a: wp.array(dtype=float)):
tid = wp.tid()
a[tid] = a[tid] + 1.0
def test_async_empty(test, device, use_mempools, use_graph):
with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
n = 100
with Capturable(use_graph):
a = wp.empty(n, dtype=float)
test.assertIsInstance(a, wp.array)
test.assertIsNotNone(a.ptr)
test.assertEqual(a.size, n)
test.assertEqual(a.dtype, wp.float32)
test.assertEqual(a.device, device)
def test_async_zeros(test, device, use_mempools, use_graph):
with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
n = 100
with Capturable(use_graph):
a = wp.zeros(n, dtype=float)
assert_np_equal(a.numpy(), np.zeros(n, dtype=np.float32))
def test_async_zero_v1(test, device, use_mempools, use_graph):
with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
n = 100
with Capturable(use_graph):
a = wp.empty(n, dtype=float)
a.zero_()
assert_np_equal(a.numpy(), np.zeros(n, dtype=np.float32))
def test_async_zero_v2(test, device, use_mempools, use_graph):
with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
n = 100
a = wp.empty(n, dtype=float)
with Capturable(use_graph):
a.zero_()
assert_np_equal(a.numpy(), np.zeros(n, dtype=np.float32))
def test_async_full(test, device, use_mempools, use_graph):
with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
n = 100
value = 42
with Capturable(use_graph):
a = wp.full(n, value, dtype=float)
assert_np_equal(a.numpy(), np.full(n, value, dtype=np.float32))
def test_async_fill_v1(test, device, use_mempools, use_graph):
with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
n = 100
value = 17
with Capturable(use_graph):
a = wp.empty(n, dtype=float)
a.fill_(value)
assert_np_equal(a.numpy(), np.full(n, value, dtype=np.float32))
def test_async_fill_v2(test, device, use_mempools, use_graph):
with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
n = 100
value = 17
a = wp.empty(n, dtype=float)
with Capturable(use_graph):
a.fill_(value)
assert_np_equal(a.numpy(), np.full(n, value, dtype=np.float32))
def test_async_kernels_v1(test, device, use_mempools, use_graph):
with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
n = 100
num_iters = 10
with Capturable(use_graph):
a = wp.zeros(n, dtype=float)
for _i in range(num_iters):
wp.launch(inc, dim=a.size, inputs=[a])
assert_np_equal(a.numpy(), np.full(n, num_iters, dtype=np.float32))
def test_async_kernels_v2(test, device, use_mempools, use_graph):
with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
n = 100
num_iters = 10
a = wp.zeros(n, dtype=float)
with Capturable(use_graph):
for _i in range(num_iters):
wp.launch(inc, dim=a.size, inputs=[a])
assert_np_equal(a.numpy(), np.full(n, num_iters, dtype=np.float32))
class TestAsync(unittest.TestCase):
pass
# get all CUDA devices
cuda_devices = wp.get_cuda_devices()
# get CUDA devices that support mempools
cuda_devices_with_mempools = []
for d in cuda_devices:
if d.is_mempool_supported:
cuda_devices_with_mempools.append(d)
# get a pair of CUDA devices that support mempool access
cuda_devices_with_mempool_access = []
for target_device in cuda_devices_with_mempools:
for peer_device in cuda_devices_with_mempools:
if peer_device != target_device:
if wp.is_mempool_access_supported(target_device, peer_device):
cuda_devices_with_mempool_access = [target_device, peer_device]
break
if cuda_devices_with_mempool_access:
break
def add_test_variants(
func,
device_count=1,
graph_allocs=False,
requires_mempool_access_with_graph=False,
):
# test that works with default allocators
if not graph_allocs and device_count <= len(cuda_devices):
devices = cuda_devices[:device_count]
def func1(t, d):
return func(t, *devices, False, False)
def func2(t, d):
return func(t, *devices, False, True)
name1 = f"{func.__name__}_DefaultAlloc_NoGraph"
name2 = f"{func.__name__}_DefaultAlloc_WithGraph"
if device_count == 1:
add_function_test(TestAsync, name1, func1, devices=devices)
add_function_test(TestAsync, name2, func2, devices=devices)
else:
add_function_test(TestAsync, name1, func1)
add_function_test(TestAsync, name2, func2)
# test that works with mempool allocators
if device_count <= len(cuda_devices_with_mempools):
devices = cuda_devices_with_mempools[:device_count]
def func3(t, d):
return func(t, *devices, True, False)
name3 = f"{func.__name__}_MempoolAlloc_NoGraph"
if device_count == 1:
add_function_test(TestAsync, name3, func3, devices=devices)
else:
add_function_test(TestAsync, name3, func3)
# test that requires devices with mutual mempool access during graph capture (e.g., p2p memcpy limitation)
if requires_mempool_access_with_graph:
suitable_devices = cuda_devices_with_mempool_access
else:
suitable_devices = cuda_devices_with_mempools
if device_count <= len(suitable_devices):
devices = suitable_devices[:device_count]
def func4(t, d):
return func(t, *devices, True, True)
name4 = f"{func.__name__}_MempoolAlloc_WithGraph"
if device_count == 1:
add_function_test(TestAsync, name4, func4, devices=devices)
else:
add_function_test(TestAsync, name4, func4)
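# Each call below registers up to four variants of a test on TestAsync, named
# <func>_DefaultAlloc_NoGraph, <func>_DefaultAlloc_WithGraph, <func>_MempoolAlloc_NoGraph
# and <func>_MempoolAlloc_WithGraph. Tests that allocate memory during graph capture
# (graph_allocs=True) are only registered with the mempool allocator, since the default
# CUDA allocator cannot allocate while a capture is active.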
add_test_variants(test_async_empty, graph_allocs=True)
add_test_variants(test_async_zeros, graph_allocs=True)
add_test_variants(test_async_zero_v1, graph_allocs=True)
add_test_variants(test_async_zero_v2, graph_allocs=False)
add_test_variants(test_async_full, graph_allocs=True)
add_test_variants(test_async_fill_v1, graph_allocs=True)
add_test_variants(test_async_fill_v2, graph_allocs=False)
add_test_variants(test_async_kernels_v1, graph_allocs=True)
add_test_variants(test_async_kernels_v2, graph_allocs=False)
# =================================================================================
# wp.copy() tests
# =================================================================================
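# Helpers that materialize the same numpy data as arrays with different memory
# layouts (contiguous, strided, indexed, fabric, indexed fabric), so that
# copy_template() below can exercise wp.copy() across every layout combination.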
def as_contiguous_array(data, device=None, grad_data=None):
a = wp.array(data=data, device=device, copy=True)
if grad_data is not None:
a.grad = as_contiguous_array(grad_data, device=device)
return a
def as_strided_array(data, device=None, grad_data=None):
a = wp.array(data=data, device=device)
# make a copy with non-contiguous strides
strides = (*a.strides[:-1], 2 * a.strides[-1])
strided_a = wp.zeros(shape=a.shape, strides=strides, dtype=a.dtype, device=device)
wp.copy(strided_a, a)
if grad_data is not None:
strided_a.grad = as_strided_array(grad_data, device=device)
return strided_a
def as_indexed_array(data, device=None, **kwargs):
a = wp.array(data=data, device=device)
# allocate double the elements so we can index half of them
shape = (*a.shape[:-1], 2 * a.shape[-1])
big_a = wp.zeros(shape=shape, dtype=a.dtype, device=device)
indices = wp.array(data=np.arange(0, shape[-1], 2, dtype=np.int32), device=device)
indexed_a = big_a[indices]
wp.copy(indexed_a, a)
return indexed_a
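# Wrap the data in a wp.fabricarray through a Fabric-style data interface built from
# a regular Warp array (helper from test_fabricarray), so copies to and from Fabric
# arrays are covered as well.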
def as_fabric_array(data, device=None, **kwargs):
from warp.tests.test_fabricarray import _create_fabric_array_interface
a = wp.array(data=data, device=device)
iface = _create_fabric_array_interface(a, "foo")
fa = wp.fabricarray(data=iface, attrib="foo")
fa._iface = iface # save data reference
return fa
def as_indexed_fabric_array(data, device=None, **kwargs):
from warp.tests.test_fabricarray import _create_fabric_array_interface
a = wp.array(data=data, device=device)
shape = (*a.shape[:-1], 2 * a.shape[-1])
# allocate double the elements so we can index half of them
big_a = wp.zeros(shape=shape, dtype=a.dtype, device=device)
indices = wp.array(data=np.arange(0, shape[-1], 2, dtype=np.int32), device=device)
iface = _create_fabric_array_interface(big_a, "foo", copy=True)
fa = wp.fabricarray(data=iface, attrib="foo")
fa._iface = iface # save data reference
indexed_fa = fa[indices]
wp.copy(indexed_fa, a)
return indexed_fa
class CopyParams:
def __init__(
self,
with_grad=False, # whether to use arrays with gradients (contiguous and strided only)
src_use_mempool=False, # whether to enable memory pool on source device
dst_use_mempool=False, # whether to enable memory pool on destination device
access_dst_src=False, # whether destination device has access to the source mempool
access_src_dst=False, # whether source device has access to the destination mempool
stream_device=None, # the device for the stream (None for default behaviour)
use_graph=False, # whether to use a graph
value_offset=0, # unique offset for generated data values per test
):
self.with_grad = with_grad
self.src_use_mempool = src_use_mempool
self.dst_use_mempool = dst_use_mempool
self.access_dst_src = access_dst_src
self.access_src_dst = access_src_dst
self.stream_device = stream_device
self.use_graph = use_graph
self.value_offset = value_offset
def copy_template(test, src_ctor, dst_ctor, src_device, dst_device, n, params: CopyParams):
# activate the given memory pool configuration
with wp.ScopedMempool(src_device, params.src_use_mempool), wp.ScopedMempool(
dst_device, params.dst_use_mempool
), wp.ScopedMempoolAccess(dst_device, src_device, params.access_dst_src), wp.ScopedMempoolAccess(
src_device, dst_device, params.access_src_dst
):
# make sure the data are different between tests by adding a unique offset
# this avoids aliasing issues with older memory
src_data = np.arange(params.value_offset, params.value_offset + n, dtype=np.float32)
dst_data = np.zeros(n, dtype=np.float32)
if params.with_grad:
src_grad_data = -np.arange(params.value_offset, params.value_offset + n, dtype=np.float32)
dst_grad_data = np.zeros(n, dtype=np.float32)
else:
src_grad_data = None
dst_grad_data = None
# create Warp arrays for the copy
src = src_ctor(src_data, device=src_device, grad_data=src_grad_data)
dst = dst_ctor(dst_data, device=dst_device, grad_data=dst_grad_data)
# determine the stream argument to pass to wp.copy()
if params.stream_device is not None:
stream_arg = wp.Stream(params.stream_device)
else:
stream_arg = None
# determine the actual stream used for the copy
if stream_arg is not None:
stream = stream_arg
else:
if dst_device.is_cuda:
stream = dst_device.stream
elif src_device.is_cuda:
stream = src_device.stream
else:
stream = None
# check if an exception is expected given the arguments and system configuration
expected_error_type = None
expected_error_regex = None
# restrictions on copying between different devices during graph capture
if params.use_graph and src_device != dst_device:
# errors with allocating staging buffer on source device
if not src.is_contiguous:
if src_device.is_cuda and not src_device.is_mempool_enabled:
# can't allocate staging buffer using default CUDA allocator during capture
expected_error_type, expected_error_regex = RuntimeError, r"^Failed to allocate"
elif src_device.is_cpu:
# can't allocate CPU staging buffer during capture
expected_error_type, expected_error_regex = RuntimeError, r"^Failed to allocate"
# errors with allocating staging buffer on destination device
if expected_error_type is None:
if not dst.is_contiguous:
if dst_device.is_cuda and not dst_device.is_mempool_enabled:
# can't allocate staging buffer using default CUDA allocator during capture
expected_error_type, expected_error_regex = RuntimeError, r"^Failed to allocate"
elif dst_device.is_cpu and src_device.is_cuda:
# can't allocate CPU staging buffer during capture
expected_error_type, expected_error_regex = RuntimeError, r"^Failed to allocate"
# p2p copies and mempool access
if expected_error_type is None and src_device.is_cuda and dst_device.is_cuda:
# If the source is a contiguous mempool allocation or a non-contiguous array
# AND the destination is a contiguous mempool allocation or a non-contiguous array,
# then memory pool access needs to be enabled EITHER from src_device to dst_device
# OR from dst_device to src_device.
if (
((src.is_contiguous and params.src_use_mempool) or not src.is_contiguous)
and ((dst.is_contiguous and params.dst_use_mempool) or not dst.is_contiguous)
and not wp.is_mempool_access_enabled(src_device, dst_device)
and not wp.is_mempool_access_enabled(dst_device, src_device)
):
expected_error_type, expected_error_regex = RuntimeError, r"^Warp copy error"
# synchronize before test
wp.synchronize()
if expected_error_type is not None:
# disable error output from Warp if we expect an exception
try:
saved_error_output_enabled = wp.context.runtime.core.is_error_output_enabled()
wp.context.runtime.core.set_error_output_enabled(False)
with test.assertRaisesRegex(expected_error_type, expected_error_regex):
with Capturable(use_graph=params.use_graph, stream=stream):
wp.copy(dst, src, stream=stream_arg)
finally:
wp.context.runtime.core.set_error_output_enabled(saved_error_output_enabled)
wp.synchronize()
# print(f"SUCCESSFUL ERROR PREDICTION: {expected_error_regex}")
else:
with Capturable(use_graph=params.use_graph, stream=stream):
wp.copy(dst, src, stream=stream_arg)
# synchronize the stream where the copy was running (None for h2h copies)
if stream is not None:
wp.synchronize_stream(stream)
assert_np_equal(dst.numpy(), src.numpy())
if params.with_grad:
assert_np_equal(dst.grad.numpy(), src.grad.numpy())
# print("SUCCESSFUL COPY")
array_constructors = {
"contiguous": as_contiguous_array,
"strided": as_strided_array,
"indexed": as_indexed_array,
"fabric": as_fabric_array,
"indexedfabric": as_indexed_fabric_array,
}
array_type_codes = {
"contiguous": "c",
"strided": "s",
"indexed": "i",
"fabric": "f",
"indexedfabric": "fi",
}
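# (source, destination) device pairs keyed by transfer type: host-to-host (h2h),
# device-to-device (d2d), host-to-device (h2d), device-to-host (d2h), and
# peer-to-peer between two CUDA devices (p2p), depending on what the system provides.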
device_pairs = {}
cpu = None
cuda0 = None
cuda1 = None
cuda2 = None
if wp.is_cpu_available():
cpu = wp.get_device("cpu")
device_pairs["h2h"] = (cpu, cpu)
if wp.is_cuda_available():
cuda0 = wp.get_device("cuda:0")
device_pairs["d2d"] = (cuda0, cuda0)
if wp.is_cpu_available():
device_pairs["h2d"] = (cpu, cuda0)
device_pairs["d2h"] = (cuda0, cpu)
if wp.get_cuda_device_count() > 1:
cuda1 = wp.get_device("cuda:1")
device_pairs["p2p"] = (cuda0, cuda1)
if wp.get_cuda_device_count() > 2:
cuda2 = wp.get_device("cuda:2")
num_copy_elems = 1000000
num_copy_tests = 0
def add_copy_test(test_name, src_ctor, dst_ctor, src_device, dst_device, n, params):
def test_func(
test,
device,
src_ctor=src_ctor,
dst_ctor=dst_ctor,
src_device=src_device,
dst_device=dst_device,
n=n,
params=params,
):
return copy_template(test, src_ctor, dst_ctor, src_device, dst_device, n, params)
add_function_test(TestAsync, test_name, test_func, check_output=False)
# Procedurally add tests with argument combinations supported by the system.
for src_type, src_ctor in array_constructors.items():
for dst_type, dst_ctor in array_constructors.items():
copy_type = f"{array_type_codes[src_type]}2{array_type_codes[dst_type]}"
for transfer_type, device_pair in device_pairs.items():
# skip p2p tests if IOMMU is enabled on Linux
if transfer_type == "p2p" and not check_iommu():
continue
src_device = device_pair[0]
dst_device = device_pair[1]
# basic copy arguments
copy_args = (src_ctor, dst_ctor, src_device, dst_device, num_copy_elems)
if src_device.is_cuda and src_device.is_mempool_supported:
src_mempool_flags = [False, True]
else:
src_mempool_flags = [False]
if dst_device.is_cuda and dst_device.is_mempool_supported:
dst_mempool_flags = [False, True]
else:
dst_mempool_flags = [False]
# stream options
if src_device.is_cuda:
if dst_device.is_cuda:
if src_device == dst_device:
# d2d
assert src_device == cuda0 and dst_device == cuda0
if cuda1 is not None:
stream_devices = [None, cuda0, cuda1]
else:
stream_devices = [None, cuda0]
else:
# p2p
assert src_device == cuda0 and dst_device == cuda1
if cuda2 is not None:
stream_devices = [None, cuda0, cuda1, cuda2]
else:
stream_devices = [None, cuda0, cuda1]
else:
# d2h
assert src_device == cuda0
if cuda1 is not None:
stream_devices = [None, cuda0, cuda1]
else:
stream_devices = [None, cuda0]
else:
if dst_device.is_cuda:
# h2d
assert dst_device == cuda0
if cuda1 is not None:
stream_devices = [None, cuda0, cuda1]
else:
stream_devices = [None, cuda0]
else:
# h2h
stream_devices = [None]
# gradient options (only supported with contiguous and strided arrays)
if src_type in ("contiguous", "strided") and dst_type in ("contiguous", "strided"):
grad_flags = [False, True]
else:
grad_flags = [False]
# graph capture options (only supported with CUDA devices)
if src_device.is_cuda or dst_device.is_cuda:
graph_flags = [False, True]
else:
graph_flags = [False]
# access from destination device to source mempool
if wp.is_mempool_access_supported(dst_device, src_device):
access_dst_src_flags = [False, True]
else:
access_dst_src_flags = [False]
# access from source device to destination mempool
if wp.is_mempool_access_supported(src_device, dst_device):
access_src_dst_flags = [False, True]
else:
access_src_dst_flags = [False]
for src_use_mempool in src_mempool_flags:
for dst_use_mempool in dst_mempool_flags:
for stream_device in stream_devices:
for access_dst_src in access_dst_src_flags:
for access_src_dst in access_src_dst_flags:
for with_grad in grad_flags:
for use_graph in graph_flags:
test_name = f"test_copy_{copy_type}_{transfer_type}"
if src_use_mempool:
test_name += "_SrcPoolOn"
else:
test_name += "_SrcPoolOff"
if dst_use_mempool:
test_name += "_DstPoolOn"
else:
test_name += "_DstPoolOff"
if stream_device is None:
test_name += "_NoStream"
elif stream_device == cuda0:
test_name += "_Stream0"
elif stream_device == cuda1:
test_name += "_Stream1"
elif stream_device == cuda2:
test_name += "_Stream2"
else:
raise AssertionError
if with_grad:
test_name += "_Grad"
else:
test_name += "_NoGrad"
if use_graph:
test_name += "_Graph"
else:
test_name += "_NoGraph"
if access_dst_src and access_src_dst:
test_name += "_AccessBoth"
elif access_dst_src and not access_src_dst:
test_name += "_AccessDstSrc"
elif not access_dst_src and access_src_dst:
test_name += "_AccessSrcDst"
else:
test_name += "_AccessNone"
copy_params = CopyParams(
src_use_mempool=src_use_mempool,
dst_use_mempool=dst_use_mempool,
access_dst_src=access_dst_src,
access_src_dst=access_src_dst,
stream_device=stream_device,
with_grad=with_grad,
use_graph=use_graph,
value_offset=num_copy_tests,
)
add_copy_test(test_name, *copy_args, copy_params)
num_copy_tests += 1
# Specify individual test(s) for debugging purposes
# add_copy_test("test_a", as_contiguous_array, as_strided_array, cuda0, cuda1, num_copy_elems,
# CopyParams(
# src_use_mempool=True,
# dst_use_mempool=True,
# access_dst_src=False,
# access_src_dst=False,
# stream_device=cuda0,
# with_grad=False,
# use_graph=True,
# value_offset=0))
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 26,378 | Python | 38.548726 | 110 | 0.556676 |
NVIDIA/warp/warp/tests/walkthrough_debug.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
####################################################################################################
#
# This file demonstrates step-through debugging support of the C++ code generated for a Warp kernel
# running on the CPU.
#
# This is not a unit test; it should be run interactively.
#
# For a fully integrated experience use Visual Studio Code and install the "Python C++ Debugger"
# and "CodeLLDB" extensions. Add the following configurations to your .vscode/launch.json file:
#
"""
{
"name": "Warp Debugger",
"type": "pythoncpp",
"request": "launch",
"pythonLaunchName": "Python: Current File",
"cppAttachName": "(lldb) Attach",
},
{
"name": "Python: Current File",
"type": "debugpy",
"request": "launch",
"program": "${file}",
"console": "integratedTerminal",
"stopOnEntry": false,
},
{
"name": "(lldb) Attach",
"type": "lldb",
"request": "attach",
},
"""
#
# Then run this .py file using the "Warp Debugger" configuration.
#
# Check out the following resources for more information about launch configurations and
# troubleshooting common VSCode debugger integration issues:
# • https://vscode-docs.readthedocs.io/en/stable/editor/debugging/#launch-configurations
# • https://code.visualstudio.com/docs/cpp/cpp-debug#_debugging
#
####################################################################################################
import warp as wp
# The init() function prints the directory of the kernel cache which contains the .cpp files
# generated from Warp kernels. You can put breakpoints in these C++ files through Visual Studio Code,
# but it's generally more convenient to use wp.breakpoint(). See the example below.
wp.init()
# Enable kernels to be compiled with debug info and disable optimizations
wp.config.mode = "debug"
# Make sure Warp was built with `build_lib.py --mode=debug`
assert wp.context.runtime.core.is_debug_enabled(), "Warp must be built in debug mode to enable debugging kernels"
@wp.kernel
def example_breakpoint(n: int):
a = int(0)
for _i in range(0, n):
if a == 5:
            # Your debugger should halt at the C++ code corresponding to the next line,
# namely a call to the __debugbreak() intrinsic function.
wp.breakpoint()
break
a += 1
wp.expect_eq(a, 5)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
wp.launch(example_breakpoint, dim=1, inputs=[10], device="cpu")
| 2,889 | Python | 32.604651 | 113 | 0.650052 |
NVIDIA/warp/warp/tests/unittest_serial.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import warp as wp
import warp.tests.unittest_suites
def run_suite() -> bool:
"""Run a test suite"""
# force rebuild of all kernels
wp.build.clear_kernel_cache()
print("Cleared Warp kernel cache")
runner = unittest.TextTestRunner(verbosity=2, failfast=True)
# Can swap out different suites
suite = warp.tests.unittest_suites.default_suite()
# suite = warp.tests.unittest_suites.auto_discover_suite()
# suite = warp.tests.unittest_suites.kit_suite()
print(f"Test suite has {suite.countTestCases()} tests")
ret = not runner.run(suite).wasSuccessful()
return ret
if __name__ == "__main__":
ret = run_suite()
import sys
sys.exit(ret)
| 1,140 | Python | 28.25641 | 76 | 0.723684 |
NVIDIA/warp/warp/tests/test_snippet.py | import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
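# These tests exercise wp.func_native(), which lets a Warp function body be supplied
# as a native C++/CUDA source snippet, optionally together with an adjoint snippet
# for the backward pass and a replay snippet used when a tape replays the forward
# pass. A minimal sketch of the pattern used throughout this file (names are
# illustrative, not part of the API):
#
#   snippet = "out[tid] = 2.0 * x[tid];"
#   adj_snippet = "adj_x[tid] += 2.0 * adj_out[tid];"
#
#   @wp.func_native(snippet, adj_snippet)
#   def twice(x: wp.array(dtype=float), out: wp.array(dtype=float), tid: int):
#       ...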
def test_basic(test, device):
snippet = """
out[tid] = a * x[tid] + y[tid];
"""
adj_snippet = """
adj_a += x[tid] * adj_out[tid];
adj_x[tid] += a * adj_out[tid];
adj_y[tid] += adj_out[tid];
"""
@wp.func_native(snippet, adj_snippet)
def saxpy(
a: wp.float32,
x: wp.array(dtype=wp.float32),
y: wp.array(dtype=wp.float32),
out: wp.array(dtype=wp.float32),
tid: int,
): # fmt: skip
...
@wp.kernel
def saxpy_cu(
a: wp.float32, x: wp.array(dtype=wp.float32), y: wp.array(dtype=wp.float32), out: wp.array(dtype=wp.float32)
):
tid = wp.tid()
saxpy(a, x, y, out, tid)
@wp.kernel
def saxpy_py(
a: wp.float32, x: wp.array(dtype=wp.float32), y: wp.array(dtype=wp.float32), out: wp.array(dtype=wp.float32)
):
tid = wp.tid()
out[tid] = a * x[tid] + y[tid]
N = 128
a1 = 2.0
x1 = wp.array(np.arange(N, dtype=np.float32), dtype=wp.float32, device=device, requires_grad=True)
y1 = wp.zeros_like(x1)
out1 = wp.array(np.arange(N, dtype=np.float32), dtype=wp.float32, device=device)
adj_out1 = wp.array(np.ones(N, dtype=np.float32), dtype=wp.float32, device=device)
a2 = 2.0
x2 = wp.array(np.arange(N, dtype=np.float32), dtype=wp.float32, device=device, requires_grad=True)
y2 = wp.zeros_like(x2)
out2 = wp.array(np.arange(N, dtype=np.float32), dtype=wp.float32, device=device)
adj_out2 = wp.array(np.ones(N, dtype=np.float32), dtype=wp.float32, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel=saxpy_cu, dim=N, inputs=[a1, x1, y1], outputs=[out1], device=device)
wp.launch(kernel=saxpy_py, dim=N, inputs=[a2, x2, y2], outputs=[out2], device=device)
tape.backward(grads={out1: adj_out1, out2: adj_out2})
# test forward snippet
assert_np_equal(out1.numpy(), out2.numpy())
# test backward snippet
assert_np_equal(x1.grad.numpy(), a1 * np.ones(N, dtype=np.float32))
assert_np_equal(x1.grad.numpy(), x2.grad.numpy())
assert_np_equal(y1.grad.numpy(), np.ones(N, dtype=np.float32))
assert_np_equal(y1.grad.numpy(), y2.grad.numpy())
def test_shared_memory(test, device):
snippet = """
__shared__ int s[128];
s[tid] = d[tid];
__syncthreads();
d[tid] = s[N - tid - 1];
"""
@wp.func_native(snippet)
def reverse(d: wp.array(dtype=int), N: int, tid: int):
"""Reverse the array d in place using shared memory."""
return
@wp.kernel
def reverse_kernel(d: wp.array(dtype=int), N: int):
tid = wp.tid()
reverse(d, N, tid)
N = 128
x = wp.array(np.arange(N, dtype=int), dtype=int, device=device)
y = np.arange(127, -1, -1, dtype=int)
wp.launch(kernel=reverse_kernel, dim=N, inputs=[x, N], device=device)
assert_np_equal(x.numpy(), y)
assert reverse.__doc__ == "Reverse the array d in place using shared memory."
def test_cpu_snippet(test, device):
snippet = """
int inc = 1;
out[tid] = x[tid] + inc;
"""
@wp.func_native(snippet)
def increment_snippet(
x: wp.array(dtype=wp.int32),
out: wp.array(dtype=wp.int32),
tid: int,
): # fmt: skip
...
@wp.kernel
def increment(x: wp.array(dtype=wp.int32), out: wp.array(dtype=wp.int32)):
tid = wp.tid()
increment_snippet(x, out, tid)
N = 128
x = wp.array(np.arange(N, dtype=np.int32), dtype=wp.int32, device=device)
out = wp.zeros(N, dtype=wp.int32, device=device)
wp.launch(kernel=increment, dim=N, inputs=[x], outputs=[out], device=device)
assert_np_equal(out.numpy(), np.arange(1, N + 1, 1, dtype=np.int32))
def test_custom_replay_grad(test, device):
num_threads = 16
counter = wp.zeros(1, dtype=wp.int32, device=device)
thread_ids = wp.zeros(num_threads, dtype=wp.int32, device=device)
inputs = wp.array(np.arange(num_threads, dtype=np.float32), device=device, requires_grad=True)
outputs = wp.zeros_like(inputs)
snippet = """
int next_index = atomicAdd(counter, 1);
thread_values[tid] = next_index;
"""
replay_snippet = ""
@wp.func_native(snippet, replay_snippet=replay_snippet)
def reversible_increment(counter: wp.array(dtype=int), thread_values: wp.array(dtype=int), tid: int): # fmt: skip
...
@wp.kernel
def run_atomic_add(
input: wp.array(dtype=float),
counter: wp.array(dtype=int),
thread_values: wp.array(dtype=int),
output: wp.array(dtype=float),
):
tid = wp.tid()
reversible_increment(counter, thread_values, tid)
idx = thread_values[tid]
output[idx] = input[idx] ** 2.0
tape = wp.Tape()
with tape:
wp.launch(
run_atomic_add, dim=num_threads, inputs=[inputs, counter, thread_ids], outputs=[outputs], device=device
)
tape.backward(grads={outputs: wp.array(np.ones(num_threads, dtype=np.float32), device=device)})
assert_np_equal(inputs.grad.numpy(), 2.0 * inputs.numpy(), tol=1e-4)
def test_replay_simplification(test, device):
num_threads = 8
x = wp.array(1.0 + np.arange(num_threads, dtype=np.float32), device=device, requires_grad=True)
y = wp.zeros_like(x)
z = wp.zeros_like(x)
snippet = "y[tid] = powf(x[tid], 2.0);"
replay_snippet = "y[tid] = x[tid];"
adj_snippet = "adj_x[tid] += 2.0 * adj_y[tid];"
@wp.func_native(snippet, adj_snippet=adj_snippet, replay_snippet=replay_snippet)
def square(x: wp.array(dtype=float), y: wp.array(dtype=float), tid: int): # fmt: skip
...
@wp.kernel
def log_square_kernel(x: wp.array(dtype=float), y: wp.array(dtype=float), z: wp.array(dtype=float)):
tid = wp.tid()
square(x, y, tid)
z[tid] = wp.log(y[tid])
tape = wp.Tape()
with tape:
wp.launch(log_square_kernel, dim=num_threads, inputs=[x, y], outputs=[z], device=device)
tape.backward(grads={z: wp.array(np.ones(num_threads, dtype=np.float32), device=device)})
assert_np_equal(x.grad.numpy(), 2.0 / (1.0 + np.arange(num_threads)), tol=1e-6)
def test_recompile_snippet(test, device):
snippet = """
int inc = 1;
out[tid] = x[tid] + inc;
"""
@wp.func_native(snippet)
def increment_snippet(
x: wp.array(dtype=wp.int32),
out: wp.array(dtype=wp.int32),
tid: int,
): # fmt: skip
...
@wp.kernel
def increment(x: wp.array(dtype=wp.int32), out: wp.array(dtype=wp.int32)):
tid = wp.tid()
increment_snippet(x, out, tid)
N = 128
x = wp.array(np.arange(N, dtype=np.int32), dtype=wp.int32, device=device)
out = wp.zeros(N, dtype=wp.int32, device=device)
wp.launch(kernel=increment, dim=N, inputs=[x], outputs=[out], device=device)
assert_np_equal(out.numpy(), np.arange(1, N + 1, 1, dtype=np.int32))
snippet = """
int inc = 2;
out[tid] = x[tid] + inc;
"""
@wp.func_native(snippet)
def increment_snippet(
x: wp.array(dtype=wp.int32),
out: wp.array(dtype=wp.int32),
tid: int,
): # fmt: skip
...
wp.launch(kernel=increment, dim=N, inputs=[x], outputs=[out], device=device)
assert_np_equal(out.numpy(), 1 + np.arange(1, N + 1, 1, dtype=np.int32))
def test_return_type(test, device):
snippet = """
float sq = x * x;
return sq;
"""
adj_snippet = """
adj_x += 2 * x * adj_ret;
"""
# check python built-in return type compilation
@wp.func_native(snippet, adj_snippet)
def square(x: float) -> float: ...
# check warp built-in return type compilation
@wp.func_native(snippet, adj_snippet)
def square(x: wp.float32) -> wp.float32: ...
@wp.kernel
def square_kernel(i: wp.array(dtype=float), o: wp.array(dtype=float)):
tid = wp.tid()
x = i[tid]
o[tid] = square(x)
N = 5
x = wp.array(np.arange(N, dtype=float), dtype=float, requires_grad=True, device=device)
y = wp.zeros_like(x)
tape = wp.Tape()
with tape:
wp.launch(kernel=square_kernel, dim=N, inputs=[x, y], device=device)
y.grad = wp.ones(N, dtype=float, device=device)
tape.backward()
assert_np_equal(y.numpy(), np.array([0.0, 1.0, 4.0, 9.0, 16.0]))
assert_np_equal(x.grad.numpy(), np.array([0.0, 2.0, 4.0, 6.0, 8.0]))
class TestSnippets(unittest.TestCase):
pass
add_function_test(TestSnippets, "test_basic", test_basic, devices=get_selected_cuda_test_devices())
add_function_test(TestSnippets, "test_shared_memory", test_shared_memory, devices=get_selected_cuda_test_devices())
add_function_test(TestSnippets, "test_cpu_snippet", test_cpu_snippet, devices=["cpu"])
add_function_test(
TestSnippets, "test_custom_replay_grad", test_custom_replay_grad, devices=get_selected_cuda_test_devices()
)
add_function_test(
TestSnippets, "test_replay_simplification", test_replay_simplification, devices=get_selected_cuda_test_devices()
)
add_function_test(
TestSnippets, "test_recompile_snippet", test_recompile_snippet, devices=get_selected_cuda_test_devices()
)
add_function_test(TestSnippets, "test_return_type", test_return_type, devices=get_selected_cuda_test_devices())
if __name__ == "__main__":
unittest.main(verbosity=2)
| 9,443 | Python | 30.065789 | 118 | 0.607328 |
NVIDIA/warp/warp/tests/test_vec.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
np_signed_int_types = [
np.int8,
np.int16,
np.int32,
np.int64,
np.byte,
]
np_unsigned_int_types = [
np.uint8,
np.uint16,
np.uint32,
np.uint64,
np.ubyte,
]
np_float_types = [np.float16, np.float32, np.float64]
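# Generate random test values appropriate for the dtype: standard normals for floats,
# and small positive integers for integer types (narrower ranges for the 8-bit types)
# to keep kernel arithmetic within range.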
def randvals(rng, shape, dtype):
if dtype in np_float_types:
return rng.standard_normal(size=shape).astype(dtype)
elif dtype in [np.int8, np.uint8, np.byte, np.ubyte]:
return rng.integers(1, high=3, size=shape, dtype=dtype)
return rng.integers(1, high=5, size=shape, dtype=dtype)
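# Cache kernels per (function name, dtype suffix) so repeated test registrations reuse
# the same wp.Kernel instead of generating a new one for every call.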
kernel_cache = {}
def getkernel(func, suffix=""):
key = func.__name__ + "_" + suffix
if key not in kernel_cache:
kernel_cache[key] = wp.Kernel(func=func, key=key)
return kernel_cache[key]
def test_anon_constructor_error_dtype_keyword_missing(test, device):
@wp.kernel
def kernel():
wp.vector(length=123)
with test.assertRaisesRegex(
RuntimeError,
r"vec\(\) must have dtype as a keyword argument if it has no positional arguments, e.g.: wp.vector\(length=5, dtype=wp.float32\)$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_anon_constructor_error_length_mismatch(test, device):
@wp.kernel
def kernel():
wp.vector(
wp.vector(length=2, dtype=float),
length=3,
dtype=float,
)
with test.assertRaisesRegex(
RuntimeError,
r"Incompatible vector lengths for casting copy constructor, 3 vs 2$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_anon_constructor_error_numeric_arg_missing_1(test, device):
@wp.kernel
def kernel():
wp.vector(1.0, 2.0, length=12345)
with test.assertRaisesRegex(
RuntimeError,
r"vec\(\) must have one scalar argument or the dtype keyword argument if the length keyword argument is specified, e.g.: wp.vec\(1.0, length=5\)$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_anon_constructor_error_numeric_arg_missing_2(test, device):
@wp.kernel
def kernel():
wp.vector()
with test.assertRaisesRegex(
RuntimeError,
r"vec\(\) must have at least one numeric argument, if it's length, dtype is not specified$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_anon_constructor_error_dtype_keyword_extraneous(test, device):
@wp.kernel
def kernel():
wp.vector(1.0, 2.0, 3.0, dtype=float)
with test.assertRaisesRegex(
RuntimeError,
r"vec\(\) should not have dtype specified if numeric arguments are given, the dtype will be inferred from the argument types$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_anon_constructor_error_numeric_args_mismatch(test, device):
@wp.kernel
def kernel():
wp.vector(1.0, 2)
with test.assertRaisesRegex(
RuntimeError,
r"All numeric arguments to vec\(\) constructor should have the same "
r"type, expected 2 arg_types of type <class 'warp.types.float32'>, "
r"received <class 'warp.types.float32'>,<class 'warp.types.int32'>$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_tpl_constructor_error_incompatible_sizes(test, device):
@wp.kernel
def kernel():
wp.vec3(wp.vec2(1.0, 2.0))
with test.assertRaisesRegex(RuntimeError, r"Incompatible matrix sizes for casting copy constructor, 3 vs 2"):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_tpl_constructor_error_numeric_args_mismatch(test, device):
@wp.kernel
def kernel():
wp.vec2(1.0, 2)
with test.assertRaisesRegex(
RuntimeError,
r"All numeric arguments to vec\(\) constructor should have the same "
r"type, expected 2 arg_types of type <class 'warp.types.float32'>, "
r"received <class 'warp.types.float32'>,<class 'warp.types.int32'>$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_negation(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
def check_negation(
v2: wp.array(dtype=vec2),
v3: wp.array(dtype=vec3),
v4: wp.array(dtype=vec4),
v5: wp.array(dtype=vec5),
v2out: wp.array(dtype=vec2),
v3out: wp.array(dtype=vec3),
v4out: wp.array(dtype=vec4),
v5out: wp.array(dtype=vec5),
v20: wp.array(dtype=wptype),
v21: wp.array(dtype=wptype),
v30: wp.array(dtype=wptype),
v31: wp.array(dtype=wptype),
v32: wp.array(dtype=wptype),
v40: wp.array(dtype=wptype),
v41: wp.array(dtype=wptype),
v42: wp.array(dtype=wptype),
v43: wp.array(dtype=wptype),
v50: wp.array(dtype=wptype),
v51: wp.array(dtype=wptype),
v52: wp.array(dtype=wptype),
v53: wp.array(dtype=wptype),
v54: wp.array(dtype=wptype),
):
v2result = -v2[0]
v3result = -v3[0]
v4result = -v4[0]
v5result = -v5[0]
v2out[0] = v2result
v3out[0] = v3result
v4out[0] = v4result
v5out[0] = v5result
# multiply these outputs by 2 so we've got something to backpropagate:
v20[0] = wptype(2) * v2result[0]
v21[0] = wptype(2) * v2result[1]
v30[0] = wptype(2) * v3result[0]
v31[0] = wptype(2) * v3result[1]
v32[0] = wptype(2) * v3result[2]
v40[0] = wptype(2) * v4result[0]
v41[0] = wptype(2) * v4result[1]
v42[0] = wptype(2) * v4result[2]
v43[0] = wptype(2) * v4result[3]
v50[0] = wptype(2) * v5result[0]
v51[0] = wptype(2) * v5result[1]
v52[0] = wptype(2) * v5result[2]
v53[0] = wptype(2) * v5result[3]
v54[0] = wptype(2) * v5result[4]
kernel = getkernel(check_negation, suffix=dtype.__name__)
if register_kernels:
return
v2 = wp.array(randvals(rng, (1, 2), dtype), dtype=vec2, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, (1, 4), dtype), dtype=vec4, requires_grad=True, device=device)
v5_np = randvals(rng, (1, 5), dtype)
v5 = wp.array(v5_np, dtype=vec5, requires_grad=True, device=device)
v2out = wp.zeros(1, dtype=vec2, device=device)
v3out = wp.zeros(1, dtype=vec3, device=device)
v4out = wp.zeros(1, dtype=vec4, device=device)
v5out = wp.zeros(1, dtype=vec5, device=device)
v20 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v21 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v30 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v31 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v32 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v40 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v41 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v42 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v43 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v50 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v51 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v52 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v53 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v54 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[v2, v3, v4, v5],
outputs=[v2out, v3out, v4out, v5out, v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54],
device=device,
)
if dtype in np_float_types:
for i, l in enumerate([v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54]):
tape.backward(loss=l)
allgrads = np.concatenate([tape.gradients[v].numpy()[0] for v in [v2, v3, v4, v5]])
expected_grads = np.zeros_like(allgrads)
expected_grads[i] = -2
assert_np_equal(allgrads, expected_grads, tol=tol)
tape.zero()
assert_np_equal(v2out.numpy()[0], -v2.numpy()[0], tol=tol)
assert_np_equal(v3out.numpy()[0], -v3.numpy()[0], tol=tol)
assert_np_equal(v4out.numpy()[0], -v4.numpy()[0], tol=tol)
assert_np_equal(v5out.numpy()[0], -v5.numpy()[0], tol=tol)
def test_subtraction_unsigned(test, device, dtype, register_kernels=False):
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
def check_subtraction_unsigned():
wp.expect_eq(vec2(wptype(3), wptype(4)) - vec2(wptype(1), wptype(2)), vec2(wptype(2), wptype(2)))
wp.expect_eq(
vec3(
wptype(3),
wptype(4),
wptype(4),
)
- vec3(wptype(1), wptype(2), wptype(3)),
vec3(wptype(2), wptype(2), wptype(1)),
)
wp.expect_eq(
vec4(
wptype(3),
wptype(4),
wptype(4),
wptype(5),
)
- vec4(wptype(1), wptype(2), wptype(3), wptype(4)),
vec4(wptype(2), wptype(2), wptype(1), wptype(1)),
)
wp.expect_eq(
vec5(
wptype(3),
wptype(4),
wptype(4),
wptype(5),
wptype(4),
)
- vec5(wptype(1), wptype(2), wptype(3), wptype(4), wptype(4)),
vec5(wptype(2), wptype(2), wptype(1), wptype(1), wptype(0)),
)
kernel = getkernel(check_subtraction_unsigned, suffix=dtype.__name__)
if register_kernels:
return
wp.launch(kernel, dim=1, inputs=[], outputs=[], device=device)
def test_subtraction(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
def check_subtraction(
s2: wp.array(dtype=vec2),
s3: wp.array(dtype=vec3),
s4: wp.array(dtype=vec4),
s5: wp.array(dtype=vec5),
v2: wp.array(dtype=vec2),
v3: wp.array(dtype=vec3),
v4: wp.array(dtype=vec4),
v5: wp.array(dtype=vec5),
v20: wp.array(dtype=wptype),
v21: wp.array(dtype=wptype),
v30: wp.array(dtype=wptype),
v31: wp.array(dtype=wptype),
v32: wp.array(dtype=wptype),
v40: wp.array(dtype=wptype),
v41: wp.array(dtype=wptype),
v42: wp.array(dtype=wptype),
v43: wp.array(dtype=wptype),
v50: wp.array(dtype=wptype),
v51: wp.array(dtype=wptype),
v52: wp.array(dtype=wptype),
v53: wp.array(dtype=wptype),
v54: wp.array(dtype=wptype),
):
v2result = v2[0] - s2[0]
v3result = v3[0] - s3[0]
v4result = v4[0] - s4[0]
v5result = v5[0] - s5[0]
# multiply outputs by 2 so there's something to backpropagate:
v20[0] = wptype(2) * v2result[0]
v21[0] = wptype(2) * v2result[1]
v30[0] = wptype(2) * v3result[0]
v31[0] = wptype(2) * v3result[1]
v32[0] = wptype(2) * v3result[2]
v40[0] = wptype(2) * v4result[0]
v41[0] = wptype(2) * v4result[1]
v42[0] = wptype(2) * v4result[2]
v43[0] = wptype(2) * v4result[3]
v50[0] = wptype(2) * v5result[0]
v51[0] = wptype(2) * v5result[1]
v52[0] = wptype(2) * v5result[2]
v53[0] = wptype(2) * v5result[3]
v54[0] = wptype(2) * v5result[4]
kernel = getkernel(check_subtraction, suffix=dtype.__name__)
if register_kernels:
return
s2 = wp.array(randvals(rng, (1, 2), dtype), dtype=vec2, requires_grad=True, device=device)
s3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
s4 = wp.array(randvals(rng, (1, 4), dtype), dtype=vec4, requires_grad=True, device=device)
s5 = wp.array(randvals(rng, (1, 5), dtype), dtype=vec5, requires_grad=True, device=device)
v2 = wp.array(randvals(rng, (1, 2), dtype), dtype=vec2, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, (1, 4), dtype), dtype=vec4, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, (1, 5), dtype), dtype=vec5, requires_grad=True, device=device)
v20 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v21 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v30 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v31 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v32 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v40 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v41 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v42 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v43 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v50 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v51 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v52 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v53 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v54 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
s2,
s3,
s4,
s5,
v2,
v3,
v4,
v5,
],
outputs=[v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54],
device=device,
)
assert_np_equal(v20.numpy()[0], 2 * (v2.numpy()[0, 0] - s2.numpy()[0, 0]), tol=tol)
assert_np_equal(v21.numpy()[0], 2 * (v2.numpy()[0, 1] - s2.numpy()[0, 1]), tol=tol)
assert_np_equal(v30.numpy()[0], 2 * (v3.numpy()[0, 0] - s3.numpy()[0, 0]), tol=tol)
assert_np_equal(v31.numpy()[0], 2 * (v3.numpy()[0, 1] - s3.numpy()[0, 1]), tol=tol)
assert_np_equal(v32.numpy()[0], 2 * (v3.numpy()[0, 2] - s3.numpy()[0, 2]), tol=tol)
assert_np_equal(v40.numpy()[0], 2 * (v4.numpy()[0, 0] - s4.numpy()[0, 0]), tol=2 * tol)
assert_np_equal(v41.numpy()[0], 2 * (v4.numpy()[0, 1] - s4.numpy()[0, 1]), tol=2 * tol)
assert_np_equal(v42.numpy()[0], 2 * (v4.numpy()[0, 2] - s4.numpy()[0, 2]), tol=2 * tol)
assert_np_equal(v43.numpy()[0], 2 * (v4.numpy()[0, 3] - s4.numpy()[0, 3]), tol=2 * tol)
assert_np_equal(v50.numpy()[0], 2 * (v5.numpy()[0, 0] - s5.numpy()[0, 0]), tol=tol)
assert_np_equal(v51.numpy()[0], 2 * (v5.numpy()[0, 1] - s5.numpy()[0, 1]), tol=tol)
assert_np_equal(v52.numpy()[0], 2 * (v5.numpy()[0, 2] - s5.numpy()[0, 2]), tol=tol)
assert_np_equal(v53.numpy()[0], 2 * (v5.numpy()[0, 3] - s5.numpy()[0, 3]), tol=tol)
assert_np_equal(v54.numpy()[0], 2 * (v5.numpy()[0, 4] - s5.numpy()[0, 4]), tol=tol)
if dtype in np_float_types:
for i, l in enumerate([v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54]):
tape.backward(loss=l)
sgrads = np.concatenate([tape.gradients[v].numpy()[0] for v in [s2, s3, s4, s5]])
expected_grads = np.zeros_like(sgrads)
expected_grads[i] = -2
assert_np_equal(sgrads, expected_grads, tol=10 * tol)
allgrads = np.concatenate([tape.gradients[v].numpy()[0] for v in [v2, v3, v4, v5]])
expected_grads = np.zeros_like(allgrads)
            # d/dv (v - s) = 1, so the expected gradient is the factor of 2 applied to the outputs
expected_grads[i] = 2
assert_np_equal(allgrads, expected_grads, tol=tol)
tape.zero()
def test_length(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-7,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
def check_length(
v2: wp.array(dtype=vec2),
v3: wp.array(dtype=vec3),
v4: wp.array(dtype=vec4),
v5: wp.array(dtype=vec5),
l2: wp.array(dtype=wptype),
l3: wp.array(dtype=wptype),
l4: wp.array(dtype=wptype),
l5: wp.array(dtype=wptype),
l22: wp.array(dtype=wptype),
l23: wp.array(dtype=wptype),
l24: wp.array(dtype=wptype),
l25: wp.array(dtype=wptype),
):
l2[0] = wptype(2) * wp.length(v2[0])
l3[0] = wptype(2) * wp.length(v3[0])
l4[0] = wptype(2) * wp.length(v4[0])
l5[0] = wptype(2) * wp.length(v5[0])
l22[0] = wptype(2) * wp.length_sq(v2[0])
l23[0] = wptype(2) * wp.length_sq(v3[0])
l24[0] = wptype(2) * wp.length_sq(v4[0])
l25[0] = wptype(2) * wp.length_sq(v5[0])
kernel = getkernel(check_length, suffix=dtype.__name__)
if register_kernels:
return
v2 = wp.array(randvals(rng, (1, 2), dtype), dtype=vec2, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, (1, 4), dtype), dtype=vec4, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, (1, 5), dtype), dtype=vec5, requires_grad=True, device=device)
l2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
l3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
l4 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
l5 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
l22 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
l23 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
l24 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
l25 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
v2,
v3,
v4,
v5,
],
outputs=[l2, l3, l4, l5, l22, l23, l24, l25],
device=device,
)
assert_np_equal(l2.numpy()[0], 2 * np.linalg.norm(v2.numpy()), tol=10 * tol)
assert_np_equal(l3.numpy()[0], 2 * np.linalg.norm(v3.numpy()), tol=10 * tol)
assert_np_equal(l4.numpy()[0], 2 * np.linalg.norm(v4.numpy()), tol=10 * tol)
assert_np_equal(l5.numpy()[0], 2 * np.linalg.norm(v5.numpy()), tol=10 * tol)
assert_np_equal(l22.numpy()[0], 2 * np.linalg.norm(v2.numpy()) ** 2, tol=10 * tol)
assert_np_equal(l23.numpy()[0], 2 * np.linalg.norm(v3.numpy()) ** 2, tol=10 * tol)
assert_np_equal(l24.numpy()[0], 2 * np.linalg.norm(v4.numpy()) ** 2, tol=10 * tol)
assert_np_equal(l25.numpy()[0], 2 * np.linalg.norm(v5.numpy()) ** 2, tol=10 * tol)
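    # analytic gradients: d(2*|v|)/dv = 2*v/|v| and d(2*|v|^2)/dv = 4*v,
    # which is what the checks below compare against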
tape.backward(loss=l2)
grad = tape.gradients[v2].numpy()[0]
expected_grad = 2 * v2.numpy()[0] / np.linalg.norm(v2.numpy())
assert_np_equal(grad, expected_grad, tol=10 * tol)
tape.zero()
tape.backward(loss=l3)
grad = tape.gradients[v3].numpy()[0]
expected_grad = 2 * v3.numpy()[0] / np.linalg.norm(v3.numpy())
assert_np_equal(grad, expected_grad, tol=10 * tol)
tape.zero()
tape.backward(loss=l4)
grad = tape.gradients[v4].numpy()[0]
expected_grad = 2 * v4.numpy()[0] / np.linalg.norm(v4.numpy())
assert_np_equal(grad, expected_grad, tol=10 * tol)
tape.zero()
tape.backward(loss=l5)
grad = tape.gradients[v5].numpy()[0]
expected_grad = 2 * v5.numpy()[0] / np.linalg.norm(v5.numpy())
assert_np_equal(grad, expected_grad, tol=10 * tol)
tape.zero()
tape.backward(loss=l22)
grad = tape.gradients[v2].numpy()[0]
expected_grad = 4 * v2.numpy()[0]
assert_np_equal(grad, expected_grad, tol=10 * tol)
tape.zero()
tape.backward(loss=l23)
grad = tape.gradients[v3].numpy()[0]
expected_grad = 4 * v3.numpy()[0]
assert_np_equal(grad, expected_grad, tol=10 * tol)
tape.zero()
tape.backward(loss=l24)
grad = tape.gradients[v4].numpy()[0]
expected_grad = 4 * v4.numpy()[0]
assert_np_equal(grad, expected_grad, tol=10 * tol)
tape.zero()
tape.backward(loss=l25)
grad = tape.gradients[v5].numpy()[0]
expected_grad = 4 * v5.numpy()[0]
assert_np_equal(grad, expected_grad, tol=10 * tol)
tape.zero()
def test_normalize(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
def check_normalize(
v2: wp.array(dtype=vec2),
v3: wp.array(dtype=vec3),
v4: wp.array(dtype=vec4),
v5: wp.array(dtype=vec5),
n20: wp.array(dtype=wptype),
n21: wp.array(dtype=wptype),
n30: wp.array(dtype=wptype),
n31: wp.array(dtype=wptype),
n32: wp.array(dtype=wptype),
n40: wp.array(dtype=wptype),
n41: wp.array(dtype=wptype),
n42: wp.array(dtype=wptype),
n43: wp.array(dtype=wptype),
n50: wp.array(dtype=wptype),
n51: wp.array(dtype=wptype),
n52: wp.array(dtype=wptype),
n53: wp.array(dtype=wptype),
n54: wp.array(dtype=wptype),
):
n2 = wptype(2) * wp.normalize(v2[0])
n3 = wptype(2) * wp.normalize(v3[0])
n4 = wptype(2) * wp.normalize(v4[0])
n5 = wptype(2) * wp.normalize(v5[0])
n20[0] = n2[0]
n21[0] = n2[1]
n30[0] = n3[0]
n31[0] = n3[1]
n32[0] = n3[2]
n40[0] = n4[0]
n41[0] = n4[1]
n42[0] = n4[2]
n43[0] = n4[3]
n50[0] = n5[0]
n51[0] = n5[1]
n52[0] = n5[2]
n53[0] = n5[3]
n54[0] = n5[4]
def check_normalize_alt(
v2: wp.array(dtype=vec2),
v3: wp.array(dtype=vec3),
v4: wp.array(dtype=vec4),
v5: wp.array(dtype=vec5),
n20: wp.array(dtype=wptype),
n21: wp.array(dtype=wptype),
n30: wp.array(dtype=wptype),
n31: wp.array(dtype=wptype),
n32: wp.array(dtype=wptype),
n40: wp.array(dtype=wptype),
n41: wp.array(dtype=wptype),
n42: wp.array(dtype=wptype),
n43: wp.array(dtype=wptype),
n50: wp.array(dtype=wptype),
n51: wp.array(dtype=wptype),
n52: wp.array(dtype=wptype),
n53: wp.array(dtype=wptype),
n54: wp.array(dtype=wptype),
):
n2 = wptype(2) * v2[0] / wp.length(v2[0])
n3 = wptype(2) * v3[0] / wp.length(v3[0])
n4 = wptype(2) * v4[0] / wp.length(v4[0])
n5 = wptype(2) * v5[0] / wp.length(v5[0])
n20[0] = n2[0]
n21[0] = n2[1]
n30[0] = n3[0]
n31[0] = n3[1]
n32[0] = n3[2]
n40[0] = n4[0]
n41[0] = n4[1]
n42[0] = n4[2]
n43[0] = n4[3]
n50[0] = n5[0]
n51[0] = n5[1]
n52[0] = n5[2]
n53[0] = n5[3]
n54[0] = n5[4]
normalize_kernel = getkernel(check_normalize, suffix=dtype.__name__)
normalize_alt_kernel = getkernel(check_normalize_alt, suffix=dtype.__name__)
if register_kernels:
return
    # the operations used in check_normalize_alt are covered by other tests, so here we just
    # verify that the two kernels give the same results and gradients
v2 = wp.array(randvals(rng, (1, 2), dtype), dtype=vec2, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, (1, 4), dtype), dtype=vec4, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, (1, 5), dtype), dtype=vec5, requires_grad=True, device=device)
n20 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n21 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n30 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n31 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n32 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n40 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n41 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n42 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n43 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n50 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n51 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n52 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n53 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n54 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n20_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n21_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n30_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n31_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n32_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n40_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n41_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n42_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n43_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n50_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n51_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n52_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n53_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n54_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
outputs0 = [
n20,
n21,
n30,
n31,
n32,
n40,
n41,
n42,
n43,
n50,
n51,
n52,
n53,
n54,
]
tape0 = wp.Tape()
with tape0:
wp.launch(
normalize_kernel,
dim=1,
inputs=[
v2,
v3,
v4,
v5,
],
outputs=outputs0,
device=device,
)
outputs1 = [
n20_alt,
n21_alt,
n30_alt,
n31_alt,
n32_alt,
n40_alt,
n41_alt,
n42_alt,
n43_alt,
n50_alt,
n51_alt,
n52_alt,
n53_alt,
n54_alt,
]
tape1 = wp.Tape()
with tape1:
wp.launch(
normalize_alt_kernel,
dim=1,
inputs=[
v2,
v3,
v4,
v5,
],
outputs=outputs1,
device=device,
)
for ncmp, ncmpalt in zip(outputs0, outputs1):
assert_np_equal(ncmp.numpy()[0], ncmpalt.numpy()[0], tol=10 * tol)
invecs = [
v2,
v2,
v3,
v3,
v3,
v4,
v4,
v4,
v4,
v5,
v5,
v5,
v5,
v5,
]
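    # invecs pairs each scalar output with the input vector it was computed from,
    # so the per-component gradients of the two tapes can be compared directly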
for ncmp, ncmpalt, v in zip(outputs0, outputs1, invecs):
tape0.backward(loss=ncmp)
tape1.backward(loss=ncmpalt)
assert_np_equal(tape0.gradients[v].numpy()[0], tape1.gradients[v].numpy()[0], tol=10 * tol)
tape0.zero()
tape1.zero()
def test_crossproduct(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec3 = wp.types.vector(length=3, dtype=wptype)
def check_cross(
s3: wp.array(dtype=vec3),
v3: wp.array(dtype=vec3),
c0: wp.array(dtype=wptype),
c1: wp.array(dtype=wptype),
c2: wp.array(dtype=wptype),
):
c = wp.cross(s3[0], v3[0])
# multiply outputs by 2 so we've got something to backpropagate:
c0[0] = wptype(2) * c[0]
c1[0] = wptype(2) * c[1]
c2[0] = wptype(2) * c[2]
kernel = getkernel(check_cross, suffix=dtype.__name__)
if register_kernels:
return
s3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
c0 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
c1 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
c2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
s3,
v3,
],
outputs=[c0, c1, c2],
device=device,
)
result = 2 * np.cross(s3.numpy(), v3.numpy())[0]
assert_np_equal(c0.numpy()[0], result[0], tol=10 * tol)
assert_np_equal(c1.numpy()[0], result[1], tol=10 * tol)
assert_np_equal(c2.numpy()[0], result[2], tol=10 * tol)
if dtype in np_float_types:
# c.x = sy vz - sz vy
# c.y = sz vx - sx vz
# c.z = sx vy - sy vx
# ( d/dsx d/dsy d/dsz )c.x = ( 0 vz -vy )
# ( d/dsx d/dsy d/dsz )c.y = ( -vz 0 vx )
# ( d/dsx d/dsy d/dsz )c.z = ( vy -vx 0 )
# ( d/dvx d/dvy d/dvz )c.x = (0 -sz sy)
# ( d/dvx d/dvy d/dvz )c.y = (sz 0 -sx)
# ( d/dvx d/dvy d/dvz )c.z = (-sy sx 0)
tape.backward(loss=c0)
assert_np_equal(
tape.gradients[s3].numpy(), 2.0 * np.array([0, v3.numpy()[0, 2], -v3.numpy()[0, 1]]), tol=10 * tol
)
assert_np_equal(
tape.gradients[v3].numpy(), 2.0 * np.array([0, -s3.numpy()[0, 2], s3.numpy()[0, 1]]), tol=10 * tol
)
tape.zero()
tape.backward(loss=c1)
assert_np_equal(
tape.gradients[s3].numpy(), 2.0 * np.array([-v3.numpy()[0, 2], 0, v3.numpy()[0, 0]]), tol=10 * tol
)
assert_np_equal(
tape.gradients[v3].numpy(), 2.0 * np.array([s3.numpy()[0, 2], 0, -s3.numpy()[0, 0]]), tol=10 * tol
)
tape.zero()
tape.backward(loss=c2)
assert_np_equal(
tape.gradients[s3].numpy(), 2.0 * np.array([v3.numpy()[0, 1], -v3.numpy()[0, 0], 0]), tol=10 * tol
)
assert_np_equal(
tape.gradients[v3].numpy(), 2.0 * np.array([-s3.numpy()[0, 1], s3.numpy()[0, 0], 0]), tol=10 * tol
)
tape.zero()
def test_casting_constructors(test, device, dtype, register_kernels=False):
np_type = np.dtype(dtype)
wp_type = wp.types.np_dtype_to_warp_type[np_type]
vec3 = wp.types.vector(length=3, dtype=wp_type)
np16 = np.dtype(np.float16)
wp16 = wp.types.np_dtype_to_warp_type[np16]
np32 = np.dtype(np.float32)
wp32 = wp.types.np_dtype_to_warp_type[np32]
np64 = np.dtype(np.float64)
wp64 = wp.types.np_dtype_to_warp_type[np64]
def cast_float16(a: wp.array(dtype=wp_type, ndim=2), b: wp.array(dtype=wp16, ndim=2)):
tid = wp.tid()
v1 = vec3(a[tid, 0], a[tid, 1], a[tid, 2])
v2 = wp.vector(v1, dtype=wp16)
b[tid, 0] = v2[0]
b[tid, 1] = v2[1]
b[tid, 2] = v2[2]
def cast_float32(a: wp.array(dtype=wp_type, ndim=2), b: wp.array(dtype=wp32, ndim=2)):
tid = wp.tid()
v1 = vec3(a[tid, 0], a[tid, 1], a[tid, 2])
v2 = wp.vector(v1, dtype=wp32)
b[tid, 0] = v2[0]
b[tid, 1] = v2[1]
b[tid, 2] = v2[2]
def cast_float64(a: wp.array(dtype=wp_type, ndim=2), b: wp.array(dtype=wp64, ndim=2)):
tid = wp.tid()
v1 = vec3(a[tid, 0], a[tid, 1], a[tid, 2])
v2 = wp.vector(v1, dtype=wp64)
b[tid, 0] = v2[0]
b[tid, 1] = v2[1]
b[tid, 2] = v2[2]
kernel_16 = getkernel(cast_float16, suffix=dtype.__name__)
kernel_32 = getkernel(cast_float32, suffix=dtype.__name__)
kernel_64 = getkernel(cast_float64, suffix=dtype.__name__)
if register_kernels:
return
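    # each cast below is element-wise and linear, so an all-ones input must map to an
    # all-ones output, and the all-ones gradient seeded on b should flow back to a unchanged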
# check casting to float 16
a = wp.array(np.ones((1, 3), dtype=np_type), dtype=wp_type, requires_grad=True, device=device)
b = wp.array(np.zeros((1, 3), dtype=np16), dtype=wp16, requires_grad=True, device=device)
b_result = np.ones((1, 3), dtype=np16)
b_grad = wp.array(np.ones((1, 3), dtype=np16), dtype=wp16, device=device)
a_grad = wp.array(np.ones((1, 3), dtype=np_type), dtype=wp_type, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel=kernel_16, dim=1, inputs=[a, b], device=device)
tape.backward(grads={b: b_grad})
out = tape.gradients[a].numpy()
assert_np_equal(b.numpy(), b_result)
assert_np_equal(out, a_grad.numpy())
# check casting to float 32
a = wp.array(np.ones((1, 3), dtype=np_type), dtype=wp_type, requires_grad=True, device=device)
b = wp.array(np.zeros((1, 3), dtype=np32), dtype=wp32, requires_grad=True, device=device)
b_result = np.ones((1, 3), dtype=np32)
b_grad = wp.array(np.ones((1, 3), dtype=np32), dtype=wp32, device=device)
a_grad = wp.array(np.ones((1, 3), dtype=np_type), dtype=wp_type, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel=kernel_32, dim=1, inputs=[a, b], device=device)
tape.backward(grads={b: b_grad})
out = tape.gradients[a].numpy()
assert_np_equal(b.numpy(), b_result)
assert_np_equal(out, a_grad.numpy())
# check casting to float 64
a = wp.array(np.ones((1, 3), dtype=np_type), dtype=wp_type, requires_grad=True, device=device)
b = wp.array(np.zeros((1, 3), dtype=np64), dtype=wp64, requires_grad=True, device=device)
b_result = np.ones((1, 3), dtype=np64)
b_grad = wp.array(np.ones((1, 3), dtype=np64), dtype=wp64, device=device)
a_grad = wp.array(np.ones((1, 3), dtype=np_type), dtype=wp_type, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel=kernel_64, dim=1, inputs=[a, b], device=device)
tape.backward(grads={b: b_grad})
out = tape.gradients[a].numpy()
assert_np_equal(b.numpy(), b_result)
assert_np_equal(out, a_grad.numpy())
@wp.kernel
def test_vector_constructor_value_func():
a = wp.vec2()
b = wp.vector(a, dtype=wp.float16)
c = wp.vector(a)
d = wp.vector(a, length=2)
# Test vector constructors using explicit type (float16)
# note that these tests are specifically not using generics / closure
# args to create kernels dynamically (like the rest of this file)
# as those use different code paths to resolve arg types which
# has led to regressions.
@wp.kernel
def test_constructors_explicit_precision():
    # construction for custom vector types
ones = wp.vector(wp.float16(1.0), length=2)
zeros = wp.vector(length=2, dtype=wp.float16)
custom = wp.vector(wp.float16(0.0), wp.float16(1.0))
for i in range(2):
wp.expect_eq(ones[i], wp.float16(1.0))
wp.expect_eq(zeros[i], wp.float16(0.0))
wp.expect_eq(custom[i], wp.float16(i))
# Same as above but with a default (float/int) type
# which tests some different code paths that
# need to ensure types are correctly canonicalized
# during codegen
@wp.kernel
def test_constructors_default_precision():
    # construction for custom vector types
ones = wp.vector(1.0, length=2)
zeros = wp.vector(length=2, dtype=float)
custom = wp.vector(0.0, 1.0)
for i in range(2):
wp.expect_eq(ones[i], 1.0)
wp.expect_eq(zeros[i], 0.0)
wp.expect_eq(custom[i], float(i))
@wp.kernel
def test_vector_mutation(expected: wp.types.vector(length=10, dtype=float)):
v = wp.vector(length=10, dtype=float)
# test element indexing
v[0] = 1.0
for i in range(1, 10):
v[i] = float(i) + 1.0
wp.expect_eq(v, expected)
CONSTANT_LENGTH = wp.constant(10)
# tests that we can use global constants in length keyword argument
# for vector constructor
@wp.kernel
def test_constructors_constant_length():
v = wp.vector(length=(CONSTANT_LENGTH), dtype=float)
for i in range(CONSTANT_LENGTH):
v[i] = float(i)
devices = get_test_devices()
class TestVec(unittest.TestCase):
def test_tpl_ops_with_anon(self):
vec3i = wp.vec(3, dtype=int)
v = wp.vec3i(1, 2, 3)
v += vec3i(2, 3, 4)
v -= vec3i(3, 4, 5)
self.assertSequenceEqual(v, (0, 1, 2))
v = vec3i(1, 2, 3)
v += wp.vec3i(2, 3, 4)
v -= wp.vec3i(3, 4, 5)
self.assertSequenceEqual(v, (0, 1, 2))
add_kernel_test(TestVec, test_vector_constructor_value_func, dim=1, devices=devices)
add_kernel_test(TestVec, test_constructors_explicit_precision, dim=1, devices=devices)
add_kernel_test(TestVec, test_constructors_default_precision, dim=1, devices=devices)
add_kernel_test(TestVec, test_constructors_constant_length, dim=1, devices=devices)
vec10 = wp.types.vector(length=10, dtype=float)
add_kernel_test(
TestVec,
test_vector_mutation,
dim=1,
inputs=[vec10(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0)],
devices=devices,
)
for dtype in np_unsigned_int_types:
add_function_test_register_kernel(
TestVec,
f"test_subtraction_unsigned_{dtype.__name__}",
test_subtraction_unsigned,
devices=devices,
dtype=dtype,
)
for dtype in np_signed_int_types + np_float_types:
add_function_test_register_kernel(
TestVec, f"test_negation_{dtype.__name__}", test_negation, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestVec, f"test_subtraction_{dtype.__name__}", test_subtraction, devices=devices, dtype=dtype
)
for dtype in np_float_types:
add_function_test_register_kernel(
TestVec, f"test_crossproduct_{dtype.__name__}", test_crossproduct, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestVec, f"test_length_{dtype.__name__}", test_length, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestVec, f"test_normalize_{dtype.__name__}", test_normalize, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestVec,
f"test_casting_constructors_{dtype.__name__}",
test_casting_constructors,
devices=devices,
dtype=dtype,
)
add_function_test(
TestVec,
"test_anon_constructor_error_dtype_keyword_missing",
test_anon_constructor_error_dtype_keyword_missing,
devices=devices,
)
add_function_test(
TestVec,
"test_anon_constructor_error_length_mismatch",
test_anon_constructor_error_length_mismatch,
devices=devices,
)
add_function_test(
TestVec,
"test_anon_constructor_error_numeric_arg_missing_1",
test_anon_constructor_error_numeric_arg_missing_1,
devices=devices,
)
add_function_test(
TestVec,
"test_anon_constructor_error_numeric_arg_missing_2",
test_anon_constructor_error_numeric_arg_missing_2,
devices=devices,
)
add_function_test(
TestVec,
"test_anon_constructor_error_dtype_keyword_extraneous",
test_anon_constructor_error_dtype_keyword_extraneous,
devices=devices,
)
add_function_test(
TestVec,
"test_anon_constructor_error_numeric_args_mismatch",
test_anon_constructor_error_numeric_args_mismatch,
devices=devices,
)
add_function_test(
TestVec,
"test_tpl_constructor_error_incompatible_sizes",
test_tpl_constructor_error_incompatible_sizes,
devices=devices,
)
add_function_test(
TestVec,
"test_tpl_constructor_error_numeric_args_mismatch",
test_tpl_constructor_error_numeric_args_mismatch,
devices=devices,
)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=True)
| 42,423 | Python | 32.589865 | 155 | 0.582443 |
NVIDIA/warp/warp/tests/test_fast_math.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def test_pow(e: float, result: float):
tid = wp.tid()
y = wp.pow(-2.0, e)
wp.expect_eq(y, result)
def test_fast_math_disabled(test, device):
# on all systems pow() should handle negative base correctly with fast math off
wp.set_module_options({"fast_math": False})
wp.launch(test_pow, dim=1, inputs=[2.0, 4.0], device=device)
@unittest.expectedFailure
def test_fast_math_cuda(test, device):
# on CUDA with --fast-math enabled taking the pow()
# of a negative number will result in a NaN
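    # (fast math typically lowers pow() to an exp/log sequence, and log of a negative base is NaN)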
wp.set_module_options({"fast_math": True})
try:
wp.launch(test_pow, dim=1, inputs=[2.0, 4.0], device=device)
finally:
# Turn fast math back off
wp.set_module_options({"fast_math": False})
class TestFastMath(unittest.TestCase):
def test_fast_math_cpu(self):
        # on the CPU, pow() should handle a negative base correctly even with fast math enabled
wp.set_module_options({"fast_math": True})
try:
wp.launch(test_pow, dim=1, inputs=[2.0, 4.0], device="cpu")
finally:
wp.set_module_options({"fast_math": False})
devices = get_test_devices()
add_function_test(TestFastMath, "test_fast_math_cuda", test_fast_math_cuda, devices=get_cuda_test_devices())
add_function_test(TestFastMath, "test_fast_math_disabled", test_fast_math_disabled, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 1,951 | Python | 30.999999 | 108 | 0.69144 |
NVIDIA/warp/warp/tests/aux_test_class_kernel.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Dummy class used in test_reload.py"""
import warp as wp
class ClassKernelTest:
def __init__(self, device):
# 3x3 frames in the rest pose:
self.identities = wp.zeros(shape=10, dtype=wp.mat33, device=device)
wp.launch(kernel=self.gen_identities_kernel, dim=10, inputs=[self.identities], device=device)
@wp.func
def return_identity(e: int):
return wp.mat33(1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0)
@wp.kernel
def gen_identities_kernel(s: wp.array(dtype=wp.mat33)):
tid = wp.tid()
s[tid] = ClassKernelTest.return_identity(tid)
| 1,027 | Python | 37.074073 | 101 | 0.70594 |
NVIDIA/warp/warp/tests/unittest_suites.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Warp Test Suites
This file is intended to define functions that return TestSuite objects, which
can be used in parallel or serial unit tests (with optional code coverage)
"""
import os
import sys
import unittest
START_DIRECTORY = os.path.realpath(os.path.dirname(__file__))
TOP_LEVEL_DIRECTORY = os.path.realpath(os.path.join(START_DIRECTORY, "..", ".."))
def _create_suite_from_test_classes(test_loader, test_classes):
suite = unittest.TestSuite()
for test in test_classes:
sub_suite = unittest.TestSuite()
# Note that the test_loader might have testNamePatterns set
sub_suite.addTest(test_loader.loadTestsFromTestCase(test))
suite.addTest(sub_suite)
return suite
def auto_discover_suite(loader=unittest.defaultTestLoader, pattern="test*.py"):
"""Uses unittest auto-discovery to build a test suite (test_*.py pattern)"""
return loader.discover(start_dir=START_DIRECTORY, pattern=pattern, top_level_dir=TOP_LEVEL_DIRECTORY)
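# Example usage (a minimal sketch; any unittest runner works):
#   suite = auto_discover_suite()
#   unittest.TextTestRunner(verbosity=2).run(suite)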
def _iter_class_suites(test_suite):
"""Iterate class-level test suites - test suites that contains test cases
From unittest_parallel.py
"""
has_cases = any(isinstance(suite, unittest.TestCase) for suite in test_suite)
if has_cases:
yield test_suite
else:
for suite in test_suite:
yield from _iter_class_suites(suite)
def compare_unittest_suites(
    test_loader: unittest.TestLoader, test_suite_name: str, reference_suite: unittest.TestSuite
) -> unittest.TestSuite:
    """Prints the test classes in `reference_suite` that are missing from the suite selected by `test_suite_name`, and returns that suite."""
test_suite_fn = getattr(sys.modules[__name__], test_suite_name + "_suite")
test_suite = test_suite_fn(test_loader)
test_suite_classes_str = {
type(test_suite._tests[0]).__name__
for test_suite in list(_iter_class_suites(test_suite))
if test_suite.countTestCases() > 0
}
reference_suite_classes_str = {
type(test_suite._tests[0]).__name__
for test_suite in list(_iter_class_suites(reference_suite))
if test_suite.countTestCases() > 0
}
set_difference = reference_suite_classes_str - test_suite_classes_str
print(f"Selected test suite '{test_suite_name}'")
if len(set_difference) > 0:
print(f"Test suite '{test_suite_name}' omits the following test classes:")
for test_entry in set_difference:
print(f" {test_entry}")
return test_suite
def default_suite(test_loader: unittest.TestLoader = unittest.defaultTestLoader):
"""Example of a manually constructed test suite.
Intended to be modified to create additional test suites
"""
from warp.tests.test_adam import TestAdam
from warp.tests.test_arithmetic import TestArithmetic
from warp.tests.test_array import TestArray
from warp.tests.test_array_reduce import TestArrayReduce
from warp.tests.test_async import TestAsync
from warp.tests.test_atomic import TestAtomic
from warp.tests.test_bool import TestBool
from warp.tests.test_builtins_resolution import TestBuiltinsResolution
from warp.tests.test_bvh import TestBvh
from warp.tests.test_closest_point_edge_edge import TestClosestPointEdgeEdgeMethods
from warp.tests.test_codegen import TestCodeGen
from warp.tests.test_compile_consts import TestConstants
from warp.tests.test_conditional import TestConditional
from warp.tests.test_copy import TestCopy
from warp.tests.test_ctypes import TestCTypes
from warp.tests.test_dense import TestDense
from warp.tests.test_devices import TestDevices
from warp.tests.test_dlpack import TestDLPack
from warp.tests.test_examples import (
TestCoreExamples,
TestFemDiffusionExamples,
TestFemExamples,
TestOptimExamples,
TestSimExamples,
)
from warp.tests.test_fabricarray import TestFabricArray
from warp.tests.test_fast_math import TestFastMath
from warp.tests.test_fem import TestFem, TestFemShapeFunctions
from warp.tests.test_fp16 import TestFp16
from warp.tests.test_func import TestFunc
from warp.tests.test_generics import TestGenerics
from warp.tests.test_grad import TestGrad
from warp.tests.test_grad_customs import TestGradCustoms
from warp.tests.test_hash_grid import TestHashGrid
from warp.tests.test_import import TestImport
from warp.tests.test_indexedarray import TestIndexedArray
from warp.tests.test_intersect import TestIntersect
from warp.tests.test_jax import TestJax
from warp.tests.test_large import TestLarge
from warp.tests.test_launch import TestLaunch
from warp.tests.test_lerp import TestLerp
from warp.tests.test_linear_solvers import TestLinearSolvers
from warp.tests.test_lvalue import TestLValue
from warp.tests.test_marching_cubes import TestMarchingCubes
from warp.tests.test_mat import TestMat
from warp.tests.test_mat_lite import TestMatLite
from warp.tests.test_mat_scalar_ops import TestMatScalarOps
from warp.tests.test_math import TestMath
from warp.tests.test_matmul import TestMatmul
from warp.tests.test_matmul_lite import TestMatmulLite
from warp.tests.test_mempool import TestMempool
from warp.tests.test_mesh import TestMesh
from warp.tests.test_mesh_query_aabb import TestMeshQueryAABBMethods
from warp.tests.test_mesh_query_point import TestMeshQueryPoint
from warp.tests.test_mesh_query_ray import TestMeshQueryRay
from warp.tests.test_mlp import TestMLP
from warp.tests.test_model import TestModel
from warp.tests.test_modules_lite import TestModuleLite
from warp.tests.test_multigpu import TestMultiGPU
from warp.tests.test_noise import TestNoise
from warp.tests.test_operators import TestOperators
from warp.tests.test_options import TestOptions
from warp.tests.test_peer import TestPeer
from warp.tests.test_pinned import TestPinned
from warp.tests.test_print import TestPrint
from warp.tests.test_quat import TestQuat
from warp.tests.test_rand import TestRand
from warp.tests.test_reload import TestReload
from warp.tests.test_rounding import TestRounding
from warp.tests.test_runlength_encode import TestRunlengthEncode
from warp.tests.test_sim_grad import TestSimGradients
from warp.tests.test_sim_kinematics import TestSimKinematics
from warp.tests.test_smoothstep import TestSmoothstep
from warp.tests.test_snippet import TestSnippets
from warp.tests.test_sparse import TestSparse
from warp.tests.test_spatial import TestSpatial
from warp.tests.test_streams import TestStreams
from warp.tests.test_struct import TestStruct
from warp.tests.test_tape import TestTape
from warp.tests.test_torch import TestTorch
from warp.tests.test_transient_module import TestTransientModule
from warp.tests.test_types import TestTypes
from warp.tests.test_utils import TestUtils
from warp.tests.test_vec import TestVec
from warp.tests.test_vec_lite import TestVecLite
from warp.tests.test_vec_scalar_ops import TestVecScalarOps
from warp.tests.test_verify_fp import TestVerifyFP
from warp.tests.test_volume import TestVolume
from warp.tests.test_volume_write import TestVolumeWrite
test_classes = [
TestAdam,
TestArithmetic,
TestArray,
TestArrayReduce,
TestAsync,
TestAtomic,
TestBool,
TestBuiltinsResolution,
TestBvh,
TestClosestPointEdgeEdgeMethods,
TestCodeGen,
TestConstants,
TestConditional,
TestCopy,
TestCTypes,
TestDense,
TestDevices,
TestDLPack,
TestCoreExamples,
TestFemDiffusionExamples,
TestFemExamples,
TestOptimExamples,
TestSimExamples,
TestFabricArray,
TestFastMath,
TestFem,
TestFemShapeFunctions,
TestFp16,
TestFunc,
TestGenerics,
TestGrad,
TestGradCustoms,
TestHashGrid,
TestImport,
TestIndexedArray,
TestIntersect,
TestJax,
TestLarge,
TestLaunch,
TestLerp,
TestLinearSolvers,
TestLValue,
TestMarchingCubes,
TestMat,
TestMatLite,
TestMatScalarOps,
TestMath,
TestMatmul,
TestMatmulLite,
TestMempool,
TestMesh,
TestMeshQueryAABBMethods,
TestMeshQueryPoint,
TestMeshQueryRay,
TestMLP,
TestModel,
TestModuleLite,
TestMultiGPU,
TestNoise,
TestOperators,
TestOptions,
TestPeer,
TestPinned,
TestPrint,
TestQuat,
TestRand,
TestReload,
TestRounding,
TestRunlengthEncode,
TestSimGradients,
TestSimKinematics,
TestSmoothstep,
TestSparse,
TestSnippets,
TestSpatial,
TestStreams,
TestStruct,
TestTape,
TestTorch,
TestTransientModule,
TestTypes,
TestUtils,
TestVec,
TestVecLite,
TestVecScalarOps,
TestVerifyFP,
TestVolume,
TestVolumeWrite,
]
return _create_suite_from_test_classes(test_loader, test_classes)
def kit_suite(test_loader: unittest.TestLoader = unittest.defaultTestLoader):
"""Tries to mimic the test suite used for testing omni.warp.core in Kit
Requires manual updates with test_ext.py for now.
"""
from warp.tests.test_array import TestArray
from warp.tests.test_array_reduce import TestArrayReduce
from warp.tests.test_bvh import TestBvh
from warp.tests.test_codegen import TestCodeGen
from warp.tests.test_compile_consts import TestConstants
from warp.tests.test_conditional import TestConditional
from warp.tests.test_ctypes import TestCTypes
from warp.tests.test_devices import TestDevices
from warp.tests.test_dlpack import TestDLPack
from warp.tests.test_fabricarray import TestFabricArray
from warp.tests.test_func import TestFunc
from warp.tests.test_generics import TestGenerics
from warp.tests.test_grad_customs import TestGradCustoms
from warp.tests.test_hash_grid import TestHashGrid
from warp.tests.test_indexedarray import TestIndexedArray
from warp.tests.test_launch import TestLaunch
from warp.tests.test_marching_cubes import TestMarchingCubes
from warp.tests.test_mat_lite import TestMatLite
from warp.tests.test_math import TestMath
from warp.tests.test_matmul_lite import TestMatmulLite
from warp.tests.test_mesh import TestMesh
from warp.tests.test_mesh_query_aabb import TestMeshQueryAABBMethods
from warp.tests.test_mesh_query_point import TestMeshQueryPoint
from warp.tests.test_mesh_query_ray import TestMeshQueryRay
from warp.tests.test_modules_lite import TestModuleLite
from warp.tests.test_noise import TestNoise
from warp.tests.test_operators import TestOperators
from warp.tests.test_quat import TestQuat
from warp.tests.test_rand import TestRand
from warp.tests.test_rounding import TestRounding
from warp.tests.test_runlength_encode import TestRunlengthEncode
from warp.tests.test_sparse import TestSparse
from warp.tests.test_streams import TestStreams
from warp.tests.test_tape import TestTape
from warp.tests.test_transient_module import TestTransientModule
from warp.tests.test_types import TestTypes
from warp.tests.test_utils import TestUtils
from warp.tests.test_vec_lite import TestVecLite
from warp.tests.test_volume import TestVolume
from warp.tests.test_volume_write import TestVolumeWrite
test_classes = [
TestArray,
TestArrayReduce,
TestBvh,
TestCodeGen,
TestConstants,
TestConditional,
TestCTypes,
TestDevices,
TestDLPack,
TestFabricArray,
TestFunc,
TestGenerics,
TestGradCustoms,
TestHashGrid,
TestIndexedArray,
TestLaunch,
TestMarchingCubes,
TestMatLite,
TestMath,
TestMatmulLite,
TestMesh,
TestMeshQueryAABBMethods,
TestMeshQueryPoint,
TestMeshQueryRay,
TestModuleLite,
TestNoise,
TestOperators,
TestQuat,
TestRand,
TestRounding,
TestRunlengthEncode,
TestSparse,
TestStreams,
TestTape,
TestTransientModule,
TestTypes,
TestUtils,
TestVecLite,
TestVolume,
TestVolumeWrite,
]
return _create_suite_from_test_classes(test_loader, test_classes)
| 13,130 | Python | 35.074176 | 105 | 0.711653 |
NVIDIA/warp/warp/tests/test_mat.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
np_signed_int_types = [
np.int8,
np.int16,
np.int32,
np.int64,
np.byte,
]
np_float_types = [np.float16, np.float32, np.float64]
def randvals(rng, shape, dtype):
if dtype in np_float_types:
return rng.standard_normal(size=shape).astype(dtype)
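    # 8-bit types get a narrower value range, presumably so products of entries stay representable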
elif dtype in [np.int8, np.uint8, np.byte, np.ubyte]:
return rng.integers(1, high=3, size=shape, dtype=dtype)
return rng.integers(1, high=5, size=shape, dtype=dtype)
kernel_cache = {}
def getkernel(func, suffix=""):
key = func.__name__ + "_" + suffix
if key not in kernel_cache:
kernel_cache[key] = wp.Kernel(func=func, key=key)
return kernel_cache[key]
def get_select_kernel(dtype):
def output_select_kernel_fn(
input: wp.array(dtype=dtype),
index: int,
out: wp.array(dtype=dtype),
):
out[0] = input[index]
return getkernel(output_select_kernel_fn, suffix=dtype.__name__)
def test_anon_constructor_error_shape_keyword_missing(test, device):
@wp.kernel
def kernel():
wp.matrix(1.0, 2.0, 3.0)
with test.assertRaisesRegex(
RuntimeError,
r"shape keyword must be specified when calling matrix\(\) function$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_anon_constructor_error_dtype_keyword_missing(test, device):
@wp.kernel
def kernel():
wp.matrix(shape=(3, 3))
with test.assertRaisesRegex(
RuntimeError,
r"matrix\(\) must have dtype as a keyword argument if it has no " r"positional arguments$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_anon_constructor_error_shape_mismatch(test, device):
@wp.kernel
def kernel():
wp.matrix(
wp.matrix(shape=(1, 2), dtype=float),
shape=(3, 4),
dtype=float,
)
with test.assertRaisesRegex(
RuntimeError,
r"Incompatible matrix sizes for casting copy constructor, " r"\(3, 4\) vs \(1, 2\)$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_anon_constructor_error_invalid_arg_count(test, device):
@wp.kernel
def kernel():
wp.matrix(1.0, 2.0, 3.0, shape=(2, 2), dtype=float)
with test.assertRaisesRegex(
RuntimeError,
r"Wrong number of arguments for matrix\(\) function, must initialize "
r"with either a scalar value, or m\*n values$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_tpl_constructor_error_incompatible_sizes(test, device):
@wp.kernel
def kernel():
wp.mat33(wp.mat22(1.0, 2.0, 3.0, 4.0))
with test.assertRaisesRegex(
RuntimeError,
r"Incompatible matrix sizes for casting copy constructor, " r"\(3, 3\) vs \(2, 2\)$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_tpl_constructor_error_invalid_scalar_type(test, device):
@wp.kernel
def kernel():
wp.mat22(1, 2, 3, 4)
with test.assertRaisesRegex(
RuntimeError,
r"Wrong scalar type for mat 2,2,<class 'warp.types.float32'> constructor$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_tpl_constructor_error_invalid_vector_count(test, device):
@wp.kernel
def kernel():
wp.mat22(wp.vec3(1.0, 2.0, 3.0))
with test.assertRaisesRegex(
RuntimeError,
r"Wrong number of vectors when attempting to construct a matrix " r"with column vectors$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_tpl_constructor_error_invalid_vector_shape(test, device):
@wp.kernel
def kernel():
wp.mat22(wp.vec3(1.0, 2.0, 3.0), wp.vec3(4.0, 5.0, 6.0))
with test.assertRaisesRegex(
RuntimeError,
r"Wrong vector row count when attempting to construct a matrix " r"with column vectors$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_tpl_constructor_error_invalid_arg_count(test, device):
@wp.kernel
def kernel():
wp.mat22(1.0, 2.0, 3.0)
with test.assertRaisesRegex(
RuntimeError,
r"Wrong number of scalars when attempting to construct a matrix " r"from a list of components$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_py_arithmetic_ops(test, device, dtype):
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
def make_mat(*args):
if wptype in wp.types.int_types:
# Cast to the correct integer type to simulate wrapping.
return tuple(tuple(wptype._type_(x).value for x in row) for row in args)
return args
def make_vec(*args):
if wptype in wp.types.int_types:
# Cast to the correct integer type to simulate wrapping.
return tuple(wptype._type_(x).value for x in args)
return args
mat_cls = wp.mat((3, 3), wptype)
vec_cls = wp.vec(3, wptype)
m = mat_cls(((-1, 2, 3), (4, -5, 6), (7, 8, -9)))
test.assertSequenceEqual(+m, make_mat((-1, 2, 3), (4, -5, 6), (7, 8, -9)))
test.assertSequenceEqual(-m, make_mat((1, -2, -3), (-4, 5, -6), (-7, -8, 9)))
test.assertSequenceEqual(m + mat_cls((5, 5, 5) * 3), make_mat((4, 7, 8), (9, 0, 11), (12, 13, -4)))
test.assertSequenceEqual(m - mat_cls((5, 5, 5) * 3), make_mat((-6, -3, -2), (-1, -10, 1), (2, 3, -14)))
test.assertSequenceEqual(m * vec_cls(5, 5, 5), make_vec(20, 25, 30))
test.assertSequenceEqual(m @ vec_cls(5, 5, 5), make_vec(20, 25, 30))
test.assertSequenceEqual(vec_cls(5, 5, 5) * m, make_vec(50, 25, 0))
test.assertSequenceEqual(vec_cls(5, 5, 5) @ m, make_vec(50, 25, 0))
m = mat_cls(((2, 4, 6), (8, 10, 12), (14, 16, 18)))
test.assertSequenceEqual(m * wptype(2), make_mat((4, 8, 12), (16, 20, 24), (28, 32, 36)))
test.assertSequenceEqual(wptype(2) * m, make_mat((4, 8, 12), (16, 20, 24), (28, 32, 36)))
test.assertSequenceEqual(m / wptype(2), make_mat((1, 2, 3), (4, 5, 6), (7, 8, 9)))
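    # 5040 divides every entry of m exactly, so the scalar-over-matrix division below is exact even for integer types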
test.assertSequenceEqual(wptype(5040) / m, make_mat((2520, 1260, 840), (630, 504, 420), (360, 315, 280)))
test.assertSequenceEqual(m * vec_cls(5, 5, 5), make_vec(60, 150, 240))
test.assertSequenceEqual(m @ vec_cls(5, 5, 5), make_vec(60, 150, 240))
test.assertSequenceEqual(vec_cls(5, 5, 5) * m, make_vec(120, 150, 180))
test.assertSequenceEqual(vec_cls(5, 5, 5) @ m, make_vec(120, 150, 180))
def test_quat_constructor(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
quat = wp.types.quaternion(dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_quat_constructor(
p: wp.array(dtype=vec3),
r: wp.array(dtype=quat),
s: wp.array(dtype=vec3),
outcomponents: wp.array(dtype=wptype),
outcomponents_alt: wp.array(dtype=wptype),
):
m = mat44(p[0], r[0], s[0])
R = wp.transpose(wp.quat_to_matrix(r[0]))
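        # after the transpose, R[i] is the i-th column of the rotation matrix,
        # so s[0][i] * R[i] is the i-th scaled basis column of the transform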
c0 = s[0][0] * R[0]
c1 = s[0][1] * R[1]
c2 = s[0][2] * R[2]
m_alt = mat44(
vec4(c0[0], c0[1], c0[2], wptype(0.0)),
vec4(c1[0], c1[1], c1[2], wptype(0.0)),
vec4(c2[0], c2[1], c2[2], wptype(0.0)),
vec4(p[0][0], p[0][1], p[0][2], wptype(1.0)),
)
idx = 0
for i in range(4):
for j in range(4):
outcomponents[idx] = m[i, j]
outcomponents_alt[idx] = m_alt[i, j]
idx = idx + 1
kernel = getkernel(check_mat_quat_constructor, suffix=dtype.__name__)
if register_kernels:
return
# translation:
p = wp.array(rng.standard_normal(size=(1, 3)).astype(dtype), dtype=vec3, requires_grad=True, device=device)
# generate a normalized quaternion for the rotation:
r = rng.standard_normal(size=(1, 4))
r /= np.linalg.norm(r)
r = wp.array(r.astype(dtype), dtype=quat, requires_grad=True, device=device)
# scale:
s = wp.array(rng.standard_normal(size=(1, 3)).astype(dtype), dtype=vec3, requires_grad=True, device=device)
    # generate the matrix using the constructor, then build it again manually,
    # and make sure the values/gradients are the same:
outcomponents = wp.zeros(4 * 4, dtype=wptype, requires_grad=True, device=device)
outcomponents_alt = wp.zeros(4 * 4, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[p, r, s], outputs=[outcomponents, outcomponents_alt], device=device)
assert_np_equal(outcomponents.numpy(), outcomponents_alt.numpy(), tol=1.0e-6)
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
out_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for _i in range(4):
for _j in range(4):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[p, r, s], outputs=[outcomponents, outcomponents_alt], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
wp.launch(
output_select_kernel, dim=1, inputs=[outcomponents_alt, idx], outputs=[out_alt], device=device
)
tape.backward(loss=out)
p_grad = 1.0 * tape.gradients[p].numpy()[0]
r_grad = 1.0 * tape.gradients[r].numpy()[0]
s_grad = 1.0 * tape.gradients[s].numpy()[0]
tape.zero()
tape.backward(loss=out_alt)
p_grad_alt = 1.0 * tape.gradients[p].numpy()[0]
r_grad_alt = 1.0 * tape.gradients[r].numpy()[0]
s_grad_alt = 1.0 * tape.gradients[s].numpy()[0]
tape.zero()
assert_np_equal(p_grad, p_grad_alt, tol=tol)
assert_np_equal(r_grad, r_grad_alt, tol=tol)
assert_np_equal(s_grad, s_grad_alt, tol=tol)
idx = idx + 1
def test_negation(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
mat55 = wp.types.matrix(shape=(5, 5), dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_negation(
m2: wp.array(dtype=mat22),
m3: wp.array(dtype=mat33),
m4: wp.array(dtype=mat44),
m5: wp.array(dtype=mat55),
outcomponents: wp.array(dtype=wptype),
):
mat2 = -m2[0]
mat3 = -m3[0]
mat4 = -m4[0]
mat5 = -m5[0]
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(2):
for j in range(2):
outcomponents[idx] = wptype(2) * mat2[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * mat3[i, j]
idx = idx + 1
for i in range(4):
for j in range(4):
outcomponents[idx] = wptype(2) * mat4[i, j]
idx = idx + 1
for i in range(5):
for j in range(5):
outcomponents[idx] = wptype(2) * mat5[i, j]
idx = idx + 1
kernel = getkernel(check_mat_negation, suffix=dtype.__name__)
if register_kernels:
return
m2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
m3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
m4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
m5 = wp.array(randvals(rng, [1, 5, 5], dtype), dtype=mat55, requires_grad=True, device=device)
outcomponents = wp.zeros(2 * 2 + 3 * 3 + 4 * 4 + 5 * 5, dtype=wptype, requires_grad=True, device=device)
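    # the kernel flattens the four matrices contiguously: 4 + 9 + 16 + 25 = 54 scalars,
    # hence the slice boundaries used below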
wp.launch(kernel, dim=1, inputs=[m2, m3, m4, m5], outputs=[outcomponents], device=device)
assert_np_equal(outcomponents.numpy()[:4], -2 * m2.numpy().reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[4:13], -2 * m3.numpy().reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[13:29], -2 * m4.numpy().reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[29:54], -2 * m5.numpy().reshape(-1), tol=tol)
if dtype in np_float_types:
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for dim, input in [(2, m2), (3, m3), (4, m4), (5, m5)]:
for i in range(dim):
for j in range(dim):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[m2, m3, m4, m5], outputs=[outcomponents], device=device)
wp.launch(
output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device
)
tape.backward(loss=out)
expectedresult = np.zeros((dim, dim), dtype=dtype)
expectedresult[i, j] = -2
assert_np_equal(tape.gradients[input].numpy()[0], expectedresult)
tape.zero()
idx = idx + 1
def test_subtraction(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
mat55 = wp.types.matrix(shape=(5, 5), dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_sub(
s2: wp.array(dtype=mat22),
s3: wp.array(dtype=mat33),
s4: wp.array(dtype=mat44),
s5: wp.array(dtype=mat55),
v2: wp.array(dtype=mat22),
v3: wp.array(dtype=mat33),
v4: wp.array(dtype=mat44),
v5: wp.array(dtype=mat55),
outcomponents: wp.array(dtype=wptype),
):
v2result = v2[0] - s2[0]
v3result = v3[0] - s3[0]
v4result = v4[0] - s4[0]
v5result = v5[0] - s5[0]
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(2):
for j in range(2):
outcomponents[idx] = wptype(2) * v2result[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * v3result[i, j]
idx = idx + 1
for i in range(4):
for j in range(4):
outcomponents[idx] = wptype(2) * v4result[i, j]
idx = idx + 1
for i in range(5):
for j in range(5):
outcomponents[idx] = wptype(2) * v5result[i, j]
idx = idx + 1
kernel = getkernel(check_mat_sub, suffix=dtype.__name__)
if register_kernels:
return
s2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
s3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
s4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
s5 = wp.array(randvals(rng, [1, 5, 5], dtype), dtype=mat55, requires_grad=True, device=device)
v2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, [1, 5, 5], dtype), dtype=mat55, requires_grad=True, device=device)
outcomponents = wp.zeros(2 * 2 + 3 * 3 + 4 * 4 + 5 * 5, dtype=wptype, requires_grad=True, device=device)
wp.launch(
kernel,
dim=1,
inputs=[
s2,
s3,
s4,
s5,
v2,
v3,
v4,
v5,
],
outputs=[outcomponents],
device=device,
)
assert_np_equal(outcomponents.numpy()[:4], 2 * (v2.numpy() - s2.numpy()).reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[4:13], 2 * (v3.numpy() - s3.numpy()).reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[13:29], 2 * (v4.numpy() - s4.numpy()).reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[29:54], 2 * (v5.numpy() - s5.numpy()).reshape(-1), tol=10 * tol)
if dtype in np_float_types:
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for dim, in1, in2 in [(2, s2, v2), (3, s3, v3), (4, s4, v4), (5, s5, v5)]:
for i in range(dim):
for j in range(dim):
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
s2,
s3,
s4,
s5,
v2,
v3,
v4,
v5,
],
outputs=[outcomponents],
device=device,
)
wp.launch(
output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device
)
tape.backward(loss=out)
expectedresult = np.zeros((dim, dim), dtype=dtype)
expectedresult[i, j] = 2
assert_np_equal(tape.gradients[in2].numpy()[0], expectedresult, tol=10 * tol)
expectedresult[i, j] = -2
assert_np_equal(tape.gradients[in1].numpy()[0], expectedresult, tol=10 * tol)
tape.zero()
idx = idx + 1
def test_determinant(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
def check_mat_det(
v2: wp.array(dtype=mat22),
v3: wp.array(dtype=mat33),
v4: wp.array(dtype=mat44),
det2: wp.array(dtype=wptype),
det3: wp.array(dtype=wptype),
det4: wp.array(dtype=wptype),
):
# multiply outputs by 2 so we've got something to backpropagate:
det2[0] = wptype(2) * wp.determinant(v2[0])
det3[0] = wptype(2) * wp.determinant(v3[0])
det4[0] = wptype(2) * wp.determinant(v4[0])
kernel = getkernel(check_mat_det, suffix=dtype.__name__)
if register_kernels:
return
v2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
det2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
det3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
det4 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
v2,
v3,
v4,
],
outputs=[
det2,
det3,
det4,
],
device=device,
)
if dtype in np_float_types:
assert_np_equal(det2.numpy()[0], 2 * np.linalg.det(v2.numpy()[0].astype(np.float64)), tol=100 * tol)
assert_np_equal(det3.numpy()[0], 2 * np.linalg.det(v3.numpy()[0].astype(np.float64)), tol=100 * tol)
assert_np_equal(det4.numpy()[0], 2 * np.linalg.det(v4.numpy()[0].astype(np.float64)), tol=420 * tol)
else:
assert_np_equal(det2.numpy()[0], 2 * np.around(np.linalg.det(v2.numpy()[0])).astype(int))
assert_np_equal(det3.numpy()[0], 2 * np.around(np.linalg.det(v3.numpy()[0])).astype(int))
assert_np_equal(det4.numpy()[0], 2 * np.around(np.linalg.det(v4.numpy()[0])).astype(int))
if dtype in np_float_types:
        # the analytic determinant derivative is fiddly, so check the gradients with finite differences instead
tape.backward(loss=det2)
v2grads = 1.0 * tape.gradients[v2].numpy()[0]
tape.zero()
tape.backward(loss=det3)
v3grads = 1.0 * tape.gradients[v3].numpy()[0]
tape.zero()
tape.backward(loss=det4)
v4grads = 1.0 * tape.gradients[v4].numpy()[0]
tape.zero()
        # finite differences are noisy, hence the large tolerance...
        # and float16 needs an even looser one
dx = 0.01 if dtype == np.float16 else 0.0001
fdtol = 2.0e-1 if dtype == np.float16 else 2.0e-3
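        # central differences: the loss derivative is approximated by (dplus - dminus) / (2*dx),
        # and both sides of each comparison are divided by dplus to make the check relative rather than absolute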
for i in range(2):
for j in range(2):
v2test = v2.numpy()
v2test[0, i, j] += dx
wp.launch(
kernel,
dim=1,
inputs=[
wp.array(v2test, dtype=v2.dtype, requires_grad=True, device=device),
v3,
v4,
],
outputs=[
det2,
det3,
det4,
],
device=device,
)
dplus = det2.numpy()[0]
v2test[0, i, j] -= 2.0 * dx
wp.launch(
kernel,
dim=1,
inputs=[
wp.array(v2test, dtype=v2.dtype, requires_grad=True, device=device),
v3,
v4,
],
outputs=[
det2,
det3,
det4,
],
device=device,
)
dminus = det2.numpy()[0]
assert_np_equal((dplus - dminus) / (2.0 * dx * dplus), v2grads[i, j] / dplus, tol=fdtol)
for i in range(3):
for j in range(3):
v3test = v3.numpy()
v3test[0, i, j] += dx
wp.launch(
kernel,
dim=1,
inputs=[
v2,
wp.array(v3test, dtype=v3.dtype, requires_grad=True, device=device),
v4,
],
outputs=[
det2,
det3,
det4,
],
device=device,
)
dplus = det3.numpy()[0]
v3test[0, i, j] -= 2.0 * dx
wp.launch(
kernel,
dim=1,
inputs=[
v2,
wp.array(v3test, dtype=v3.dtype, requires_grad=True, device=device),
v4,
],
outputs=[
det2,
det3,
det4,
],
device=device,
)
dminus = det3.numpy()[0]
assert_np_equal((dplus - dminus) / (2.0 * dx * dplus), v3grads[i, j] / dplus, tol=fdtol)
for i in range(4):
for j in range(4):
v4test = v4.numpy()
v4test[0, i, j] += dx
wp.launch(
kernel,
dim=1,
inputs=[
v2,
v3,
wp.array(v4test, dtype=v4.dtype, requires_grad=True, device=device),
],
outputs=[
det2,
det3,
det4,
],
device=device,
)
dplus = det4.numpy()[0]
v4test[0, i, j] -= 2.0 * dx
wp.launch(
kernel,
dim=1,
inputs=[
v2,
v3,
wp.array(v4test, dtype=v4.dtype, requires_grad=True, device=device),
],
outputs=[
det2,
det3,
det4,
],
device=device,
)
dminus = det4.numpy()[0]
assert_np_equal((dplus - dminus) / (2.0 * dx * dplus), v4grads[i, j] / dplus, tol=fdtol)
# Currently unused; kept commented out for reference.
# def test_get_diag(test, device, dtype, register_kernels=False):
# tol = {
# np.float16: 1.0e-3,
# np.float32: 1.0e-6,
# np.float64: 1.0e-8,
# }.get(dtype, 0)
#
# wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
# mat55 = wp.types.matrix(shape=(5, 5), dtype=wptype)
#
# output_select_kernel = get_select_kernel(wptype)
#
# def check_mat_diag(
# m55: wp.array(dtype=mat55),
# outcomponents: wp.array(dtype=wptype),
# ):
# # multiply outputs by 2 so we've got something to backpropagate:
# vec5result = wptype(2) * wp.get_diag(m55[0])
#
# idx = 0
# for i in range(5):
# outcomponents[idx] = vec5result[i]
# idx = idx + 1
#
# kernel = getkernel(check_mat_diag, suffix=dtype.__name__)
#
# if register_kernels:
# return
#
# m55 = wp.array(randvals((1, 5, 5), dtype), dtype=mat55, requires_grad=True, device=device)
# outcomponents = wp.zeros(5, dtype=wptype, requires_grad=True, device=device)
# out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
#
# wp.launch(kernel, dim=1, inputs=[m55], outputs=[outcomponents], device=device)
#
# assert_np_equal(outcomponents.numpy(), 2 * np.diag(m55.numpy()[0]), tol=tol)
#
# if dtype in np_float_types:
# idx = 0
# for i in range(5):
# tape = wp.Tape()
# with tape:
# wp.launch(kernel, dim=1, inputs=[m55], outputs=[outcomponents], device=device)
# wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
# tape.backward(loss=out)
# expectedresult = np.zeros((5, 5), dtype=dtype)
# expectedresult[i, i] = 2
# assert_np_equal(tape.gradients[m55].numpy()[0], expectedresult, tol=10 * tol)
# tape.zero()
#
# idx = idx + 1
def test_inverse(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-2,
np.float32: 1.0e-5,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_inverse(
m2: wp.array(dtype=mat22),
m3: wp.array(dtype=mat33),
m4: wp.array(dtype=mat44),
outcomponents: wp.array(dtype=wptype),
):
m2result = wp.inverse(m2[0])
m3result = wp.inverse(m3[0])
m4result = wp.inverse(m4[0])
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(2):
for j in range(2):
outcomponents[idx] = wptype(2) * m2result[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * m3result[i, j]
idx = idx + 1
for i in range(4):
for j in range(4):
outcomponents[idx] = wptype(2) * m4result[i, j]
idx = idx + 1
kernel = getkernel(check_mat_inverse, suffix=dtype.__name__)
if register_kernels:
return
m2 = wp.array(
2 * (randvals(rng, [1, 2, 2], dtype) + 0.2 * np.eye(2)), dtype=mat22, requires_grad=True, device=device
)
m3 = wp.array(
2 * (randvals(rng, [1, 3, 3], dtype) + 0.2 * np.eye(3)), dtype=mat33, requires_grad=True, device=device
)
m4 = wp.array(
2 * (randvals(rng, [1, 4, 4], dtype) + 0.2 * np.eye(4)), dtype=mat44, requires_grad=True, device=device
)
outcomponents = wp.zeros(2 * 2 + 3 * 3 + 4 * 4, dtype=wptype, requires_grad=True, device=device)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[m2, m3, m4], outputs=[outcomponents], device=device)
assert_np_equal(outcomponents.numpy()[:4], 2 * np.linalg.inv(m2.numpy()[0].astype(np.float64)), tol=tol)
assert_np_equal(outcomponents.numpy()[4:13], 2 * np.linalg.inv(m3.numpy()[0].astype(np.float64)), tol=5 * tol)
assert_np_equal(outcomponents.numpy()[13:], 2 * np.linalg.inv(m4.numpy()[0].astype(np.float64)), tol=5 * tol)
if dtype in np_float_types:
# check gradients:
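        # analytic reference (a note, not part of the original test): from the identity
        # d(M^-1) = -M^-1 dM M^-1, the derivative of inv(M)[i, j] with respect to M[k, l]
        # is -inv(M)[i, k] * inv(M)[l, j]; below this is written in matrix form as
        # -(minv @ d @ minv).T with d[j, i] = 2 to account for the factor of 2 in the outputs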
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for dim, input in [(2, m2), (3, m3), (4, m4)]:
minv = np.linalg.inv(input.numpy()[0].astype(np.float64))
for i in range(dim):
for j in range(dim):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[m2, m3, m4], outputs=[outcomponents], device=device)
wp.launch(
output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device
)
tape.backward(loss=out)
d = np.zeros((dim, dim))
d[j, i] = 2
assert_np_equal(
tape.gradients[input].numpy()[0], -np.matmul(minv, np.matmul(d, minv)).T, tol=10 * tol
)
tape.zero()
idx = idx + 1
# let's check 2x2 using different formulae just for (in)sanity's sake:
m = m2.numpy()[0]
det = m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]
expected = 2 * np.array([[m[1, 1], -m[0, 1]], [-m[1, 0], m[0, 0]]], dtype=dtype) / det
assert_np_equal(expected, outcomponents.numpy()[:4], tol=tol)
# 0,0 component is this:
# 2 * m[1,1] / (m[0,0]*m[1,1] - m[1,0] * m[0,1])
assert_np_equal(2 * m[1, 1] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]), outcomponents.numpy()[0], tol=tol)
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[m2, m3, m4], outputs=[outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, 0], outputs=[out], device=device)
if dtype in np_float_types:
tape.backward(loss=out)
g = tape.gradients[m2].numpy()[0]
assert_np_equal(-2 * m[1, 1] * m[1, 1] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[0, 0], tol=tol)
assert_np_equal(2 * m[1, 1] * m[0, 1] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[1, 0], tol=tol)
assert_np_equal(-2 * m[0, 1] * m[1, 0] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[1, 1], tol=tol)
assert_np_equal(2 * m[1, 1] * m[1, 0] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[0, 1], tol=tol)
tape.zero()
# 0,1 component is this:
# -2 * m[0,1] / (m[0,0]*m[1,1] - m[1,0] * m[0,1])
assert_np_equal(-2 * m[0, 1] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]), outcomponents.numpy()[1], tol=tol)
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[m2, m3, m4], outputs=[outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, 1], outputs=[out], device=device)
if dtype in np_float_types:
tape.backward(loss=out)
g = tape.gradients[m2].numpy()[0]
assert_np_equal(2 * m[0, 1] * m[1, 1] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[0, 0], tol=tol)
assert_np_equal(-2 * m[0, 1] * m[0, 1] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[1, 0], tol=tol)
assert_np_equal(2 * m[0, 0] * m[0, 1] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[1, 1], tol=tol)
assert_np_equal(-2 * m[1, 1] * m[0, 0] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[0, 1], tol=tol)
tape.zero()
# 1,0 component is this:
# -2 * m[1,0] / (m[0,0]*m[1,1] - m[1,0] * m[0,1])
assert_np_equal(-2 * m[1, 0] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]), outcomponents.numpy()[2], tol=tol)
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[m2, m3, m4], outputs=[outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, 2], outputs=[out], device=device)
if dtype in np_float_types:
tape.backward(loss=out)
g = tape.gradients[m2].numpy()[0]
assert_np_equal(2 * m[1, 1] * m[1, 0] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[0, 0], tol=tol)
assert_np_equal(-2 * m[0, 0] * m[1, 1] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[1, 0], tol=tol)
assert_np_equal(2 * m[0, 0] * m[1, 0] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[1, 1], tol=tol)
assert_np_equal(-2 * m[1, 0] * m[1, 0] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[0, 1], tol=tol)
tape.zero()
# 1,1 component is this:
# 2 * m[0,0] / (m[0,0]*m[1,1] - m[1,0] * m[0,1])
assert_np_equal(2 * m[0, 0] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]), outcomponents.numpy()[3], tol=tol)
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[m2, m3, m4], outputs=[outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, 3], outputs=[out], device=device)
if dtype in np_float_types:
tape.backward(loss=out)
g = tape.gradients[m2].numpy()[0]
assert_np_equal(-2 * m[0, 1] * m[1, 0] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[0, 0], tol=tol)
assert_np_equal(2 * m[0, 0] * m[0, 1] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[1, 0], tol=tol)
assert_np_equal(2 * m[0, 0] * m[1, 0] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[0, 1], tol=tol)
assert_np_equal(-2 * m[0, 0] * m[0, 0] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[1, 1], tol=tol)
tape.zero()
def test_svd(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-6,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec3 = wp.types.vector(length=3, dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
def check_mat_svd(
m3: wp.array(dtype=mat33),
Uout: wp.array(dtype=mat33),
sigmaout: wp.array(dtype=vec3),
Vout: wp.array(dtype=mat33),
outcomponents: wp.array(dtype=wptype),
):
U = mat33()
sigma = vec3()
V = mat33()
wp.svd3(m3[0], U, sigma, V)
Uout[0] = U
sigmaout[0] = sigma
Vout[0] = V
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * U[i, j]
idx = idx + 1
for i in range(3):
outcomponents[idx] = wptype(2) * sigma[i]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * V[i, j]
idx = idx + 1
kernel = getkernel(check_mat_svd, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
m3 = wp.array(randvals(rng, [1, 3, 3], dtype) + np.eye(3), dtype=mat33, requires_grad=True, device=device)
outcomponents = wp.zeros(2 * 3 * 3 + 3, dtype=wptype, requires_grad=True, device=device)
Uout = wp.zeros(1, dtype=mat33, requires_grad=True, device=device)
sigmaout = wp.zeros(1, dtype=vec3, requires_grad=True, device=device)
Vout = wp.zeros(1, dtype=mat33, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[m3], outputs=[Uout, sigmaout, Vout, outcomponents], device=device)
Uout_np = Uout.numpy()[0].astype(np.float64)
sigmaout_np = np.diag(sigmaout.numpy()[0].astype(np.float64))
Vout_np = Vout.numpy()[0].astype(np.float64)
assert_np_equal(
np.matmul(Uout_np, np.matmul(sigmaout_np, Vout_np.T)), m3.numpy()[0].astype(np.float64), tol=30 * tol
)
if dtype == np.float16:
# I'm not even going to bother testing the gradients for float16
# because the rounding errors are terrible...
return
# check gradients:
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
idx = 0
for idx in range(3 * 3 + 3 + 3 * 3):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[m3], outputs=[Uout, sigmaout, Vout, outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
tape.backward(out)
m3grads = 1.0 * tape.gradients[m3].numpy()[0]
tape.zero()
dx = 0.0001
fdtol = 5.0e-4 if dtype == np.float64 else 2.0e-2
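        # central-difference check: perturb each entry of m3 by +/-dx and compare
        # (f(x + dx) - f(x - dx)) / (2 * dx) against the adjoint recorded by the tape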
for ii in range(3):
for jj in range(3):
m3test = 1.0 * m3.numpy()
m3test[0, ii, jj] += dx
wp.launch(
kernel,
dim=1,
inputs=[wp.array(m3test, dtype=mat33, device=device)],
outputs=[Uout, sigmaout, Vout, outcomponents],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
plusval = out.numpy()[0]
m3test = 1.0 * m3.numpy()
m3test[0, ii, jj] -= dx
wp.launch(
kernel,
dim=1,
inputs=[wp.array(m3test, dtype=mat33, device=device)],
outputs=[Uout, sigmaout, Vout, outcomponents],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
minusval = out.numpy()[0]
assert_np_equal((plusval - minusval) / (2 * dx), m3grads[ii, jj], tol=fdtol)
def test_qr(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 2.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-6,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
def check_mat_qr(
m3: wp.array(dtype=mat33),
Qout: wp.array(dtype=mat33),
Rout: wp.array(dtype=mat33),
outcomponents: wp.array(dtype=wptype),
):
Q = mat33()
R = mat33()
wp.qr3(m3[0], Q, R)
Qout[0] = Q
Rout[0] = R
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * Q[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * R[i, j]
idx = idx + 1
kernel = getkernel(check_mat_qr, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
m3 = wp.array(0.5 * (randvals(rng, [1, 3, 3], dtype) + np.eye(3)), dtype=mat33, requires_grad=True, device=device)
outcomponents = wp.zeros(2 * 3 * 3, dtype=wptype, requires_grad=True, device=device)
Qout = wp.zeros(1, dtype=mat33, requires_grad=True, device=device)
Rout = wp.zeros(1, dtype=mat33, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[m3], outputs=[Qout, Rout, outcomponents], device=device)
Qout_np = Qout.numpy()[0].astype(np.float64)
Rout_np = Rout.numpy()[0].astype(np.float64)
    # check Q is orthogonal and R is upper triangular:
assert_np_equal(np.matmul(Qout_np.T, Qout_np), np.eye(3, dtype=np.float64), tol=tol)
assert_np_equal(Rout_np[1, [0]], np.zeros(1, dtype=np.float64), tol=tol)
assert_np_equal(Rout_np[2, [0, 1]], np.zeros(2, dtype=np.float64), tol=tol)
# check it's a factorization:
assert_np_equal(np.matmul(Qout_np, Rout_np), m3.numpy()[0].astype(np.float64), tol=30 * tol)
if dtype == np.float16:
# I'm not even going to bother testing the gradients for float16
# because the rounding errors are terrible...
return
# check gradients:
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
idx = 0
for idx in range(len(outcomponents)):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[m3], outputs=[Qout, Rout, outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
tape.backward(out)
m3grads = 1.0 * tape.gradients[m3].numpy()[0]
tape.zero()
dx = 0.0001
fdtol = 5.0e-4 if dtype == np.float64 else 2.0e-2
for ii in range(3):
for jj in range(3):
m3test = 1.0 * m3.numpy()
m3test[0, ii, jj] += dx
wp.launch(
kernel,
dim=1,
inputs=[wp.array(m3test, dtype=mat33, device=device)],
outputs=[Qout, Rout, outcomponents],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
plusval = out.numpy()[0]
m3test = 1.0 * m3.numpy()
m3test[0, ii, jj] -= dx
wp.launch(
kernel,
dim=1,
inputs=[wp.array(m3test, dtype=mat33, device=device)],
outputs=[Qout, Rout, outcomponents],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
minusval = out.numpy()[0]
assert_np_equal((plusval - minusval) / (2 * dx), m3grads[ii, jj], tol=fdtol)
def test_eig(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 4.0e-2,
np.float32: 1.0e-5,
np.float64: 1.0e-5,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec3 = wp.types.vector(length=3, dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
def check_mat_eig(
m3: wp.array(dtype=mat33),
Qout: wp.array(dtype=mat33),
dout: wp.array(dtype=vec3),
outcomponents: wp.array(dtype=wptype),
):
Q = mat33()
d = vec3()
wp.eig3(m3[0] + wp.transpose(m3[0]), Q, d)
Qout[0] = Q
dout[0] = d
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * Q[i, j]
idx = idx + 1
for i in range(3):
outcomponents[idx] = wptype(2) * d[i]
idx = idx + 1
kernel = getkernel(check_mat_eig, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
m3_np = randvals(rng, [1, 3, 3], dtype) + np.eye(3, dtype=dtype)
m3 = wp.array(m3_np, dtype=mat33, requires_grad=True, device=device)
outcomponents = wp.zeros(3 * 3 + 3, dtype=wptype, requires_grad=True, device=device)
Qout = wp.zeros(1, dtype=mat33, requires_grad=True, device=device)
dout = wp.zeros(1, dtype=vec3, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[m3], outputs=[Qout, dout, outcomponents], device=device)
Qout_np = Qout.numpy()[0].astype(np.float64)
dout_np = dout.numpy()[0].astype(np.float64)
Dout_np = np.diag(dout_np)
# check Q is orthogonal:
assert_np_equal(np.matmul(Qout_np.T, Qout_np), np.eye(3), tol=tol)
# check Q contains eigenvectors:
assert_np_equal(np.matmul(Qout_np, np.matmul(Dout_np, Qout_np.T)), (m3_np[0] + m3_np[0].transpose()), tol=tol)
if dtype == np.float16:
# I'm not even going to bother testing the gradients for float16
# because the rounding errors are terrible...
return
# check gradients:
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
idx = 0
for idx in range(len(outcomponents)):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[m3], outputs=[Qout, dout, outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
tape.backward(out)
m3grads = 1.0 * tape.gradients[m3].numpy()[0]
tape.zero()
dx = 0.0001
fdtol = 5.0e-4 if dtype == np.float64 else 2.0e-2
for ii in range(3):
for jj in range(3):
m3test = 1.0 * m3.numpy()
m3test[0, ii, jj] += dx
wp.launch(
kernel,
dim=1,
inputs=[wp.array(m3test, dtype=mat33, device=device)],
outputs=[Qout, dout, outcomponents],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
plusval = out.numpy()[0]
m3test = 1.0 * m3.numpy()
m3test[0, ii, jj] -= dx
wp.launch(
kernel,
dim=1,
inputs=[wp.array(m3test, dtype=mat33, device=device)],
outputs=[Qout, dout, outcomponents],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
minusval = out.numpy()[0]
assert_np_equal((plusval - minusval) / (2 * dx), m3grads[ii, jj], tol=fdtol)
def test_skew(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec3 = wp.types.vector(length=3, dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_skew(
v3: wp.array(dtype=vec3),
outcomponents: wp.array(dtype=wptype),
):
m3result = wp.skew(v3[0])
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * m3result[i, j]
idx = idx + 1
kernel = getkernel(check_mat_skew, suffix=dtype.__name__)
if register_kernels:
return
v3 = wp.array(randvals(rng, [1, 3], dtype), dtype=vec3, requires_grad=True, device=device)
outcomponents = wp.zeros(3 * 3, dtype=wptype, requires_grad=True, device=device)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[v3], outputs=[outcomponents], device=device)
# make sure it gives you a cross product matrix:
crossprodmat = outcomponents.numpy().reshape(3, 3)
v = np.array([1, 0, 0])
assert_np_equal(
np.matmul(crossprodmat, np.array([1, 0, 0])).reshape(-1),
2 * np.cross(v3.numpy()[0], np.array([1, 0, 0])),
tol=tol,
)
assert_np_equal(
np.matmul(crossprodmat, np.array([0, 1, 0])).reshape(-1),
2 * np.cross(v3.numpy()[0], np.array([0, 1, 0])),
tol=tol,
)
assert_np_equal(
np.matmul(crossprodmat, np.array([0, 0, 1])).reshape(-1),
2 * np.cross(v3.numpy()[0], np.array([0, 0, 1])),
tol=tol,
)
# check it another way:
x0 = v3.numpy()[0, 0]
x1 = v3.numpy()[0, 1]
x2 = v3.numpy()[0, 2]
crossprodmat_expected = np.array(
[
[0, -x2, x1],
[x2, 0, -x0],
[-x1, x0, 0],
],
dtype=dtype,
)
assert_np_equal(crossprodmat, 2 * crossprodmat_expected, tol=tol)
if dtype in np_float_types:
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for i in range(3):
for j in range(3):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[v3], outputs=[outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
tape.backward(loss=out)
if i == j:
assert_np_equal(tape.gradients[v3].numpy()[0], np.zeros(3))
elif [i, j] == [0, 1]:
assert_np_equal(tape.gradients[v3].numpy()[0], np.array([0, 0, -2]))
elif [i, j] == [1, 0]:
assert_np_equal(tape.gradients[v3].numpy()[0], np.array([0, 0, 2]))
elif [i, j] == [0, 2]:
assert_np_equal(tape.gradients[v3].numpy()[0], np.array([0, 2, 0]))
elif [i, j] == [2, 0]:
assert_np_equal(tape.gradients[v3].numpy()[0], np.array([0, -2, 0]))
elif [i, j] == [1, 2]:
assert_np_equal(tape.gradients[v3].numpy()[0], np.array([-2, 0, 0]))
elif [i, j] == [2, 1]:
assert_np_equal(tape.gradients[v3].numpy()[0], np.array([2, 0, 0]))
tape.zero()
idx = idx + 1
def test_transform_point(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec3 = wp.types.vector(length=3, dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_transform_point(
v3: wp.array(dtype=vec3),
m4: wp.array(dtype=mat44),
outcomponents: wp.array(dtype=wptype),
):
# multiply outputs by 2 so we've got something to backpropagate:
presult = wptype(2) * wp.transform_point(m4[0], v3[0])
outcomponents[0] = presult[0]
outcomponents[1] = presult[1]
outcomponents[2] = presult[2]
kernel = getkernel(check_mat_transform_point, suffix=dtype.__name__)
if register_kernels:
return
v3 = wp.array(randvals(rng, [1, 3], dtype), dtype=vec3, requires_grad=True, device=device)
m4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
outcomponents = wp.zeros(3, dtype=wptype, requires_grad=True, device=device)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[v3, m4], outputs=[outcomponents], device=device)
v3homog = np.ones(4, dtype=dtype)
v3homog[:3] = v3.numpy()[0]
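    # transform_point uses a homogeneous coordinate of 1, so the reference result
    # includes the translation column of m4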
assert_np_equal(outcomponents.numpy(), 2 * np.matmul(m4.numpy()[0], v3homog)[:3], tol=10 * tol)
if dtype in np_float_types:
for j in range(3):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[v3, m4], outputs=[outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, j], outputs=[out], device=device)
tape.backward(loss=out)
assert_np_equal(2 * m4.numpy()[0, j, :3], tape.gradients[v3].numpy(), tol=tol)
expected = np.zeros((4, 4), dtype=dtype)
expected[j, :3] = 2 * v3.numpy()
expected[j, 3] = 2
assert_np_equal(tape.gradients[m4].numpy(), expected, tol=tol)
tape.zero()
def test_transform_vector(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec3 = wp.types.vector(length=3, dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_transform_vector(
v3: wp.array(dtype=vec3),
m4: wp.array(dtype=mat44),
outcomponents: wp.array(dtype=wptype),
):
# multiply outputs by 2 so we've got something to backpropagate:
presult = wptype(2) * wp.transform_vector(m4[0], v3[0])
outcomponents[0] = presult[0]
outcomponents[1] = presult[1]
outcomponents[2] = presult[2]
kernel = getkernel(check_mat_transform_vector, suffix=dtype.__name__)
if register_kernels:
return
v3 = wp.array(randvals(rng, [1, 3], dtype), dtype=vec3, requires_grad=True, device=device)
m4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
outcomponents = wp.zeros(3, dtype=wptype, requires_grad=True, device=device)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[v3, m4], outputs=[outcomponents], device=device)
v3homog = np.zeros(4, dtype=dtype)
v3homog[:3] = v3.numpy()[0]
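    # transform_vector uses a homogeneous coordinate of 0, so the translation
    # column of m4 does not contribute to the reference result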
assert_np_equal(outcomponents.numpy(), 2 * np.matmul(m4.numpy()[0], v3homog)[:3], tol=10 * tol)
if dtype in np_float_types:
for j in range(3):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[v3, m4], outputs=[outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, j], outputs=[out], device=device)
tape.backward(loss=out)
assert_np_equal(2 * m4.numpy()[0, j, :3], tape.gradients[v3].numpy(), tol=tol)
expected = np.zeros((4, 4), dtype=dtype)
expected[j, :3] = 2 * v3.numpy()
assert_np_equal(tape.gradients[m4].numpy(), expected, tol=tol)
tape.zero()
# Test matrix constructors using explicit type (float16)
# note that these tests are specifically not using generics / closure
# args to create kernels dynamically (like the rest of this file)
# as those use different code paths to resolve arg types, which
# has led to regressions.
@wp.kernel
def test_constructors_explicit_precision():
# construction for custom matrix types
eye = wp.identity(dtype=wp.float16, n=2)
zeros = wp.matrix(shape=(2, 2), dtype=wp.float16)
custom = wp.matrix(wp.float16(0.0), wp.float16(1.0), wp.float16(2.0), wp.float16(3.0), shape=(2, 2))
for i in range(2):
for j in range(2):
if i == j:
wp.expect_eq(eye[i, j], wp.float16(1.0))
else:
wp.expect_eq(eye[i, j], wp.float16(0.0))
wp.expect_eq(zeros[i, j], wp.float16(0.0))
wp.expect_eq(custom[i, j], wp.float16(i) * wp.float16(2.0) + wp.float16(j))
mat32d = wp.mat(shape=(3, 2), dtype=wp.float64)
@wp.kernel
def test_matrix_constructor_value_func():
a = wp.mat22()
b = wp.matrix(a, shape=(2, 2))
c = mat32d()
d = mat32d(c, shape=(3, 2))
e = mat32d(wp.float64(1.0), wp.float64(2.0), wp.float64(1.0), wp.float64(2.0), wp.float64(1.0), wp.float64(2.0))
f = mat32d(
wp.vec3d(wp.float64(1.0), wp.float64(2.0), wp.float64(3.0)),
wp.vec3d(wp.float64(1.0), wp.float64(2.0), wp.float64(3.0)),
)
# Same as above but with a default (float/int) type
# which tests some different code paths that
# need to ensure types are correctly canonicalized
# during codegen
@wp.kernel
def test_constructors_default_precision():
# construction for default (float) matrix types
eye = wp.identity(dtype=float, n=2)
zeros = wp.matrix(shape=(2, 2), dtype=float)
custom = wp.matrix(0.0, 1.0, 2.0, 3.0, shape=(2, 2))
for i in range(2):
for j in range(2):
if i == j:
wp.expect_eq(eye[i, j], 1.0)
else:
wp.expect_eq(eye[i, j], 0.0)
wp.expect_eq(zeros[i, j], 0.0)
wp.expect_eq(custom[i, j], float(i) * 2.0 + float(j))
@wp.kernel
def test_matrix_mutation(expected: wp.types.matrix(shape=(10, 3), dtype=float)):
m = wp.matrix(shape=(10, 3), dtype=float)
# test direct element indexing
m[0, 0] = 1.0
m[0, 1] = 2.0
m[0, 2] = 3.0
    # The nested indexing (matrix->vector->scalar) below does not
    # currently modify m because m[0] returns the row vector by
    # value rather than by reference. This differs from NumPy,
    # which always returns a reference. It is not clear how to
    # support reference semantics here as well as auto-diff.
# m[0][1] = 2.0
# m[0][2] = 3.0
# test setting rows
for i in range(1, 10):
m[i] = m[i - 1] + wp.vec3(1.0, 2.0, 3.0)
wp.expect_eq(m, expected)
CONSTANT_SHAPE_ROWS = wp.constant(10)
CONSTANT_SHAPE_COLS = wp.constant(10)
# tests that we can use global constants in shape keyword argument
# for matrix constructor
@wp.kernel
def test_constructors_constant_shape():
m = wp.matrix(shape=(CONSTANT_SHAPE_ROWS, CONSTANT_SHAPE_COLS), dtype=float)
for i in range(CONSTANT_SHAPE_ROWS):
for j in range(CONSTANT_SHAPE_COLS):
m[i, j] = float(i * j)
devices = get_test_devices()
class TestMat(unittest.TestCase):
def test_tpl_ops_with_anon(self):
mat22f = wp.mat((2, 2), dtype=float)
m = wp.mat22f(1.0, 2.0, 3.0, 4.0)
m += mat22f(2.0, 3.0, 4.0, 5.0)
m -= mat22f(3.0, 4.0, 5.0, 6.0)
self.assertSequenceEqual(m, ((0.0, 1.0), (2.0, 3.0)))
m = mat22f(1.0, 2.0, 3.0, 4.0)
m += wp.mat22f(2.0, 3.0, 4.0, 5.0)
m -= wp.mat22f(3.0, 4.0, 5.0, 6.0)
self.assertSequenceEqual(m, ((0.0, 1.0), (2.0, 3.0)))
add_kernel_test(TestMat, test_constructors_explicit_precision, dim=1, devices=devices)
add_kernel_test(TestMat, test_constructors_default_precision, dim=1, devices=devices)
add_kernel_test(TestMat, test_constructors_constant_shape, dim=1, devices=devices)
add_kernel_test(TestMat, test_matrix_constructor_value_func, dim=1, devices=devices)
mat103 = wp.types.matrix(shape=(10, 3), dtype=float)
add_kernel_test(
TestMat,
test_matrix_mutation,
dim=1,
inputs=[
mat103(
1.0, 2.0, 3.0,
2.0, 4.0, 6.0,
3.0, 6.0, 9.0,
4.0, 8.0, 12.0,
5.0, 10.0, 15.0,
6.0, 12.0, 18.0,
7.0, 14.0, 21.0,
8.0, 16.0, 24.0,
9.0, 18.0, 27.0,
10.0, 20.0, 30.0,
)
],
devices=devices,
) # fmt: skip
for dtype in np_signed_int_types + np_float_types:
add_function_test_register_kernel(
TestMat, f"test_negation_{dtype.__name__}", test_negation, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestMat, f"test_subtraction_{dtype.__name__}", test_subtraction, devices=devices, dtype=dtype
)
add_function_test(
TestMat,
"test_anon_constructor_error_shape_keyword_missing",
test_anon_constructor_error_shape_keyword_missing,
devices=devices,
)
add_function_test(
TestMat,
"test_anon_constructor_error_dtype_keyword_missing",
test_anon_constructor_error_dtype_keyword_missing,
devices=devices,
)
add_function_test(
TestMat,
"test_anon_constructor_error_shape_mismatch",
test_anon_constructor_error_shape_mismatch,
devices=devices,
)
add_function_test(
TestMat,
"test_anon_constructor_error_invalid_arg_count",
test_anon_constructor_error_invalid_arg_count,
devices=devices,
)
add_function_test(
TestMat,
"test_tpl_constructor_error_incompatible_sizes",
test_tpl_constructor_error_incompatible_sizes,
devices=devices,
)
add_function_test(
TestMat,
"test_tpl_constructor_error_invalid_scalar_type",
test_tpl_constructor_error_invalid_scalar_type,
devices=devices,
)
add_function_test(
TestMat,
"test_tpl_constructor_error_invalid_vector_count",
test_tpl_constructor_error_invalid_vector_count,
devices=devices,
)
add_function_test(
TestMat,
"test_tpl_constructor_error_invalid_vector_shape",
test_tpl_constructor_error_invalid_vector_shape,
devices=devices,
)
add_function_test(
TestMat,
"test_tpl_constructor_error_invalid_arg_count",
test_tpl_constructor_error_invalid_arg_count,
devices=devices,
)
for dtype in np_float_types:
add_function_test(
TestMat, f"test_py_arithmetic_ops_{dtype.__name__}", test_py_arithmetic_ops, devices=None, dtype=dtype
)
add_function_test_register_kernel(
TestMat, f"test_quat_constructor_{dtype.__name__}", test_quat_constructor, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestMat, f"test_inverse_{dtype.__name__}", test_inverse, devices=devices, dtype=dtype
)
add_function_test_register_kernel(TestMat, f"test_svd_{dtype.__name__}", test_svd, devices=devices, dtype=dtype)
add_function_test_register_kernel(TestMat, f"test_qr_{dtype.__name__}", test_qr, devices=devices, dtype=dtype)
add_function_test_register_kernel(TestMat, f"test_eig_{dtype.__name__}", test_eig, devices=devices, dtype=dtype)
add_function_test_register_kernel(
TestMat, f"test_transform_point_{dtype.__name__}", test_transform_point, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestMat, f"test_transform_vector_{dtype.__name__}", test_transform_vector, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestMat, f"test_determinant_{dtype.__name__}", test_determinant, devices=devices, dtype=dtype
)
add_function_test_register_kernel(TestMat, f"test_skew_{dtype.__name__}", test_skew, devices=devices, dtype=dtype)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=True)
| 65,047 | Python | 35.137778 | 118 | 0.539917 |
NVIDIA/warp/warp/tests/aux_test_unresolved_func.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import warp as wp
@wp.kernel
def unresolved_func_kernel():
# this should trigger an exception due to unresolved function
x = wp.missing_func(42)
| 579 | Python | 37.666664 | 76 | 0.791019 |
NVIDIA/warp/warp/tests/test_bool.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
TRUE_CONSTANT = wp.constant(True)
@wp.func
def identity_function(input_bool: wp.bool, plain_bool: bool):
return input_bool and plain_bool
@wp.kernel
def identity_test(data: wp.array(dtype=wp.bool)):
i = wp.tid()
data[i] = data[i] and True
data[i] = data[i] and wp.bool(True)
data[i] = data[i] and not False
data[i] = data[i] and not wp.bool(False)
data[i] = identity_function(data[i], True)
if data[i]:
data[i] = True
else:
data[i] = False
if not data[i]:
data[i] = False
else:
data[i] = True
if data[i] and True:
data[i] = True
else:
data[i] = False
if data[i] or False:
data[i] = True
else:
data[i] = False
data[i] = wp.select(data[i], False, True)
def test_bool_identity_ops(test, device):
rng = np.random.default_rng(123)
dim_x = 10
rand_np = rng.random(dim_x) > 0.5
data_array = wp.array(data=rand_np, device=device)
test.assertEqual(data_array.dtype, wp.bool)
wp.launch(identity_test, dim=data_array.shape, inputs=[data_array], device=device)
assert_np_equal(data_array.numpy(), rand_np)
@wp.kernel
def check_compile_constant(result: wp.array(dtype=wp.bool)):
if TRUE_CONSTANT:
result[0] = TRUE_CONSTANT
else:
result[0] = False
def test_bool_constant(test, device):
compile_constant_value = wp.zeros(1, dtype=wp.bool, device=device)
wp.launch(check_compile_constant, 1, inputs=[compile_constant_value], device=device)
test.assertTrue(compile_constant_value.numpy()[0])
# Repeat the comparison with dtype=bool for the array
compile_constant_value = wp.zeros(1, dtype=bool, device=device)
wp.launch(check_compile_constant, 1, inputs=[compile_constant_value], device=device)
test.assertTrue(compile_constant_value.numpy()[0])
vec3bool = wp.vec(length=3, dtype=wp.bool)
bool_selector_vec = wp.constant(vec3bool([True, False, True]))
@wp.kernel
def sum_from_bool_vec(sum_array: wp.array(dtype=wp.int32)):
i = wp.tid()
if bool_selector_vec[0]:
sum_array[i] = sum_array[i] + 1
if bool_selector_vec[1]:
sum_array[i] = sum_array[i] + 2
if bool_selector_vec[2]:
sum_array[i] = sum_array[i] + 4
def test_bool_constant_vec(test, device):
result_array = wp.zeros(10, dtype=wp.int32, device=device)
wp.launch(sum_from_bool_vec, result_array.shape, inputs=[result_array], device=device)
assert_np_equal(result_array.numpy(), np.full(result_array.shape, 5))
mat22bool = wp.mat((2, 2), dtype=wp.bool)
bool_selector_mat = wp.constant(mat22bool([True, False, False, True]))
@wp.kernel
def sum_from_bool_mat(sum_array: wp.array(dtype=wp.int32)):
i = wp.tid()
if bool_selector_mat[0, 0]:
sum_array[i] = sum_array[i] + 1
if bool_selector_mat[0, 1]:
sum_array[i] = sum_array[i] + 2
if bool_selector_mat[1, 0]:
sum_array[i] = sum_array[i] + 4
if bool_selector_mat[1, 1]:
sum_array[i] = sum_array[i] + 8
def test_bool_constant_mat(test, device):
result_array = wp.zeros(10, dtype=wp.int32, device=device)
wp.launch(sum_from_bool_mat, result_array.shape, inputs=[result_array], device=device)
assert_np_equal(result_array.numpy(), np.full(result_array.shape, 9))
vec3bool_type = wp.types.vector(length=3, dtype=bool)
@wp.kernel
def test_bool_vec_anonymous_typing():
# Zero initialize
wp.expect_eq(vec3bool_type(), wp.vector(False, False, False))
# Scalar initialize
wp.expect_eq(vec3bool_type(True), wp.vector(True, True, True))
# Component-wise initialize
wp.expect_eq(vec3bool_type(True, False, True), wp.vector(True, False, True))
def test_bool_vec_typing(test, device):
# Zero initialize
vec3bool_z = vec3bool_type()
test.assertEqual(tuple(vec3bool_z), (False, False, False))
# Scalar initialize
vec3bool_s = vec3bool_type(True)
test.assertEqual(tuple(vec3bool_s), (True, True, True))
# Component-wise initialize
vec3bool_c = vec3bool_type(True, False, True)
test.assertEqual(tuple(vec3bool_c), (True, False, True))
wp.launch(test_bool_vec_anonymous_typing, (1,), inputs=[], device=device)
mat22bool_type = wp.types.matrix((2, 2), dtype=bool)
@wp.kernel
def test_bool_mat_anonymous_typing():
# Zero initialize
wp.expect_eq(mat22bool_type(), wp.matrix(False, False, False, False, shape=(2, 2)))
# Scalar initialize
wp.expect_eq(mat22bool_type(True), wp.matrix(True, True, True, True, shape=(2, 2)))
# Component-wise initialize
wp.expect_eq(mat22bool_type(True, False, True, False), wp.matrix(True, False, True, False, shape=(2, 2)))
def test_bool_mat_typing(test, device):
# Zero initialize
mat22bool_z = mat22bool_type()
test.assertEqual(tuple(mat22bool_z), ((False, False), (False, False)))
# Scalar initialize
mat22bool_s = mat22bool_type(True)
test.assertEqual(tuple(mat22bool_s), ((True, True), (True, True)))
# Component-wise initialize
mat22bool_c = mat22bool_type(True, False, True, False)
test.assertEqual(tuple(mat22bool_c), ((True, False), (True, False)))
wp.launch(test_bool_mat_anonymous_typing, (1,), inputs=[], device=device)
devices = get_test_devices()
class TestBool(unittest.TestCase):
pass
add_function_test(TestBool, "test_bool_identity_ops", test_bool_identity_ops, devices=devices)
add_function_test(TestBool, "test_bool_constant", test_bool_constant, devices=devices)
add_function_test(TestBool, "test_bool_constant_vec", test_bool_constant_vec, devices=devices)
add_function_test(TestBool, "test_bool_constant_mat", test_bool_constant_mat, devices=devices)
add_function_test(TestBool, "test_bool_vec_typing", test_bool_vec_typing, devices=devices)
add_function_test(TestBool, "test_bool_mat_typing", test_bool_mat_typing, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 6,478 | Python | 29.41784 | 109 | 0.676443 |
NVIDIA/warp/warp/tests/test_mesh_query_aabb.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
@wp.func
def min_vec3(a: wp.vec3, b: wp.vec3):
return wp.vec3(wp.min(a[0], b[0]), wp.min(a[1], b[1]), wp.min(a[2], b[2]))
@wp.func
def max_vec3(a: wp.vec3, b: wp.vec3):
return wp.vec3(wp.max(a[0], b[0]), wp.max(a[1], b[1]), wp.max(a[2], b[2]))
@wp.kernel
def compute_bounds(
indices: wp.array(dtype=int),
positions: wp.array(dtype=wp.vec3),
lowers: wp.array(dtype=wp.vec3),
uppers: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
i = indices[tid * 3 + 0]
j = indices[tid * 3 + 1]
k = indices[tid * 3 + 2]
x0 = positions[i] # point zero
x1 = positions[j] # point one
x2 = positions[k] # point two
lower = min_vec3(min_vec3(x0, x1), x2)
upper = max_vec3(max_vec3(x0, x1), x2)
lowers[tid] = lower
uppers[tid] = upper
@wp.kernel
def compute_num_contacts(
lowers: wp.array(dtype=wp.vec3), uppers: wp.array(dtype=wp.vec3), mesh_id: wp.uint64, counts: wp.array(dtype=int)
):
face_index = int(0)
face_u = float(0.0)
face_v = float(0.0)
sign = float(0.0)
tid = wp.tid()
upper = uppers[tid]
lower = lowers[tid]
query = wp.mesh_query_aabb(mesh_id, lower, upper)
count = int(0)
# index = int(-1)
# while wp.mesh_query_aabb_next(query, index):
for _index in query:
count = count + 1
counts[tid] = count
def test_compute_bounds(test, device):
# create two touching triangles.
points = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [-1, -1, 1]])
indices = np.array([0, 1, 2, 1, 2, 3])
m = wp.Mesh(
points=wp.array(points, dtype=wp.vec3, device=device),
indices=wp.array(indices, dtype=int, device=device),
)
num_tris = int(len(indices) / 3)
# First compute bounds of each of the triangles.
lowers = wp.empty(n=num_tris, dtype=wp.vec3, device=device)
uppers = wp.empty_like(lowers)
wp.launch(
kernel=compute_bounds,
dim=num_tris,
inputs=[m.indices, m.points],
outputs=[lowers, uppers],
device=device,
)
lower_view = lowers.numpy()
upper_view = uppers.numpy()
# Confirm the bounds of each triangle are correct.
test.assertTrue(lower_view[0][0] == 0)
test.assertTrue(lower_view[0][1] == 0)
test.assertTrue(lower_view[0][2] == 0)
test.assertTrue(upper_view[0][0] == 1)
test.assertTrue(upper_view[0][1] == 1)
test.assertTrue(upper_view[0][2] == 0)
test.assertTrue(lower_view[1][0] == -1)
test.assertTrue(lower_view[1][1] == -1)
test.assertTrue(lower_view[1][2] == 0)
test.assertTrue(upper_view[1][0] == 1)
test.assertTrue(upper_view[1][1] == 1)
test.assertTrue(upper_view[1][2] == 1)
def test_mesh_query_aabb_count_overlap(test, device):
# create two touching triangles.
points = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [-1, -1, 1]])
indices = np.array([0, 1, 2, 1, 2, 3])
m = wp.Mesh(
points=wp.array(points, dtype=wp.vec3, device=device),
indices=wp.array(indices, dtype=int, device=device),
)
num_tris = int(len(indices) / 3)
# Compute AABB of each of the triangles.
lowers = wp.empty(n=num_tris, dtype=wp.vec3, device=device)
uppers = wp.empty_like(lowers)
wp.launch(
kernel=compute_bounds,
dim=num_tris,
inputs=[m.indices, m.points],
outputs=[lowers, uppers],
device=device,
)
counts = wp.empty(n=num_tris, dtype=int, device=device)
wp.launch(
kernel=compute_num_contacts,
dim=num_tris,
inputs=[lowers, uppers, m.id],
outputs=[counts],
device=device,
)
view = counts.numpy()
    # The two triangles share a vertex, so their AABBs overlap and each query returns a count of 2.
for c in view:
test.assertTrue(c == 2)
def test_mesh_query_aabb_count_nonoverlap(test, device):
# create two separate triangles.
points = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [10, 0, 0], [10, 1, 0], [10, 0, 1]])
indices = np.array([0, 1, 2, 3, 4, 5])
m = wp.Mesh(
points=wp.array(points, dtype=wp.vec3, device=device),
indices=wp.array(indices, dtype=int, device=device),
)
num_tris = int(len(indices) / 3)
lowers = wp.empty(n=num_tris, dtype=wp.vec3, device=device)
uppers = wp.empty_like(lowers)
wp.launch(
kernel=compute_bounds,
dim=num_tris,
inputs=[m.indices, m.points],
outputs=[lowers, uppers],
device=device,
)
counts = wp.empty(n=num_tris, dtype=int, device=device)
wp.launch(
kernel=compute_num_contacts,
dim=num_tris,
inputs=[lowers, uppers, m.id],
outputs=[counts],
device=device,
)
view = counts.numpy()
    # Each triangle's AABB overlaps only itself; the triangles are too far apart for their AABBs to overlap each other.
for c in view:
test.assertTrue(c == 1)
devices = get_test_devices()
class TestMeshQueryAABBMethods(unittest.TestCase):
def test_mesh_query_aabb_codegen_adjoints_with_select(self):
def kernel_fn(
mesh: wp.uint64,
):
v = wp.vec3(0.0, 0.0, 0.0)
if True:
query = wp.mesh_query_aabb(mesh, v, v)
else:
query = wp.mesh_query_aabb(mesh, v, v)
wp.Kernel(func=kernel_fn)
add_function_test(TestMeshQueryAABBMethods, "test_compute_bounds", test_compute_bounds, devices=devices)
add_function_test(
TestMeshQueryAABBMethods, "test_mesh_query_aabb_count_overlap", test_mesh_query_aabb_count_overlap, devices=devices
)
add_function_test(
TestMeshQueryAABBMethods,
"test_mesh_query_aabb_count_nonoverlap",
test_mesh_query_aabb_count_nonoverlap,
devices=devices,
)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 6,307 | Python | 26.788546 | 119 | 0.611701 |
NVIDIA/warp/warp/tests/test_grad_customs.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
# atomic add function that memorizes which thread incremented the counter
# so that the correct counter value per thread can be used in the replay
# phase of the backward pass
@wp.func
def reversible_increment(
counter: wp.array(dtype=int), counter_index: int, value: int, thread_values: wp.array(dtype=int), tid: int
):
"""This is a docstring"""
next_index = wp.atomic_add(counter, counter_index, value)
thread_values[tid] = next_index
return next_index
@wp.func_replay(reversible_increment)
def replay_reversible_increment(
counter: wp.array(dtype=int), counter_index: int, value: int, thread_values: wp.array(dtype=int), tid: int
):
"""This is a docstring"""
return thread_values[tid]
def test_custom_replay_grad(test, device):
num_threads = 128
counter = wp.zeros(1, dtype=wp.int32, device=device)
thread_ids = wp.zeros(num_threads, dtype=wp.int32, device=device)
inputs = wp.array(np.arange(num_threads, dtype=np.float32), device=device, requires_grad=True)
outputs = wp.zeros_like(inputs)
@wp.kernel
def run_atomic_add(
input: wp.array(dtype=float),
counter: wp.array(dtype=int),
thread_values: wp.array(dtype=int),
output: wp.array(dtype=float),
):
tid = wp.tid()
idx = reversible_increment(counter, 0, 1, thread_values, tid)
output[idx] = input[idx] ** 2.0
tape = wp.Tape()
with tape:
wp.launch(
run_atomic_add, dim=num_threads, inputs=[inputs, counter, thread_ids], outputs=[outputs], device=device
)
tape.backward(grads={outputs: wp.ones(num_threads, dtype=wp.float32, device=device)})
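    # each thread writes input[idx] ** 2 to the slot it claimed from the counter,
    # so the expected gradient with respect to the inputs is simply 2 * input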
assert_np_equal(inputs.grad.numpy(), 2.0 * inputs.numpy(), tol=1e-4)
@wp.func
def overload_fn(x: float, y: float):
"""This is a docstring"""
return x * 3.0 + y / 3.0, y**2.5
@wp.func_grad(overload_fn)
def overload_fn_grad(x: float, y: float, adj_ret0: float, adj_ret1: float):
"""This is a docstring"""
wp.adjoint[x] += x * adj_ret0 * 42.0 + y * adj_ret1 * 10.0
wp.adjoint[y] += y * adj_ret1 * 3.0
@wp.struct
class MyStruct:
"""This is a docstring"""
scalar: float
vec: wp.vec3
@wp.func
def overload_fn(x: MyStruct):
"""This is a docstring"""
return x.vec[0] * x.vec[1] * x.vec[2] * 4.0, wp.length(x.vec), x.scalar**0.5
@wp.func_grad(overload_fn)
def overload_fn_grad(x: MyStruct, adj_ret0: float, adj_ret1: float, adj_ret2: float):
"""This is a docstring"""
wp.adjoint[x.scalar] += x.scalar * adj_ret0 * 10.0
wp.adjoint[x.vec][0] += adj_ret0 * x.vec[1] * x.vec[2] * 20.0
wp.adjoint[x.vec][1] += adj_ret1 * x.vec[0] * x.vec[2] * 30.0
wp.adjoint[x.vec][2] += adj_ret2 * x.vec[0] * x.vec[1] * 40.0
@wp.kernel
def run_overload_float_fn(
xs: wp.array(dtype=float), ys: wp.array(dtype=float), output0: wp.array(dtype=float), output1: wp.array(dtype=float)
):
"""This is a docstring"""
i = wp.tid()
out0, out1 = overload_fn(xs[i], ys[i])
output0[i] = out0
output1[i] = out1
@wp.kernel
def run_overload_struct_fn(xs: wp.array(dtype=MyStruct), output: wp.array(dtype=float)):
i = wp.tid()
out0, out1, out2 = overload_fn(xs[i])
output[i] = out0 + out1 + out2
def test_custom_overload_grad(test, device):
dim = 3
xs_float = wp.array(np.arange(1.0, dim + 1.0), dtype=wp.float32, requires_grad=True, device=device)
ys_float = wp.array(np.arange(10.0, dim + 10.0), dtype=wp.float32, requires_grad=True, device=device)
out0_float = wp.zeros(dim, device=device)
out1_float = wp.zeros(dim, device=device)
tape = wp.Tape()
with tape:
wp.launch(
run_overload_float_fn, dim=dim, inputs=[xs_float, ys_float], outputs=[out0_float, out1_float], device=device
)
tape.backward(
grads={
out0_float: wp.ones(dim, dtype=wp.float32, device=device),
out1_float: wp.ones(dim, dtype=wp.float32, device=device),
}
)
assert_np_equal(xs_float.grad.numpy(), xs_float.numpy() * 42.0 + ys_float.numpy() * 10.0)
assert_np_equal(ys_float.grad.numpy(), ys_float.numpy() * 3.0)
x0 = MyStruct()
x0.vec = wp.vec3(1.0, 2.0, 3.0)
x0.scalar = 4.0
x1 = MyStruct()
x1.vec = wp.vec3(5.0, 6.0, 7.0)
x1.scalar = -1.0
x2 = MyStruct()
x2.vec = wp.vec3(8.0, 9.0, 10.0)
x2.scalar = 19.0
xs_struct = wp.array([x0, x1, x2], dtype=MyStruct, requires_grad=True, device=device)
out_struct = wp.zeros(dim, device=device)
tape = wp.Tape()
with tape:
wp.launch(run_overload_struct_fn, dim=dim, inputs=[xs_struct], outputs=[out_struct], device=device)
tape.backward(grads={out_struct: wp.ones(dim, dtype=wp.float32, device=device)})
xs_struct_np = xs_struct.numpy()
struct_grads = xs_struct.grad.numpy()
# fmt: off
assert_np_equal(
np.array([g[0] for g in struct_grads]),
np.array([g[0] * 10.0 for g in xs_struct_np]))
assert_np_equal(
np.array([g[1][0] for g in struct_grads]),
np.array([g[1][1] * g[1][2] * 20.0 for g in xs_struct_np]))
assert_np_equal(
np.array([g[1][1] for g in struct_grads]),
np.array([g[1][0] * g[1][2] * 30.0 for g in xs_struct_np]))
assert_np_equal(
np.array([g[1][2] for g in struct_grads]),
np.array([g[1][0] * g[1][1] * 40.0 for g in xs_struct_np]))
# fmt: on
def test_custom_import_grad(test, device):
from warp.tests.aux_test_grad_customs import aux_custom_fn
@wp.kernel
def run_defined_float_fn(
xs: wp.array(dtype=float),
ys: wp.array(dtype=float),
output0: wp.array(dtype=float),
output1: wp.array(dtype=float),
):
i = wp.tid()
out0, out1 = aux_custom_fn(xs[i], ys[i])
output0[i] = out0
output1[i] = out1
dim = 3
xs_float = wp.array(np.arange(1.0, dim + 1.0), dtype=wp.float32, requires_grad=True, device=device)
ys_float = wp.array(np.arange(10.0, dim + 10.0), dtype=wp.float32, requires_grad=True, device=device)
out0_float = wp.zeros(dim, device=device)
out1_float = wp.zeros(dim, device=device)
tape = wp.Tape()
with tape:
wp.launch(
run_defined_float_fn, dim=dim, inputs=[xs_float, ys_float], outputs=[out0_float, out1_float], device=device
)
tape.backward(
grads={
out0_float: wp.ones(dim, dtype=wp.float32, device=device),
out1_float: wp.ones(dim, dtype=wp.float32, device=device),
}
)
assert_np_equal(xs_float.grad.numpy(), xs_float.numpy() * 42.0 + ys_float.numpy() * 10.0)
assert_np_equal(ys_float.grad.numpy(), ys_float.numpy() * 3.0)
@wp.func
def sigmoid(x: float):
return 1.0 / (1.0 + wp.exp(-x))
@wp.func_grad(sigmoid)
def adj_sigmoid(x: float, adj: float):
# unused function to test that we don't run into infinite recursion when calling
# the forward function from within the gradient function
wp.adjoint[x] += adj * sigmoid(x) * (1.0 - sigmoid(x))
@wp.func
def sigmoid_no_return(i: int, xs: wp.array(dtype=float), ys: wp.array(dtype=float)):
# test function that does not return anything
ys[i] = sigmoid(xs[i])
@wp.func_grad(sigmoid_no_return)
def adj_sigmoid_no_return(i: int, xs: wp.array(dtype=float), ys: wp.array(dtype=float)):
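    # ys[i] already holds sigmoid(xs[i]) from the forward pass, and
    # d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)), so the stored output is
    # reused here instead of recomputing the forward function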
wp.adjoint[xs][i] += ys[i] * (1.0 - ys[i])
@wp.kernel
def eval_sigmoid(xs: wp.array(dtype=float), ys: wp.array(dtype=float)):
i = wp.tid()
sigmoid_no_return(i, xs, ys)
def test_custom_grad_no_return(test, device):
xs = wp.array([1.0, 2.0, 3.0, 4.0], dtype=wp.float32, requires_grad=True, device=device)
ys = wp.zeros_like(xs, device=device)
ys.grad.fill_(1.0)
tape = wp.Tape()
with tape:
wp.launch(eval_sigmoid, dim=len(xs), inputs=[xs], outputs=[ys], device=device)
tape.backward()
sigmoids = ys.numpy()
grad = xs.grad.numpy()
assert_np_equal(grad, sigmoids * (1.0 - sigmoids))
@wp.func
def dense_gemm(
m: int,
n: int,
p: int,
transpose_A: bool,
transpose_B: bool,
add_to_C: bool,
A: wp.array(dtype=float),
B: wp.array(dtype=float),
# outputs
C: wp.array(dtype=float),
):
    # this function doesn't get called, but it is an important test for code generation:
    # multiply an `m x p` matrix A by a `p x n` matrix B to produce an `m x n` matrix C
for i in range(m):
for j in range(n):
sum = float(0.0)
for k in range(p):
if transpose_A:
a_i = k * m + i
else:
a_i = i * p + k
if transpose_B:
b_j = j * p + k
else:
b_j = k * n + j
sum += A[a_i] * B[b_j]
if add_to_C:
C[i * n + j] += sum
else:
C[i * n + j] = sum
@wp.func_grad(dense_gemm)
def adj_dense_gemm(
m: int,
n: int,
p: int,
transpose_A: bool,
transpose_B: bool,
add_to_C: bool,
A: wp.array(dtype=float),
B: wp.array(dtype=float),
# outputs
C: wp.array(dtype=float),
):
# code generation would break here if we didn't defer building the custom grad
# function until after the forward functions + kernels of the module have been built
add_to_C = True
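    # note: the adjoints of C = op(A) @ op(B) are adj_A += adj_C @ op(B)^T and
    # adj_B += op(A)^T @ adj_C; the calls below express both through the
    # transpose flags so the same forward routine accumulates them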
if transpose_A:
dense_gemm(p, m, n, False, True, add_to_C, B, wp.adjoint[C], wp.adjoint[A])
dense_gemm(p, n, m, False, False, add_to_C, A, wp.adjoint[C], wp.adjoint[B])
else:
dense_gemm(m, p, n, False, not transpose_B, add_to_C, wp.adjoint[C], B, wp.adjoint[A])
dense_gemm(p, n, m, True, False, add_to_C, A, wp.adjoint[C], wp.adjoint[B])
devices = get_test_devices()
class TestGradCustoms(unittest.TestCase):
def test_wrapped_docstring(self):
self.assertTrue("This is a docstring" in reversible_increment.__doc__)
self.assertTrue("This is a docstring" in replay_reversible_increment.__doc__)
self.assertTrue("This is a docstring" in overload_fn.__doc__)
self.assertTrue("This is a docstring" in overload_fn_grad.__doc__)
self.assertTrue("This is a docstring" in run_overload_float_fn.__doc__)
self.assertTrue("This is a docstring" in MyStruct.__doc__)
add_function_test(TestGradCustoms, "test_custom_replay_grad", test_custom_replay_grad, devices=devices)
add_function_test(TestGradCustoms, "test_custom_overload_grad", test_custom_overload_grad, devices=devices)
add_function_test(TestGradCustoms, "test_custom_import_grad", test_custom_import_grad, devices=devices)
add_function_test(TestGradCustoms, "test_custom_grad_no_return", test_custom_grad_no_return, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=False)
| 11,323 | Python | 33.108434 | 120 | 0.619271 |
NVIDIA/warp/warp/tests/test_options.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import contextlib
import io
import unittest
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def scale(
x: wp.array(dtype=float),
y: wp.array(dtype=float),
):
y[0] = x[0] ** 2.0
@wp.kernel(enable_backward=True)
def scale_1(
x: wp.array(dtype=float),
y: wp.array(dtype=float),
):
y[0] = x[0] ** 2.0
@wp.kernel(enable_backward=False)
def scale_2(
x: wp.array(dtype=float),
y: wp.array(dtype=float),
):
y[0] = x[0] ** 2.0
def test_options_1(test, device):
x = wp.array([3.0], dtype=float, requires_grad=True, device=device)
y = wp.zeros_like(x)
wp.set_module_options({"enable_backward": False})
tape = wp.Tape()
with tape:
wp.launch(scale, dim=1, inputs=[x, y], device=device)
with contextlib.redirect_stdout(io.StringIO()) as f:
tape.backward(y)
expected = f"Warp UserWarning: Running the tape backwards may produce incorrect gradients because recorded kernel {scale.key} is defined in a module with the option 'enable_backward=False' set.\n"
assert f.getvalue() == expected
assert_np_equal(tape.gradients[x].numpy(), np.array(0.0))
def test_options_2(test, device):
x = wp.array([3.0], dtype=float, requires_grad=True, device=device)
y = wp.zeros_like(x)
wp.set_module_options({"enable_backward": True})
tape = wp.Tape()
with tape:
wp.launch(scale, dim=1, inputs=[x, y], device=device)
tape.backward(y)
assert_np_equal(tape.gradients[x].numpy(), np.array(6.0))
def test_options_3(test, device):
x = wp.array([3.0], dtype=float, requires_grad=True, device=device)
y = wp.zeros_like(x)
wp.set_module_options({"enable_backward": False})
tape = wp.Tape()
with tape:
wp.launch(scale_1, dim=1, inputs=[x, y], device=device)
tape.backward(y)
assert_np_equal(tape.gradients[x].numpy(), np.array(6.0))
def test_options_4(test, device):
x = wp.array([3.0], dtype=float, requires_grad=True, device=device)
y = wp.zeros_like(x)
wp.set_module_options({"enable_backward": True})
tape = wp.Tape()
with tape:
wp.launch(scale_2, dim=1, inputs=[x, y], device=device)
with contextlib.redirect_stdout(io.StringIO()) as f:
tape.backward(y)
expected = f"Warp UserWarning: Running the tape backwards may produce incorrect gradients because recorded kernel {scale_2.key} is configured with the option 'enable_backward=False'.\n"
assert f.getvalue() == expected
assert_np_equal(tape.gradients[x].numpy(), np.array(0.0))
devices = get_test_devices()
class TestOptions(unittest.TestCase):
pass
add_function_test(TestOptions, "test_options_1", test_options_1, devices=devices)
add_function_test(TestOptions, "test_options_2", test_options_2, devices=devices)
add_function_test(TestOptions, "test_options_3", test_options_3, devices=devices)
add_function_test(TestOptions, "test_options_4", test_options_4, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 3,485 | Python | 27.57377 | 200 | 0.681492 |
NVIDIA/warp/warp/tests/test_indexedarray.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
from typing import Any
import numpy as np
import warp as wp
from warp.tests.test_array import FillStruct
from warp.tests.unittest_utils import *
@wp.kernel
def kernel_1d(a: wp.indexedarray(dtype=float), expected: wp.array(dtype=float)):
i = wp.tid()
wp.expect_eq(a[i], expected[i])
a[i] = 2.0 * a[i]
wp.atomic_add(a, i, 1.0)
wp.expect_eq(a[i], 2.0 * expected[i] + 1.0)
def test_indexedarray_1d(test, device):
values = np.arange(10, dtype=np.float32)
arr = wp.array(data=values, device=device)
indices = wp.array([1, 3, 5, 7, 9], dtype=int, device=device)
iarr = wp.indexedarray1d(arr, [indices])
test.assertEqual(iarr.dtype, arr.dtype)
test.assertEqual(iarr.ndim, 1)
test.assertEqual(iarr.shape, (5,))
test.assertEqual(iarr.size, 5)
expected_arr = wp.array(data=[1, 3, 5, 7, 9], dtype=float, device=device)
wp.launch(kernel_1d, dim=iarr.size, inputs=[iarr, expected_arr], device=device)
@wp.kernel
def kernel_2d(a: wp.indexedarray2d(dtype=float), expected: wp.array2d(dtype=float)):
i, j = wp.tid()
# check expected values
wp.expect_eq(a[i, j], expected[i, j])
# test wp.view()
wp.expect_eq(a[i][j], a[i, j])
a[i, j] = 2.0 * a[i, j]
wp.atomic_add(a, i, j, 1.0)
wp.expect_eq(a[i, j], 2.0 * expected[i, j] + 1.0)
def test_indexedarray_2d(test, device):
values = np.arange(100, dtype=np.float32).reshape((10, 10))
arr = wp.array(data=values, device=device)
indices0 = wp.array([1, 3], dtype=int, device=device)
indices1 = wp.array([2, 4, 8], dtype=int, device=device)
iarr = wp.indexedarray2d(arr, [indices0, indices1])
test.assertEqual(iarr.dtype, arr.dtype)
test.assertEqual(iarr.ndim, 2)
test.assertEqual(iarr.shape, (2, 3))
test.assertEqual(iarr.size, 6)
expected_values = [[12, 14, 18], [32, 34, 38]]
expected_arr = wp.array(data=expected_values, dtype=float, device=device)
wp.launch(kernel_2d, dim=iarr.shape, inputs=[iarr, expected_arr], device=device)
@wp.kernel
def kernel_3d(a: wp.indexedarray3d(dtype=float), expected: wp.array3d(dtype=float)):
i, j, k = wp.tid()
# check expected values
wp.expect_eq(a[i, j, k], expected[i, j, k])
# test wp.view()
wp.expect_eq(a[i][j][k], a[i, j, k])
wp.expect_eq(a[i, j][k], a[i, j, k])
wp.expect_eq(a[i][j, k], a[i, j, k])
a[i, j, k] = 2.0 * a[i, j, k]
wp.atomic_add(a, i, j, k, 1.0)
wp.expect_eq(a[i, j, k], 2.0 * expected[i, j, k] + 1.0)
def test_indexedarray_3d(test, device):
values = np.arange(1000, dtype=np.float32).reshape((10, 10, 10))
arr = wp.array(data=values, device=device)
indices0 = wp.array([1, 3], dtype=int, device=device)
indices1 = wp.array([2, 4, 8], dtype=int, device=device)
indices2 = wp.array([0, 5], dtype=int, device=device)
iarr = wp.indexedarray3d(arr, [indices0, indices1, indices2])
test.assertEqual(iarr.dtype, arr.dtype)
test.assertEqual(iarr.ndim, 3)
test.assertEqual(iarr.shape, (2, 3, 2))
test.assertEqual(iarr.size, 12)
expected_values = [
[[120, 125], [140, 145], [180, 185]],
[[320, 325], [340, 345], [380, 385]],
]
expected_arr = wp.array(data=expected_values, dtype=float, device=device)
wp.launch(kernel_3d, dim=iarr.shape, inputs=[iarr, expected_arr], device=device)
@wp.kernel
def kernel_4d(a: wp.indexedarray4d(dtype=float), expected: wp.array4d(dtype=float)):
i, j, k, l = wp.tid()
# check expected values
wp.expect_eq(a[i, j, k, l], expected[i, j, k, l])
# test wp.view()
wp.expect_eq(a[i][j][k][l], a[i, j, k, l])
wp.expect_eq(a[i][j, k, l], a[i, j, k, l])
wp.expect_eq(a[i, j][k, l], a[i, j, k, l])
wp.expect_eq(a[i, j, k][l], a[i, j, k, l])
a[i, j, k, l] = 2.0 * a[i, j, k, l]
wp.atomic_add(a, i, j, k, l, 1.0)
wp.expect_eq(a[i, j, k, l], 2.0 * expected[i, j, k, l] + 1.0)
def test_indexedarray_4d(test, device):
values = np.arange(10000, dtype=np.float32).reshape((10, 10, 10, 10))
arr = wp.array(data=values, device=device)
indices0 = wp.array([1, 3], dtype=int, device=device)
indices1 = wp.array([2, 4, 8], dtype=int, device=device)
indices2 = wp.array([0, 5], dtype=int, device=device)
indices3 = wp.array([6, 7, 9], dtype=int, device=device)
iarr = wp.indexedarray4d(arr, [indices0, indices1, indices2, indices3])
test.assertEqual(iarr.dtype, arr.dtype)
test.assertEqual(iarr.ndim, 4)
test.assertEqual(iarr.shape, (2, 3, 2, 3))
test.assertEqual(iarr.size, 36)
expected_values = [
[
[[1206, 1207, 1209], [1256, 1257, 1259]],
[[1406, 1407, 1409], [1456, 1457, 1459]],
[[1806, 1807, 1809], [1856, 1857, 1859]],
],
[
[[3206, 3207, 3209], [3256, 3257, 3259]],
[[3406, 3407, 3409], [3456, 3457, 3459]],
[[3806, 3807, 3809], [3856, 3857, 3859]],
],
]
expected_arr = wp.array(data=expected_values, dtype=float, device=device)
wp.launch(kernel_4d, dim=iarr.shape, inputs=[iarr, expected_arr], device=device)
def test_indexedarray_mixed(test, device):
# [[[ 0, 1, 2, 3],
# [ 4, 5, 6, 7],
# [ 8, 9, 10, 11],
# [12, 13, 14, 15]],
# [[16, 17, 18, 19],
# [20, 21, 22, 23],
# [24, 25, 26, 27],
# [28, 29, 30, 31]],
# [[32, 33, 34, 35],
# [36, 37, 38, 39],
# [40, 41, 42, 43],
    # [44, 45, 46, 47]],
# [[48, 49, 50, 51],
# [52, 53, 54, 55],
# [56, 57, 58, 59],
    # [60, 61, 62, 63]]]
values = np.arange(64, dtype=np.float32).reshape((4, 4, 4))
indices = wp.array([0, 3], dtype=int, device=device)
# -----
arr = wp.array(data=values, device=device)
iarr = wp.indexedarray(arr, [indices, None, None])
test.assertEqual(iarr.dtype, arr.dtype)
test.assertEqual(iarr.ndim, 3)
test.assertEqual(iarr.shape, (2, 4, 4))
test.assertEqual(iarr.size, 32)
expected_values = [
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]],
[[48, 49, 50, 51], [52, 53, 54, 55], [56, 57, 58, 59], [60, 61, 62, 63]],
]
expected_arr = wp.array(data=expected_values, dtype=float, device=device)
wp.launch(kernel_3d, dim=iarr.shape, inputs=[iarr, expected_arr], device=device)
# -----
arr = wp.array(data=values, device=device)
iarr = wp.indexedarray(arr, [indices, indices, None])
test.assertEqual(iarr.dtype, arr.dtype)
test.assertEqual(iarr.ndim, 3)
test.assertEqual(iarr.shape, (2, 2, 4))
test.assertEqual(iarr.size, 16)
expected_values = [[[0, 1, 2, 3], [12, 13, 14, 15]], [[48, 49, 50, 51], [60, 61, 62, 63]]]
expected_arr = wp.array(data=expected_values, dtype=float, device=device)
wp.launch(kernel_3d, dim=iarr.shape, inputs=[iarr, expected_arr], device=device)
# -----
arr = wp.array(data=values, device=device)
iarr = wp.indexedarray(arr, [indices, None, indices])
test.assertEqual(iarr.dtype, arr.dtype)
test.assertEqual(iarr.ndim, 3)
test.assertEqual(iarr.shape, (2, 4, 2))
test.assertEqual(iarr.size, 16)
expected_values = [[[0, 3], [4, 7], [8, 11], [12, 15]], [[48, 51], [52, 55], [56, 59], [60, 63]]]
expected_arr = wp.array(data=expected_values, dtype=float, device=device)
wp.launch(kernel_3d, dim=iarr.shape, inputs=[iarr, expected_arr], device=device)
# -----
arr = wp.array(data=values, device=device)
iarr = wp.indexedarray(arr, [None, indices, indices])
test.assertEqual(iarr.dtype, arr.dtype)
test.assertEqual(iarr.ndim, 3)
test.assertEqual(iarr.shape, (4, 2, 2))
test.assertEqual(iarr.size, 16)
expected_values = [[[0, 3], [12, 15]], [[16, 19], [28, 31]], [[32, 35], [44, 47]], [[48, 51], [60, 63]]]
expected_arr = wp.array(data=expected_values, dtype=float, device=device)
wp.launch(kernel_3d, dim=iarr.shape, inputs=[iarr, expected_arr], device=device)
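# Editorial sketch (not part of the original test suite): mixing index arrays with
# None entries selects along the indexed axes only, mirroring NumPy fancy indexing
# on a single axis. Relies on this module's `wp`/`np` imports; the helper name and
# default-device argument are assumptions for illustration and nothing calls it.
def _example_mixed_indexing(device=None):
    values = np.arange(64, dtype=np.float32).reshape((4, 4, 4))
    arr = wp.array(data=values, device=device)
    indices = wp.array([0, 3], dtype=int, device=device)
    iarr = wp.indexedarray(arr, [indices, None, None])  # index axis 0 only
    expected = values[[0, 3], :, :]  # NumPy equivalent of the same selection
    assert iarr.shape == expected.shape  # both (2, 4, 4)
    assert np.array_equal(iarr.numpy(), expected)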
vec2i = wp.types.vector(length=2, dtype=wp.int32)
vec3i = wp.types.vector(length=3, dtype=wp.int32)
vec4i = wp.types.vector(length=4, dtype=wp.int32)
@wp.kernel
def shape_kernel_1d(arr: wp.indexedarray1d(dtype=float), expected: int):
wp.expect_eq(arr.shape[0], expected)
@wp.kernel
def shape_kernel_2d(arr: wp.indexedarray2d(dtype=float), expected: vec2i):
wp.expect_eq(arr.shape[0], expected[0])
wp.expect_eq(arr.shape[1], expected[1])
# 1d slice
view = arr[0]
wp.expect_eq(view.shape[0], expected[1])
@wp.kernel
def shape_kernel_3d(arr: wp.indexedarray3d(dtype=float), expected: vec3i):
wp.expect_eq(arr.shape[0], expected[0])
wp.expect_eq(arr.shape[1], expected[1])
wp.expect_eq(arr.shape[2], expected[2])
# 2d slice
view2 = arr[0]
wp.expect_eq(view2.shape[0], expected[1])
wp.expect_eq(view2.shape[1], expected[2])
# 1d slice
view1 = arr[0, 0]
wp.expect_eq(view1.shape[0], expected[2])
@wp.kernel
def shape_kernel_4d(arr: wp.indexedarray4d(dtype=float), expected: vec4i):
wp.expect_eq(arr.shape[0], expected[0])
wp.expect_eq(arr.shape[1], expected[1])
wp.expect_eq(arr.shape[2], expected[2])
wp.expect_eq(arr.shape[3], expected[3])
# 3d slice
view3 = arr[0]
wp.expect_eq(view3.shape[0], expected[1])
wp.expect_eq(view3.shape[1], expected[2])
wp.expect_eq(view3.shape[2], expected[3])
# 2d slice
view2 = arr[0, 0]
wp.expect_eq(view2.shape[0], expected[2])
wp.expect_eq(view2.shape[1], expected[3])
# 1d slice
view1 = arr[0, 0, 0]
wp.expect_eq(view1.shape[0], expected[3])
def test_indexedarray_shape(test, device):
with wp.ScopedDevice(device):
data1 = wp.zeros(10, dtype=float)
data2 = wp.zeros((10, 20), dtype=float)
data3 = wp.zeros((10, 20, 30), dtype=float)
data4 = wp.zeros((10, 20, 30, 40), dtype=float)
indices1 = wp.array(data=[2, 7], dtype=int)
indices2 = wp.array(data=[2, 7, 12, 17], dtype=int)
indices3 = wp.array(data=[2, 7, 12, 17, 22, 27], dtype=int)
indices4 = wp.array(data=[2, 7, 12, 17, 22, 27, 32, 37], dtype=int)
ia1 = wp.indexedarray(data1, [indices1])
wp.launch(shape_kernel_1d, dim=1, inputs=[ia1, 2])
ia2_1 = wp.indexedarray(data2, [indices1, None])
ia2_2 = wp.indexedarray(data2, [None, indices2])
ia2_3 = wp.indexedarray(data2, [indices1, indices2])
wp.launch(shape_kernel_2d, dim=1, inputs=[ia2_1, vec2i(2, 20)])
wp.launch(shape_kernel_2d, dim=1, inputs=[ia2_2, vec2i(10, 4)])
wp.launch(shape_kernel_2d, dim=1, inputs=[ia2_3, vec2i(2, 4)])
ia3_1 = wp.indexedarray(data3, [indices1, None, None])
ia3_2 = wp.indexedarray(data3, [None, indices2, None])
ia3_3 = wp.indexedarray(data3, [None, None, indices3])
ia3_4 = wp.indexedarray(data3, [indices1, indices2, None])
ia3_5 = wp.indexedarray(data3, [indices1, None, indices3])
ia3_6 = wp.indexedarray(data3, [None, indices2, indices3])
ia3_7 = wp.indexedarray(data3, [indices1, indices2, indices3])
wp.launch(shape_kernel_3d, dim=1, inputs=[ia3_1, vec3i(2, 20, 30)])
wp.launch(shape_kernel_3d, dim=1, inputs=[ia3_2, vec3i(10, 4, 30)])
wp.launch(shape_kernel_3d, dim=1, inputs=[ia3_3, vec3i(10, 20, 6)])
wp.launch(shape_kernel_3d, dim=1, inputs=[ia3_4, vec3i(2, 4, 30)])
wp.launch(shape_kernel_3d, dim=1, inputs=[ia3_5, vec3i(2, 20, 6)])
wp.launch(shape_kernel_3d, dim=1, inputs=[ia3_6, vec3i(10, 4, 6)])
wp.launch(shape_kernel_3d, dim=1, inputs=[ia3_7, vec3i(2, 4, 6)])
ia4_1 = wp.indexedarray(data4, [indices1, None, None, None])
ia4_2 = wp.indexedarray(data4, [indices1, None, None, indices4])
ia4_3 = wp.indexedarray(data4, [None, indices2, indices3, None])
ia4_4 = wp.indexedarray(data4, [indices1, indices2, indices3, indices4])
wp.launch(shape_kernel_4d, dim=1, inputs=[ia4_1, vec4i(2, 20, 30, 40)])
wp.launch(shape_kernel_4d, dim=1, inputs=[ia4_2, vec4i(2, 20, 30, 8)])
wp.launch(shape_kernel_4d, dim=1, inputs=[ia4_3, vec4i(10, 4, 6, 40)])
wp.launch(shape_kernel_4d, dim=1, inputs=[ia4_4, vec4i(2, 4, 6, 8)])
wp.synchronize_device(device)
def test_indexedarray_getitem(test, device):
with wp.ScopedDevice(device):
data = wp.array(data=np.arange(1000, dtype=np.int32).reshape((10, 10, 10)))
I = wp.array(data=[0, 1, 2], dtype=int)
# use constructor
a1 = wp.indexedarray(data, [None, None, I])
a2 = wp.indexedarray(data, [None, I])
a3 = wp.indexedarray(data, [None, I, I])
a4 = wp.indexedarray(data, [I])
a5 = wp.indexedarray(data, [I, None, I])
a6 = wp.indexedarray(data, [I, I])
a7 = wp.indexedarray(data, [I, I, I])
# use array.__getitem__()
b1 = data[:, :, I]
b2 = data[:, I]
b3 = data[:, I, I]
b4 = data[I]
b5 = data[I, :, I]
b6 = data[I, I]
b7 = data[I, I, I]
test.assertEqual(type(a1), type(b1))
test.assertEqual(type(a2), type(b2))
test.assertEqual(type(a3), type(b3))
test.assertEqual(type(a4), type(b4))
test.assertEqual(type(a5), type(b5))
test.assertEqual(type(a6), type(b6))
test.assertEqual(type(a7), type(b7))
assert_np_equal(a1.numpy(), b1.numpy())
assert_np_equal(a2.numpy(), b2.numpy())
assert_np_equal(a3.numpy(), b3.numpy())
assert_np_equal(a4.numpy(), b4.numpy())
assert_np_equal(a5.numpy(), b5.numpy())
assert_np_equal(a6.numpy(), b6.numpy())
assert_np_equal(a7.numpy(), b7.numpy())
def test_indexedarray_slicing(test, device):
with wp.ScopedDevice(device):
data = wp.array(data=np.arange(1000, dtype=np.int32).reshape((10, 10, 10)))
# test equivalence of slicing and indexing the same range
s = slice(0, 3)
I = wp.array(data=[0, 1, 2], dtype=int)
a0 = data[s, s, s]
test.assertEqual(type(a0), wp.array)
a1 = data[s, s, I]
test.assertEqual(type(a1), wp.indexedarray)
a2 = data[s, I, s]
test.assertEqual(type(a2), wp.indexedarray)
a3 = data[s, I, I]
test.assertEqual(type(a3), wp.indexedarray)
a4 = data[I, s, s]
test.assertEqual(type(a4), wp.indexedarray)
a5 = data[I, s, I]
test.assertEqual(type(a5), wp.indexedarray)
a6 = data[I, I, s]
test.assertEqual(type(a6), wp.indexedarray)
a7 = data[I, I, I]
test.assertEqual(type(a7), wp.indexedarray)
expected = a0.numpy()
assert_np_equal(a1.numpy(), expected)
assert_np_equal(a2.numpy(), expected)
assert_np_equal(a3.numpy(), expected)
assert_np_equal(a4.numpy(), expected)
assert_np_equal(a5.numpy(), expected)
assert_np_equal(a6.numpy(), expected)
assert_np_equal(a7.numpy(), expected)
# generic increment kernels that work with any array (regular or indexed)
@wp.kernel
def inc_1d(a: Any):
i = wp.tid()
a[i] = a[i] + 1
@wp.kernel
def inc_2d(a: Any):
i, j = wp.tid()
a[i, j] = a[i, j] + 1
@wp.kernel
def inc_3d(a: Any):
i, j, k = wp.tid()
a[i, j, k] = a[i, j, k] + 1
@wp.kernel
def inc_4d(a: Any):
i, j, k, l = wp.tid()
a[i, j, k, l] = a[i, j, k, l] + 1
# optional overloads to avoid module reloading
wp.overload(inc_1d, [wp.array1d(dtype=int)])
wp.overload(inc_2d, [wp.array2d(dtype=int)])
wp.overload(inc_3d, [wp.array3d(dtype=int)])
wp.overload(inc_4d, [wp.array4d(dtype=int)])
wp.overload(inc_1d, [wp.indexedarray1d(dtype=int)])
wp.overload(inc_2d, [wp.indexedarray2d(dtype=int)])
wp.overload(inc_3d, [wp.indexedarray3d(dtype=int)])
wp.overload(inc_4d, [wp.indexedarray4d(dtype=int)])
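# Editorial note: the wp.overload calls above pre-declare the concrete array and
# indexed-array instantiations of the generic `inc_*` kernels, so launching them
# below with either array kind does not force the module to be rebuilt at launch
# time. A hypothetical extra instantiation for float data would follow the same
# pattern, e.g. wp.overload(inc_1d, [wp.indexedarray1d(dtype=float)]).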
def test_indexedarray_generics(test, device):
with wp.ScopedDevice(device):
data1 = wp.zeros((5,), dtype=int)
data2 = wp.zeros((5, 5), dtype=int)
data3 = wp.zeros((5, 5, 5), dtype=int)
data4 = wp.zeros((5, 5, 5, 5), dtype=int)
indices = wp.array(data=[0, 4], dtype=int)
ia1 = wp.indexedarray(data1, [indices])
ia2 = wp.indexedarray(data2, [indices, indices])
ia3 = wp.indexedarray(data3, [indices, indices, indices])
ia4 = wp.indexedarray(data4, [indices, indices, indices, indices])
wp.launch(inc_1d, dim=data1.shape, inputs=[data1])
wp.launch(inc_2d, dim=data2.shape, inputs=[data2])
wp.launch(inc_3d, dim=data3.shape, inputs=[data3])
wp.launch(inc_4d, dim=data4.shape, inputs=[data4])
wp.launch(inc_1d, dim=ia1.shape, inputs=[ia1])
wp.launch(inc_2d, dim=ia2.shape, inputs=[ia2])
wp.launch(inc_3d, dim=ia3.shape, inputs=[ia3])
wp.launch(inc_4d, dim=ia4.shape, inputs=[ia4])
expected1 = np.ones(5, dtype=np.int32)
expected1[0] = 2
expected1[4] = 2
expected2 = np.ones((5, 5), dtype=np.int32)
expected2[0, 0] = 2
expected2[0, 4] = 2
expected2[4, 0] = 2
expected2[4, 4] = 2
expected3 = np.ones((5, 5, 5), dtype=np.int32)
expected3[0, 0, 0] = 2
expected3[0, 0, 4] = 2
expected3[0, 4, 0] = 2
expected3[0, 4, 4] = 2
expected3[4, 0, 0] = 2
expected3[4, 0, 4] = 2
expected3[4, 4, 0] = 2
expected3[4, 4, 4] = 2
expected4 = np.ones((5, 5, 5, 5), dtype=np.int32)
expected4[0, 0, 0, 0] = 2
expected4[0, 0, 0, 4] = 2
expected4[0, 0, 4, 0] = 2
expected4[0, 0, 4, 4] = 2
expected4[0, 4, 0, 0] = 2
expected4[0, 4, 0, 4] = 2
expected4[0, 4, 4, 0] = 2
expected4[0, 4, 4, 4] = 2
expected4[4, 0, 0, 0] = 2
expected4[4, 0, 0, 4] = 2
expected4[4, 0, 4, 0] = 2
expected4[4, 0, 4, 4] = 2
expected4[4, 4, 0, 0] = 2
expected4[4, 4, 0, 4] = 2
expected4[4, 4, 4, 0] = 2
expected4[4, 4, 4, 4] = 2
assert_np_equal(data1.numpy(), expected1)
assert_np_equal(data2.numpy(), expected2)
assert_np_equal(data3.numpy(), expected3)
assert_np_equal(data4.numpy(), expected4)
assert_np_equal(ia1.numpy(), np.full((2,), 2, dtype=np.int32))
assert_np_equal(ia2.numpy(), np.full((2, 2), 2, dtype=np.int32))
assert_np_equal(ia3.numpy(), np.full((2, 2, 2), 2, dtype=np.int32))
assert_np_equal(ia4.numpy(), np.full((2, 2, 2, 2), 2, dtype=np.int32))
def test_indexedarray_empty(test, device):
# Test whether common operations work with empty (zero-sized) indexed arrays
# without throwing exceptions.
def test_empty_ops(ndim, nrows, ncols, wptype, nptype):
data_shape = (1,) * ndim
dtype_shape = ()
if wptype in wp.types.scalar_types:
# scalar, vector, or matrix
if ncols > 0:
if nrows > 0:
wptype = wp.types.matrix((nrows, ncols), wptype)
else:
wptype = wp.types.vector(ncols, wptype)
dtype_shape = wptype._shape_
fill_value = wptype(42)
else:
# struct
fill_value = wptype()
# create a data array
data = wp.empty(data_shape, dtype=wptype, device=device, requires_grad=True)
# create a zero-sized array of indices
indices = wp.empty(0, dtype=int, device=device)
a = data[indices]
        # we expect the indexed (first) dimension to be zero for the empty indexed array, the remaining dimensions unchanged
expected_shape = (0, *data_shape[1:])
test.assertEqual(a.size, 0)
test.assertEqual(a.shape, expected_shape)
# all of these methods should succeed with zero-sized arrays
a.zero_()
a.fill_(fill_value)
b = a.contiguous()
b = wp.empty_like(a)
b = wp.zeros_like(a)
b = wp.full_like(a, fill_value)
b = wp.clone(a)
wp.copy(a, b)
a.assign(b)
na = a.numpy()
test.assertEqual(na.size, 0)
test.assertEqual(na.shape, (*expected_shape, *dtype_shape))
test.assertEqual(na.dtype, nptype)
test.assertEqual(a.list(), [])
for ndim in range(1, 5):
# test with scalars, vectors, and matrices
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
# scalars
test_empty_ops(ndim, 0, 0, wptype, nptype)
for ncols in [2, 3, 4, 5]:
# vectors
test_empty_ops(ndim, 0, ncols, wptype, nptype)
# square matrices
test_empty_ops(ndim, ncols, ncols, wptype, nptype)
# non-square matrices
test_empty_ops(ndim, 2, 3, wptype, nptype)
test_empty_ops(ndim, 3, 2, wptype, nptype)
test_empty_ops(ndim, 3, 4, wptype, nptype)
test_empty_ops(ndim, 4, 3, wptype, nptype)
# test with structs
test_empty_ops(ndim, 0, 0, FillStruct, FillStruct.numpy_dtype())
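# Minimal illustrative sketch (editorial, assumes the default device): indexing any
# array with a zero-sized index array yields an empty indexed view that still
# supports the operations exercised above, e.g.
#   data = wp.zeros(4, dtype=float)
#   empty_view = data[wp.empty(0, dtype=int)]  # shape (0,), size 0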
def test_indexedarray_fill_scalar(test, device):
dim_x = 4
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
data1 = wp.zeros(dim_x, dtype=wptype, device=device)
data2 = wp.zeros((dim_x, dim_x), dtype=wptype, device=device)
data3 = wp.zeros((dim_x, dim_x, dim_x), dtype=wptype, device=device)
data4 = wp.zeros((dim_x, dim_x, dim_x, dim_x), dtype=wptype, device=device)
indices = wp.array(np.arange(0, dim_x, 2, dtype=np.int32), device=device)
a1 = data1[indices]
a2 = data2[indices]
a3 = data3[indices]
a4 = data4[indices]
assert_np_equal(a1.numpy(), np.zeros(a1.shape, dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros(a2.shape, dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros(a3.shape, dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros(a4.shape, dtype=nptype))
# fill with int value
fill_value = 42
a1.fill_(fill_value)
a2.fill_(fill_value)
a3.fill_(fill_value)
a4.fill_(fill_value)
assert_np_equal(a1.numpy(), np.full(a1.shape, fill_value, dtype=nptype))
assert_np_equal(a2.numpy(), np.full(a2.shape, fill_value, dtype=nptype))
assert_np_equal(a3.numpy(), np.full(a3.shape, fill_value, dtype=nptype))
assert_np_equal(a4.numpy(), np.full(a4.shape, fill_value, dtype=nptype))
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
assert_np_equal(a1.numpy(), np.zeros(a1.shape, dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros(a2.shape, dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros(a3.shape, dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros(a4.shape, dtype=nptype))
if wptype in wp.types.float_types:
# fill with float value
fill_value = 13.37
a1.fill_(fill_value)
a2.fill_(fill_value)
a3.fill_(fill_value)
a4.fill_(fill_value)
assert_np_equal(a1.numpy(), np.full(a1.shape, fill_value, dtype=nptype))
assert_np_equal(a2.numpy(), np.full(a2.shape, fill_value, dtype=nptype))
assert_np_equal(a3.numpy(), np.full(a3.shape, fill_value, dtype=nptype))
assert_np_equal(a4.numpy(), np.full(a4.shape, fill_value, dtype=nptype))
# fill with Warp scalar value
fill_value = wptype(17)
a1.fill_(fill_value)
a2.fill_(fill_value)
a3.fill_(fill_value)
a4.fill_(fill_value)
assert_np_equal(a1.numpy(), np.full(a1.shape, fill_value.value, dtype=nptype))
assert_np_equal(a2.numpy(), np.full(a2.shape, fill_value.value, dtype=nptype))
assert_np_equal(a3.numpy(), np.full(a3.shape, fill_value.value, dtype=nptype))
assert_np_equal(a4.numpy(), np.full(a4.shape, fill_value.value, dtype=nptype))
def test_indexedarray_fill_vector(test, device):
# test filling a vector array with scalar or vector values (vec_type, list, or numpy array)
dim_x = 4
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
# vector types
vector_types = [
wp.types.vector(2, wptype),
wp.types.vector(3, wptype),
wp.types.vector(4, wptype),
wp.types.vector(5, wptype),
]
for vec_type in vector_types:
vec_len = vec_type._length_
data1 = wp.zeros(dim_x, dtype=vec_type, device=device)
data2 = wp.zeros((dim_x, dim_x), dtype=vec_type, device=device)
data3 = wp.zeros((dim_x, dim_x, dim_x), dtype=vec_type, device=device)
data4 = wp.zeros((dim_x, dim_x, dim_x, dim_x), dtype=vec_type, device=device)
indices = wp.array(np.arange(0, dim_x, 2, dtype=np.int32), device=device)
a1 = data1[indices]
a2 = data2[indices]
a3 = data3[indices]
a4 = data4[indices]
assert_np_equal(a1.numpy(), np.zeros((*a1.shape, vec_len), dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros((*a2.shape, vec_len), dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros((*a3.shape, vec_len), dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros((*a4.shape, vec_len), dtype=nptype))
# fill with int scalar
fill_value = 42
a1.fill_(fill_value)
a2.fill_(fill_value)
a3.fill_(fill_value)
a4.fill_(fill_value)
assert_np_equal(a1.numpy(), np.full((*a1.shape, vec_len), fill_value, dtype=nptype))
assert_np_equal(a2.numpy(), np.full((*a2.shape, vec_len), fill_value, dtype=nptype))
assert_np_equal(a3.numpy(), np.full((*a3.shape, vec_len), fill_value, dtype=nptype))
assert_np_equal(a4.numpy(), np.full((*a4.shape, vec_len), fill_value, dtype=nptype))
# test zeroing
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
assert_np_equal(a1.numpy(), np.zeros((*a1.shape, vec_len), dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros((*a2.shape, vec_len), dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros((*a3.shape, vec_len), dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros((*a4.shape, vec_len), dtype=nptype))
# vector values can be passed as a list, numpy array, or Warp vector instance
fill_list = [17, 42, 99, 101, 127][:vec_len]
fill_arr = np.array(fill_list, dtype=nptype)
fill_vec = vec_type(fill_list)
expected1 = np.tile(fill_arr, a1.size).reshape((*a1.shape, vec_len))
expected2 = np.tile(fill_arr, a2.size).reshape((*a2.shape, vec_len))
expected3 = np.tile(fill_arr, a3.size).reshape((*a3.shape, vec_len))
expected4 = np.tile(fill_arr, a4.size).reshape((*a4.shape, vec_len))
# fill with list of vector length
a1.fill_(fill_list)
a2.fill_(fill_list)
a3.fill_(fill_list)
a4.fill_(fill_list)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
# clear
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
# fill with numpy array of vector length
a1.fill_(fill_arr)
a2.fill_(fill_arr)
a3.fill_(fill_arr)
a4.fill_(fill_arr)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
# clear
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
# fill with vec instance
a1.fill_(fill_vec)
a2.fill_(fill_vec)
a3.fill_(fill_vec)
a4.fill_(fill_vec)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
if wptype in wp.types.float_types:
# fill with float scalar
fill_value = 13.37
a1.fill_(fill_value)
a2.fill_(fill_value)
a3.fill_(fill_value)
a4.fill_(fill_value)
assert_np_equal(a1.numpy(), np.full((*a1.shape, vec_len), fill_value, dtype=nptype))
assert_np_equal(a2.numpy(), np.full((*a2.shape, vec_len), fill_value, dtype=nptype))
assert_np_equal(a3.numpy(), np.full((*a3.shape, vec_len), fill_value, dtype=nptype))
assert_np_equal(a4.numpy(), np.full((*a4.shape, vec_len), fill_value, dtype=nptype))
# fill with float list of vector length
fill_list = [-2.5, -1.25, 1.25, 2.5, 5.0][:vec_len]
a1.fill_(fill_list)
a2.fill_(fill_list)
a3.fill_(fill_list)
a4.fill_(fill_list)
expected1 = np.tile(np.array(fill_list, dtype=nptype), a1.size).reshape((*a1.shape, vec_len))
expected2 = np.tile(np.array(fill_list, dtype=nptype), a2.size).reshape((*a2.shape, vec_len))
expected3 = np.tile(np.array(fill_list, dtype=nptype), a3.size).reshape((*a3.shape, vec_len))
expected4 = np.tile(np.array(fill_list, dtype=nptype), a4.size).reshape((*a4.shape, vec_len))
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
def test_indexedarray_fill_matrix(test, device):
# test filling a matrix array with scalar or matrix values (mat_type, nested list, or 2d numpy array)
dim_x = 4
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
# matrix types
matrix_types = [
# square matrices
wp.types.matrix((2, 2), wptype),
wp.types.matrix((3, 3), wptype),
wp.types.matrix((4, 4), wptype),
wp.types.matrix((5, 5), wptype),
# non-square matrices
wp.types.matrix((2, 3), wptype),
wp.types.matrix((3, 2), wptype),
wp.types.matrix((3, 4), wptype),
wp.types.matrix((4, 3), wptype),
]
for mat_type in matrix_types:
mat_len = mat_type._length_
mat_shape = mat_type._shape_
data1 = wp.zeros(dim_x, dtype=mat_type, device=device)
data2 = wp.zeros((dim_x, dim_x), dtype=mat_type, device=device)
data3 = wp.zeros((dim_x, dim_x, dim_x), dtype=mat_type, device=device)
data4 = wp.zeros((dim_x, dim_x, dim_x, dim_x), dtype=mat_type, device=device)
indices = wp.array(np.arange(0, dim_x, 2, dtype=np.int32), device=device)
a1 = data1[indices]
a2 = data2[indices]
a3 = data3[indices]
a4 = data4[indices]
assert_np_equal(a1.numpy(), np.zeros((*a1.shape, *mat_shape), dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros((*a2.shape, *mat_shape), dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros((*a3.shape, *mat_shape), dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros((*a4.shape, *mat_shape), dtype=nptype))
# fill with scalar
fill_value = 42
a1.fill_(fill_value)
a2.fill_(fill_value)
a3.fill_(fill_value)
a4.fill_(fill_value)
assert_np_equal(a1.numpy(), np.full((*a1.shape, *mat_shape), fill_value, dtype=nptype))
assert_np_equal(a2.numpy(), np.full((*a2.shape, *mat_shape), fill_value, dtype=nptype))
assert_np_equal(a3.numpy(), np.full((*a3.shape, *mat_shape), fill_value, dtype=nptype))
assert_np_equal(a4.numpy(), np.full((*a4.shape, *mat_shape), fill_value, dtype=nptype))
# test zeroing
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
assert_np_equal(a1.numpy(), np.zeros((*a1.shape, *mat_shape), dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros((*a2.shape, *mat_shape), dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros((*a3.shape, *mat_shape), dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros((*a4.shape, *mat_shape), dtype=nptype))
# matrix values can be passed as a 1d numpy array, 2d numpy array, flat list, nested list, or Warp matrix instance
if wptype != wp.bool:
fill_arr1 = np.arange(mat_len, dtype=nptype)
else:
fill_arr1 = np.ones(mat_len, dtype=nptype)
fill_arr2 = fill_arr1.reshape(mat_shape)
fill_list1 = list(fill_arr1)
fill_list2 = [list(row) for row in fill_arr2]
fill_mat = mat_type(fill_arr1)
expected1 = np.tile(fill_arr1, a1.size).reshape((*a1.shape, *mat_shape))
expected2 = np.tile(fill_arr1, a2.size).reshape((*a2.shape, *mat_shape))
expected3 = np.tile(fill_arr1, a3.size).reshape((*a3.shape, *mat_shape))
expected4 = np.tile(fill_arr1, a4.size).reshape((*a4.shape, *mat_shape))
# fill with 1d numpy array
a1.fill_(fill_arr1)
a2.fill_(fill_arr1)
a3.fill_(fill_arr1)
a4.fill_(fill_arr1)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
# clear
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
# fill with 2d numpy array
a1.fill_(fill_arr2)
a2.fill_(fill_arr2)
a3.fill_(fill_arr2)
a4.fill_(fill_arr2)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
# clear
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
# fill with flat list
a1.fill_(fill_list1)
a2.fill_(fill_list1)
a3.fill_(fill_list1)
a4.fill_(fill_list1)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
# clear
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
# fill with nested list
a1.fill_(fill_list2)
a2.fill_(fill_list2)
a3.fill_(fill_list2)
a4.fill_(fill_list2)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
# clear
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
# fill with mat instance
a1.fill_(fill_mat)
a2.fill_(fill_mat)
a3.fill_(fill_mat)
a4.fill_(fill_mat)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
def test_indexedarray_fill_struct(test, device):
dim_x = 8
nptype = FillStruct.numpy_dtype()
data1 = wp.zeros(dim_x, dtype=FillStruct, device=device)
data2 = wp.zeros((dim_x, dim_x), dtype=FillStruct, device=device)
data3 = wp.zeros((dim_x, dim_x, dim_x), dtype=FillStruct, device=device)
data4 = wp.zeros((dim_x, dim_x, dim_x, dim_x), dtype=FillStruct, device=device)
indices = wp.array(np.arange(0, dim_x, 2, dtype=np.int32), device=device)
a1 = data1[indices]
a2 = data2[indices]
a3 = data3[indices]
a4 = data4[indices]
assert_np_equal(a1.numpy(), np.zeros(a1.shape, dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros(a2.shape, dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros(a3.shape, dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros(a4.shape, dtype=nptype))
s = FillStruct()
# fill with default struct value (should be all zeros)
a1.fill_(s)
a2.fill_(s)
a3.fill_(s)
a4.fill_(s)
assert_np_equal(a1.numpy(), np.zeros(a1.shape, dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros(a2.shape, dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros(a3.shape, dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros(a4.shape, dtype=nptype))
# scalars
s.i1 = -17
s.i2 = 42
s.i4 = 99
s.i8 = 101
s.f2 = -1.25
s.f4 = 13.37
s.f8 = 0.125
# vectors
s.v2 = [21, 22]
s.v3 = [31, 32, 33]
s.v4 = [41, 42, 43, 44]
s.v5 = [51, 52, 53, 54, 55]
# matrices
s.m2 = [[61, 62]] * 2
s.m3 = [[71, 72, 73]] * 3
s.m4 = [[81, 82, 83, 84]] * 4
s.m5 = [[91, 92, 93, 94, 95]] * 5
# arrays
s.a1 = wp.zeros((2,) * 1, dtype=float, device=device)
s.a2 = wp.zeros((2,) * 2, dtype=float, device=device)
s.a3 = wp.zeros((2,) * 3, dtype=float, device=device)
s.a4 = wp.zeros((2,) * 4, dtype=float, device=device)
# fill with custom struct value
a1.fill_(s)
a2.fill_(s)
a3.fill_(s)
a4.fill_(s)
ns = s.numpy_value()
expected1 = np.empty(a1.shape, dtype=nptype)
expected2 = np.empty(a2.shape, dtype=nptype)
expected3 = np.empty(a3.shape, dtype=nptype)
expected4 = np.empty(a4.shape, dtype=nptype)
expected1.fill(ns)
expected2.fill(ns)
expected3.fill(ns)
expected4.fill(ns)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
# test clearing
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
assert_np_equal(a1.numpy(), np.zeros(a1.shape, dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros(a2.shape, dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros(a3.shape, dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros(a4.shape, dtype=nptype))
devices = get_test_devices()
class TestIndexedArray(unittest.TestCase):
pass
add_function_test(TestIndexedArray, "test_indexedarray_1d", test_indexedarray_1d, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_2d", test_indexedarray_2d, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_3d", test_indexedarray_3d, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_4d", test_indexedarray_4d, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_mixed", test_indexedarray_mixed, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_shape", test_indexedarray_shape, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_getitem", test_indexedarray_getitem, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_slicing", test_indexedarray_slicing, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_generics", test_indexedarray_generics, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_empty", test_indexedarray_empty, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_fill_scalar", test_indexedarray_fill_scalar, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_fill_vector", test_indexedarray_fill_vector, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_fill_matrix", test_indexedarray_fill_matrix, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_fill_struct", test_indexedarray_fill_struct, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 40,800 | Python | 35.011474 | 126 | 0.579044 |
NVIDIA/warp/warp/tests/test_marching_cubes.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def make_field(field: wp.array3d(dtype=float), center: wp.vec3, radius: float):
i, j, k = wp.tid()
p = wp.vec3(float(i), float(j), float(k))
d = wp.length(p - center) - radius
field[i, j, k] = d
def test_marching_cubes(test, device):
dim = 64
max_verts = 10**6
max_tris = 10**6
field = wp.zeros(shape=(dim, dim, dim), dtype=float, device=device)
iso = wp.MarchingCubes(nx=dim, ny=dim, nz=dim, max_verts=max_verts, max_tris=max_tris, device=device)
radius = dim / 4.0
wp.launch(make_field, dim=field.shape, inputs=[field, wp.vec3(dim / 2, dim / 2, dim / 2), radius], device=device)
iso.surface(field=field, threshold=0.0)
# check that all returned vertices lie on the surface of the sphere
length = np.linalg.norm(iso.verts.numpy() - np.array([dim / 2, dim / 2, dim / 2]), axis=1)
error = np.abs(length - radius)
test.assertTrue(np.max(error) < 1.0)
iso.resize(nx=dim * 2, ny=dim * 2, nz=dim * 2, max_verts=max_verts, max_tris=max_tris)
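# Editorial note: after iso.surface(...), the extracted mesh lives in wp.array
# attributes on the MarchingCubes object -- iso.verts for vertex positions (used
# above for the sphere-distance check) and a triangle index array, assumed here to
# be iso.indices -- both of which can be copied to NumPy via .numpy() for inspection.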
devices = get_selected_cuda_test_devices()
class TestMarchingCubes(unittest.TestCase):
pass
add_function_test(TestMarchingCubes, "test_marching_cubes", test_marching_cubes, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 1,843 | Python | 27.8125 | 117 | 0.691264 |
NVIDIA/warp/warp/tests/test_devices.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import warp as wp
from warp.tests.unittest_utils import *
def test_devices_get_cuda_device_functions(test, device):
test.assertTrue(device.is_cuda)
test.assertTrue(wp.is_device_available(device))
device_ordinal = device.ordinal
current_device = wp.get_cuda_device(device_ordinal)
test.assertEqual(current_device, device)
current_device = wp.get_cuda_device() # No-ordinal version
test.assertTrue(wp.is_device_available(current_device))
if device == current_device:
test.assertEqual(device, "cuda")
else:
test.assertNotEqual(device, "cuda")
preferred_device = wp.get_preferred_device()
test.assertTrue(wp.is_device_available(preferred_device))
def test_devices_map_cuda_device(test, device):
with wp.ScopedDevice(device):
saved_alias = device.alias
# Map alias twice to check code path
wp.map_cuda_device("new_alias")
wp.map_cuda_device("new_alias")
wp.context.runtime.rename_device(device, saved_alias)
def test_devices_verify_cuda_device(test, device):
verify_cuda_saved = wp.config.verify_cuda
wp.config.verify_cuda = True
wp.context.runtime.verify_cuda_device(device)
wp.config.verify_cuda = verify_cuda_saved
@unittest.skipUnless(wp.is_cuda_available(), "Requires CUDA")
def test_devices_can_access_self(test, device):
test.assertTrue(device.can_access(device))
for warp_device in wp.get_devices():
device_str = str(warp_device)
if (device.is_cpu and warp_device.is_cuda) or (device.is_cuda and warp_device.is_cpu):
test.assertFalse(device.can_access(warp_device))
test.assertNotEqual(device, warp_device)
test.assertNotEqual(device, device_str)
devices = get_test_devices()
class TestDevices(unittest.TestCase):
def test_devices_unmap_imaginary_device(self):
with self.assertRaises(RuntimeError):
wp.unmap_cuda_device("imaginary_device:0")
add_function_test(
TestDevices,
"test_devices_get_cuda_device_functions",
test_devices_get_cuda_device_functions,
devices=get_selected_cuda_test_devices(),
)
add_function_test(
TestDevices, "test_devices_map_cuda_device", test_devices_map_cuda_device, devices=get_selected_cuda_test_devices()
)
add_function_test(TestDevices, "test_devices_verify_cuda_device", test_devices_verify_cuda_device, devices=devices)
add_function_test(TestDevices, "test_devices_can_access_self", test_devices_can_access_self, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 3,048 | Python | 32.877777 | 119 | 0.721457 |
NVIDIA/warp/warp/tests/test_utils.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import contextlib
import inspect
import io
import unittest
from warp.tests.unittest_utils import *
def test_array_scan(test, device):
rng = np.random.default_rng(123)
for dtype in (int, float):
if dtype == int:
values = rng.integers(-1e6, high=1e6, size=100000, dtype=dtype)
else:
values = rng.uniform(low=-1e6, high=1e6, size=100000)
expected = np.cumsum(values)
values = wp.array(values, dtype=dtype, device=device)
result_inc = wp.zeros_like(values)
result_exc = wp.zeros_like(values)
wp.utils.array_scan(values, result_inc, True)
wp.utils.array_scan(values, result_exc, False)
tolerance = 0 if dtype == int else 1e-3
result_inc = result_inc.numpy().squeeze()
result_exc = result_exc.numpy().squeeze()
error_inc = np.max(np.abs(result_inc - expected)) / abs(expected[-1])
error_exc = max(np.max(np.abs(result_exc[1:] - expected[:-1])), abs(result_exc[0])) / abs(expected[-2])
test.assertTrue(error_inc <= tolerance)
test.assertTrue(error_exc <= tolerance)
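# Worked example (editorial addition): for input [1, 2, 3],
#   inclusive scan -> [1, 3, 6]  (result[i] includes values[i])
#   exclusive scan -> [0, 1, 3]  (result[i] sums values[:i])
# which is why the check above compares result_exc[1:] against expected[:-1] and
# tests result_exc[0] separately.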
def test_array_scan_empty(test, device):
values = wp.array((), dtype=int, device=device)
result = wp.array((), dtype=int, device=device)
wp.utils.array_scan(values, result)
def test_array_scan_error_sizes_mismatch(test, device):
values = wp.zeros(123, dtype=int, device=device)
result = wp.zeros(234, dtype=int, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"Array storage sizes do not match$",
):
wp.utils.array_scan(values, result, True)
def test_array_scan_error_dtypes_mismatch(test, device):
values = wp.zeros(123, dtype=int, device=device)
result = wp.zeros(123, dtype=float, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"Array data types do not match$",
):
wp.utils.array_scan(values, result, True)
def test_array_scan_error_unsupported_dtype(test, device):
values = wp.zeros(123, dtype=wp.vec3, device=device)
result = wp.zeros(123, dtype=wp.vec3, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"Unsupported data type$",
):
wp.utils.array_scan(values, result, True)
def test_radix_sort_pairs(test, device):
keys = wp.array((7, 2, 8, 4, 1, 6, 5, 3, 0, 0, 0, 0, 0, 0, 0, 0), dtype=int, device=device)
values = wp.array((1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0), dtype=int, device=device)
wp.utils.radix_sort_pairs(keys, values, 8)
assert_np_equal(keys.numpy()[:8], np.array((1, 2, 3, 4, 5, 6, 7, 8)))
assert_np_equal(values.numpy()[:8], np.array((5, 2, 8, 4, 7, 6, 1, 3)))
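# Editorial note: the key/value arrays above hold 16 elements while only the first
# 8 are sorted because radix_sort_pairs requires storage for at least 2*count
# elements -- the same requirement checked by the "insufficient storage" test below.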
def test_radix_sort_pairs_empty(test, device):
keys = wp.array((), dtype=int, device=device)
values = wp.array((), dtype=int, device=device)
wp.utils.radix_sort_pairs(keys, values, 0)
def test_radix_sort_pairs_error_insufficient_storage(test, device):
keys = wp.array((1, 2, 3), dtype=int, device=device)
values = wp.array((1, 2, 3), dtype=int, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"Array storage must be large enough to contain 2\*count elements$",
):
wp.utils.radix_sort_pairs(keys, values, 3)
def test_radix_sort_pairs_error_unsupported_dtype(test, device):
keys = wp.array((1.0, 2.0, 3.0), dtype=float, device=device)
values = wp.array((1.0, 2.0, 3.0), dtype=float, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"Unsupported data type$",
):
wp.utils.radix_sort_pairs(keys, values, 1)
def test_array_sum(test, device):
for dtype in (wp.float32, wp.float64):
with test.subTest(dtype=dtype):
values = wp.array((1.0, 2.0, 3.0), dtype=dtype, device=device)
test.assertEqual(wp.utils.array_sum(values), 6.0)
values = wp.array((1.0, 2.0, 3.0), dtype=dtype, device=device)
result = wp.empty(shape=(1,), dtype=dtype, device=device)
wp.utils.array_sum(values, out=result)
test.assertEqual(result.numpy()[0], 6.0)
def test_array_sum_error_out_dtype_mismatch(test, device):
values = wp.array((1.0, 2.0, 3.0), dtype=wp.float32, device=device)
result = wp.empty(shape=(1,), dtype=wp.float64, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"out array should have type float32$",
):
wp.utils.array_sum(values, out=result)
def test_array_sum_error_out_shape_mismatch(test, device):
values = wp.array((1.0, 2.0, 3.0), dtype=wp.float32, device=device)
result = wp.empty(shape=(2,), dtype=wp.float32, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"out array should have shape \(1,\)$",
):
wp.utils.array_sum(values, out=result)
def test_array_sum_error_unsupported_dtype(test, device):
values = wp.array((1, 2, 3), dtype=int, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"Unsupported data type$",
):
wp.utils.array_sum(values)
def test_array_inner(test, device):
for dtype in (wp.float32, wp.float64):
a = wp.array((1.0, 2.0, 3.0), dtype=dtype, device=device)
b = wp.array((1.0, 2.0, 3.0), dtype=dtype, device=device)
test.assertEqual(wp.utils.array_inner(a, b), 14.0)
a = wp.array((1.0, 2.0, 3.0), dtype=dtype, device=device)
b = wp.array((1.0, 2.0, 3.0), dtype=dtype, device=device)
result = wp.empty(shape=(1,), dtype=dtype, device=device)
wp.utils.array_inner(a, b, out=result)
test.assertEqual(result.numpy()[0], 14.0)
def test_array_inner_error_sizes_mismatch(test, device):
a = wp.array((1.0, 2.0), dtype=wp.float32, device=device)
b = wp.array((1.0, 2.0, 3.0), dtype=wp.float32, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"Array storage sizes do not match$",
):
wp.utils.array_inner(a, b)
def test_array_inner_error_dtypes_mismatch(test, device):
a = wp.array((1.0, 2.0, 3.0), dtype=wp.float32, device=device)
b = wp.array((1.0, 2.0, 3.0), dtype=wp.float64, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"Array data types do not match$",
):
wp.utils.array_inner(a, b)
def test_array_inner_error_out_dtype_mismatch(test, device):
a = wp.array((1.0, 2.0, 3.0), dtype=wp.float32, device=device)
b = wp.array((1.0, 2.0, 3.0), dtype=wp.float32, device=device)
result = wp.empty(shape=(1,), dtype=wp.float64, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"out array should have type float32$",
):
wp.utils.array_inner(a, b, result)
def test_array_inner_error_out_shape_mismatch(test, device):
a = wp.array((1.0, 2.0, 3.0), dtype=wp.float32, device=device)
b = wp.array((1.0, 2.0, 3.0), dtype=wp.float32, device=device)
result = wp.empty(shape=(2,), dtype=wp.float32, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"out array should have shape \(1,\)$",
):
wp.utils.array_inner(a, b, result)
def test_array_inner_error_unsupported_dtype(test, device):
a = wp.array((1, 2, 3), dtype=int, device=device)
b = wp.array((1, 2, 3), dtype=int, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"Unsupported data type$",
):
wp.utils.array_inner(a, b)
def test_array_cast(test, device):
values = wp.array((1, 2, 3), dtype=int, device=device)
result = wp.empty(3, dtype=float, device=device)
wp.utils.array_cast(values, result)
test.assertEqual(result.dtype, wp.float32)
test.assertEqual(result.shape, (3,))
assert_np_equal(result.numpy(), np.array((1.0, 2.0, 3.0), dtype=float))
values = wp.array((1, 2, 3, 4), dtype=int, device=device)
result = wp.empty((2, 2), dtype=float, device=device)
wp.utils.array_cast(values, result)
test.assertEqual(result.dtype, wp.float32)
test.assertEqual(result.shape, (2, 2))
assert_np_equal(result.numpy(), np.array(((1.0, 2.0), (3.0, 4.0)), dtype=float))
values = wp.array(((1, 2), (3, 4)), dtype=wp.vec2, device=device)
result = wp.zeros(2, dtype=float, device=device)
wp.utils.array_cast(values, result, count=1)
test.assertEqual(result.dtype, wp.float32)
test.assertEqual(result.shape, (2,))
assert_np_equal(result.numpy(), np.array((1.0, 2.0), dtype=float))
values = wp.array(((1, 2), (3, 4)), dtype=int, device=device)
result = wp.zeros((2, 2), dtype=int, device=device)
wp.utils.array_cast(values, result)
test.assertEqual(result.dtype, wp.int32)
test.assertEqual(result.shape, (2, 2))
assert_np_equal(result.numpy(), np.array(((1, 2), (3, 4)), dtype=int))
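# Editorial note: as exercised above, wp.utils.array_cast converts element types
# (int -> float), can write a flat source into a destination of a different shape
# (4 ints into a 2x2 float array), and accepts a count= for partial casts, which
# the next test shows is only supported for one-dimensional arrays.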
def test_array_cast_error_unsupported_partial_cast(test, device):
values = wp.array(((1, 2), (3, 4)), dtype=int, device=device)
result = wp.zeros((2, 2), dtype=float, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"Partial cast is not supported for arrays with more than one dimension$",
):
wp.utils.array_cast(values, result, count=1)
devices = get_test_devices()
class TestUtils(unittest.TestCase):
def test_warn(self):
# Multiple warnings get printed out each time.
with contextlib.redirect_stdout(io.StringIO()) as f:
wp.utils.warn("hello, world!")
wp.utils.warn("hello, world!")
expected = "Warp UserWarning: hello, world!\n" "Warp UserWarning: hello, world!\n"
self.assertEqual(f.getvalue(), expected)
# Test verbose warnings
saved_verbosity = wp.config.verbose_warnings
try:
wp.config.verbose_warnings = True
with contextlib.redirect_stdout(io.StringIO()) as f:
frame_info = inspect.getframeinfo(inspect.currentframe())
wp.utils.warn("hello, world!")
wp.utils.warn("hello, world!")
expected = (
f"Warp UserWarning: hello, world! ({frame_info.filename}:{frame_info.lineno + 1})\n"
' wp.utils.warn("hello, world!")\n'
f"Warp UserWarning: hello, world! ({frame_info.filename}:{frame_info.lineno + 2})\n"
' wp.utils.warn("hello, world!")\n'
)
self.assertEqual(f.getvalue(), expected)
finally:
# make sure to restore warning verbosity
wp.config.verbose_warnings = saved_verbosity
# Multiple similar deprecation warnings get printed out only once.
with contextlib.redirect_stdout(io.StringIO()) as f:
wp.utils.warn("hello, world!", category=DeprecationWarning)
wp.utils.warn("hello, world!", category=DeprecationWarning)
expected = "Warp DeprecationWarning: hello, world!\n"
self.assertEqual(f.getvalue(), expected)
# Multiple different deprecation warnings get printed out each time.
with contextlib.redirect_stdout(io.StringIO()) as f:
wp.utils.warn("foo", category=DeprecationWarning)
wp.utils.warn("bar", category=DeprecationWarning)
expected = "Warp DeprecationWarning: foo\n" "Warp DeprecationWarning: bar\n"
self.assertEqual(f.getvalue(), expected)
def test_transform_expand(self):
t = (1.0, 2.0, 3.0, 4.0, 3.0, 2.0, 1.0)
self.assertEqual(
wp.utils.transform_expand(t),
wp.transformf(p=(1.0, 2.0, 3.0), q=(4.0, 3.0, 2.0, 1.0)),
)
@unittest.skipUnless(wp.is_cuda_available(), "Requires CUDA")
def test_array_scan_error_devices_mismatch(self):
values = wp.zeros(123, dtype=int, device="cpu")
result = wp.zeros_like(values, device="cuda:0")
with self.assertRaisesRegex(
RuntimeError,
r"Array storage devices do not match$",
):
wp.utils.array_scan(values, result, True)
@unittest.skipUnless(wp.is_cuda_available(), "Requires CUDA")
def test_radix_sort_pairs_error_devices_mismatch(self):
keys = wp.array((1, 2, 3), dtype=int, device="cpu")
values = wp.array((1, 2, 3), dtype=int, device="cuda:0")
with self.assertRaisesRegex(
RuntimeError,
r"Array storage devices do not match$",
):
wp.utils.radix_sort_pairs(keys, values, 1)
@unittest.skipUnless(wp.is_cuda_available(), "Requires CUDA")
def test_array_inner_error_out_device_mismatch(self):
a = wp.array((1.0, 2.0, 3.0), dtype=wp.float32, device="cpu")
b = wp.array((1.0, 2.0, 3.0), dtype=wp.float32, device="cpu")
result = wp.empty(shape=(1,), dtype=wp.float32, device="cuda:0")
with self.assertRaisesRegex(
RuntimeError,
r"out storage device should match values array$",
):
wp.utils.array_inner(a, b, result)
@unittest.skipUnless(wp.is_cuda_available(), "Requires CUDA")
def test_array_sum_error_out_device_mismatch(self):
values = wp.array((1.0, 2.0, 3.0), dtype=wp.float32, device="cpu")
result = wp.empty(shape=(1,), dtype=wp.float32, device="cuda:0")
with self.assertRaisesRegex(
RuntimeError,
r"out storage device should match values array$",
):
wp.utils.array_sum(values, out=result)
@unittest.skipUnless(wp.is_cuda_available(), "Requires CUDA")
def test_array_inner_error_devices_mismatch(self):
a = wp.array((1.0, 2.0, 3.0), dtype=wp.float32, device="cpu")
b = wp.array((1.0, 2.0, 3.0), dtype=wp.float32, device="cuda:0")
with self.assertRaisesRegex(
RuntimeError,
r"Array storage devices do not match$",
):
wp.utils.array_inner(a, b)
@unittest.skipUnless(wp.is_cuda_available(), "Requires CUDA")
def test_array_cast_error_devices_mismatch(self):
values = wp.array((1, 2, 3), dtype=int, device="cpu")
result = wp.empty(3, dtype=float, device="cuda:0")
with self.assertRaisesRegex(
RuntimeError,
r"Array storage devices do not match$",
):
wp.utils.array_cast(values, result)
def test_mesh_adjacency(self):
triangles = (
(0, 3, 1),
(0, 2, 3),
)
adj = wp.utils.MeshAdjacency(triangles, len(triangles))
expected_edges = {
(0, 3): (0, 3, 1, 2, 0, 1),
(1, 3): (3, 1, 0, -1, 0, -1),
(0, 1): (1, 0, 3, -1, 0, -1),
(0, 2): (0, 2, 3, -1, 1, -1),
(2, 3): (2, 3, 0, -1, 1, -1),
}
edges = {k: (e.v0, e.v1, e.o0, e.o1, e.f0, e.f1) for k, e in adj.edges.items()}
self.assertDictEqual(edges, expected_edges)
def test_mesh_adjacency_error_manifold(self):
triangles = (
(0, 3, 1),
(0, 2, 3),
(3, 0, 1),
)
with contextlib.redirect_stdout(io.StringIO()) as f:
wp.utils.MeshAdjacency(triangles, len(triangles))
self.assertEqual(f.getvalue(), "Detected non-manifold edge\n")
def test_scoped_timer(self):
with contextlib.redirect_stdout(io.StringIO()) as f:
with wp.ScopedTimer("hello"):
pass
self.assertRegex(f.getvalue(), r"^hello took \d+\.\d+ ms$")
with contextlib.redirect_stdout(io.StringIO()) as f:
with wp.ScopedTimer("hello", detailed=True):
pass
self.assertRegex(f.getvalue(), r"^ 4 function calls in \d+\.\d+ seconds")
self.assertRegex(f.getvalue(), r"hello took \d+\.\d+ ms$")
add_function_test(TestUtils, "test_array_scan", test_array_scan, devices=devices)
add_function_test(TestUtils, "test_array_scan_empty", test_array_scan_empty, devices=devices)
add_function_test(
TestUtils, "test_array_scan_error_sizes_mismatch", test_array_scan_error_sizes_mismatch, devices=devices
)
add_function_test(
TestUtils, "test_array_scan_error_dtypes_mismatch", test_array_scan_error_dtypes_mismatch, devices=devices
)
add_function_test(
TestUtils, "test_array_scan_error_unsupported_dtype", test_array_scan_error_unsupported_dtype, devices=devices
)
add_function_test(TestUtils, "test_radix_sort_pairs", test_radix_sort_pairs, devices=devices)
add_function_test(TestUtils, "test_radix_sort_pairs_empty", test_radix_sort_pairs, devices=devices)
add_function_test(
TestUtils,
"test_radix_sort_pairs_error_insufficient_storage",
test_radix_sort_pairs_error_insufficient_storage,
devices=devices,
)
add_function_test(
TestUtils,
"test_radix_sort_pairs_error_unsupported_dtype",
test_radix_sort_pairs_error_unsupported_dtype,
devices=devices,
)
add_function_test(TestUtils, "test_array_sum", test_array_sum, devices=devices)
add_function_test(
TestUtils, "test_array_sum_error_out_dtype_mismatch", test_array_sum_error_out_dtype_mismatch, devices=devices
)
add_function_test(
TestUtils, "test_array_sum_error_out_shape_mismatch", test_array_sum_error_out_shape_mismatch, devices=devices
)
add_function_test(
TestUtils, "test_array_sum_error_unsupported_dtype", test_array_sum_error_unsupported_dtype, devices=devices
)
add_function_test(TestUtils, "test_array_inner", test_array_inner, devices=devices)
add_function_test(
TestUtils, "test_array_inner_error_sizes_mismatch", test_array_inner_error_sizes_mismatch, devices=devices
)
add_function_test(
TestUtils, "test_array_inner_error_dtypes_mismatch", test_array_inner_error_dtypes_mismatch, devices=devices
)
add_function_test(
TestUtils, "test_array_inner_error_out_dtype_mismatch", test_array_inner_error_out_dtype_mismatch, devices=devices
)
add_function_test(
TestUtils, "test_array_inner_error_out_shape_mismatch", test_array_inner_error_out_shape_mismatch, devices=devices
)
add_function_test(
TestUtils, "test_array_inner_error_unsupported_dtype", test_array_inner_error_unsupported_dtype, devices=devices
)
add_function_test(TestUtils, "test_array_cast", test_array_cast, devices=devices)
add_function_test(
TestUtils,
"test_array_cast_error_unsupported_partial_cast",
test_array_cast_error_unsupported_partial_cast,
devices=devices,
)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 18,801 | Python | 37.449898 | 118 | 0.633264 |
NVIDIA/warp/warp/tests/test_conditional.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def test_conditional_if_else():
a = 0.5
b = 2.0
if a > b:
c = 1.0
else:
c = -1.0
wp.expect_eq(c, -1.0)
@wp.kernel
def test_conditional_if_else_nested():
a = 1.0
b = 2.0
if a > b:
c = 3.0
d = 4.0
if c > d:
e = 1.0
else:
e = -1.0
else:
c = 6.0
d = 7.0
if c > d:
e = 2.0
else:
e = -2.0
wp.expect_eq(e, -2.0)
@wp.kernel
def test_boolean_and():
a = 1.0
b = 2.0
c = 1.0
if a > 0.0 and b > 0.0:
c = -1.0
wp.expect_eq(c, -1.0)
@wp.kernel
def test_boolean_or():
a = 1.0
b = 2.0
c = 1.0
    if a > 0.0 or b > 0.0:
c = -1.0
wp.expect_eq(c, -1.0)
@wp.kernel
def test_boolean_compound():
a = 1.0
b = 2.0
c = 3.0
d = 1.0
if a > 0.0 and b > 0.0 or c > a:
d = -1.0
wp.expect_eq(d, -1.0)
@wp.kernel
def test_boolean_literal():
t = True
f = False
r = 1.0
if t == (not f):
r = -1.0
wp.expect_eq(r, -1.0)
@wp.kernel
def test_int_logical_not():
x = 0
if not 123:
x = 123
wp.expect_eq(x, 0)
@wp.kernel
def test_int_conditional_assign_overload():
if 123:
x = 123
if 234:
x = 234
wp.expect_eq(x, 234)
@wp.kernel
def test_bool_param_conditional(foo: bool):
if foo:
x = 123
wp.expect_eq(x, 123)
@wp.kernel
def test_conditional_chain_basic():
x = -1
if 0 < x < 1:
success = False
else:
success = True
wp.expect_eq(success, True)
@wp.kernel
def test_conditional_chain_empty_range():
x = -1
y = 4
if -2 <= x <= 10 <= y:
success = False
else:
success = True
wp.expect_eq(success, True)
@wp.kernel
def test_conditional_chain_faker():
x = -1
# Not actually a chained inequality
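    # Editorial walk-through: with x = -1, (-2 < x) is True and (1 > 0) is True, so
    # the expression reduces to True < True (i.e. 1 < 1), which is False and the
    # else branch is taken.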
if (-2 < x) < (1 > 0):
success = False
else:
success = True
wp.expect_eq(success, True)
@wp.kernel
def test_conditional_chain_and():
x = -1
if (-2 < x < 0) and (-1 <= x <= -1):
success = True
else:
success = False
wp.expect_eq(success, True)
@wp.kernel
def test_conditional_chain_eqs():
x = wp.int32(10)
y = 10
z = -10
if x == y != z:
success = True
else:
success = False
wp.expect_eq(success, True)
@wp.kernel
def test_conditional_chain_mixed():
x = 0
if x < 10 == 1:
success = False
else:
success = True
wp.expect_eq(success, True)
def test_conditional_unequal_types(test: unittest.TestCase, device):
# The bad kernel must be in a separate module, otherwise the current module would fail to load
from warp.tests.aux_test_conditional_unequal_types_kernels import (
unequal_types_kernel,
)
with test.assertRaises(TypeError):
wp.launch(unequal_types_kernel, dim=(1,), inputs=[], device=device)
# remove all references to the bad module so that subsequent calls to wp.force_load()
    # won't try to load it unless we explicitly re-import it
del wp.context.user_modules["warp.tests.aux_test_conditional_unequal_types_kernels"]
del sys.modules["warp.tests.aux_test_conditional_unequal_types_kernels"]
devices = get_test_devices()
class TestConditional(unittest.TestCase):
pass
add_kernel_test(TestConditional, kernel=test_conditional_if_else, dim=1, devices=devices)
add_kernel_test(TestConditional, kernel=test_conditional_if_else_nested, dim=1, devices=devices)
add_kernel_test(TestConditional, kernel=test_boolean_and, dim=1, devices=devices)
add_kernel_test(TestConditional, kernel=test_boolean_or, dim=1, devices=devices)
add_kernel_test(TestConditional, kernel=test_boolean_compound, dim=1, devices=devices)
add_kernel_test(TestConditional, kernel=test_boolean_literal, dim=1, devices=devices)
add_kernel_test(TestConditional, kernel=test_int_logical_not, dim=1, devices=devices)
add_kernel_test(TestConditional, kernel=test_int_conditional_assign_overload, dim=1, devices=devices)
add_kernel_test(TestConditional, kernel=test_bool_param_conditional, dim=1, inputs=[True], devices=devices)
add_kernel_test(TestConditional, kernel=test_conditional_chain_basic, dim=1, devices=devices)
add_kernel_test(TestConditional, kernel=test_conditional_chain_empty_range, dim=1, devices=devices)
add_kernel_test(TestConditional, kernel=test_conditional_chain_faker, dim=1, devices=devices)
add_kernel_test(TestConditional, kernel=test_conditional_chain_and, dim=1, devices=devices)
add_kernel_test(TestConditional, kernel=test_conditional_chain_eqs, dim=1, devices=devices)
add_kernel_test(TestConditional, kernel=test_conditional_chain_mixed, dim=1, devices=devices)
add_function_test(TestConditional, "test_conditional_unequal_types", test_conditional_unequal_types, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 5,501 | Python | 21.457143 | 117 | 0.635703 |
NVIDIA/warp/warp/tests/unused_test_misc.py | # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import numpy as np
import warp as wp
@wp.kernel
def arange(out: wp.array(dtype=int)):
tid = wp.tid()
out[tid] = tid
device = "cuda:0"
cmds = []
n = 10
arrays = []
for _i in range(5):
arrays.append(wp.zeros(n, dtype=int, device=device))
# setup CUDA graph
wp.capture_begin()
# launch kernels and keep command object around
for i in range(5):
cmd = wp.launch(arange, dim=n, inputs=[arrays[i]], device=device, record_cmd=True)
cmds.append(cmd)
graph = wp.capture_end()
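# Note: the launches above were recorded with record_cmd=True, so the returned
# command objects can later have their launch dims/params updated in place and the
# captured graph relaunched without re-capturing (see below).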
# ---------------------------------------
ref = np.arange(0, n, dtype=int)
wp.capture_launch(graph)
for i in range(5):
print(arrays[i].numpy())
# ---------------------------------------
n = 16
arrays = []
for _i in range(5):
arrays.append(wp.zeros(n, dtype=int, device=device))
# update graph params
for i in range(5):
    cmds[i].set_dim(n)
    cmds[i].set_param(arrays[i])
    cmds[i].update_graph()
wp.capture_launch(graph)
wp.synchronize()
ref = np.arange(0, n, dtype=int)
for i in range(5):
print(arrays[i].numpy())
| 1,454 | Python | 19.785714 | 86 | 0.657497 |
NVIDIA/warp/warp/tests/test_noise.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def pnoise(
kernel_seed: int, W: int, px: int, py: int, noise_values: wp.array(dtype=float), pixel_values: wp.array(dtype=float)
):
tid = wp.tid()
state = wp.rand_init(kernel_seed)
x = (float(tid % W) + 0.5) * 0.2
y = (float(tid / W) + 0.5) * 0.2
p = wp.vec2(x, y)
n = wp.pnoise(state, p, px, py)
noise_values[tid] = n
g = ((n + 1.0) / 2.0) * 255.0
pixel_values[tid] = g
@wp.kernel
def curlnoise(kernel_seed: int, W: int, noise_coords: wp.array(dtype=wp.vec2), noise_vectors: wp.array(dtype=wp.vec2)):
tid = wp.tid()
state = wp.rand_init(kernel_seed)
x = (float(tid % W) + 0.5) * 0.2
y = (float(tid / W) + 0.5) * 0.2
p = wp.vec2(x, y)
v = wp.curlnoise(state, p)
noise_coords[tid] = p
noise_vectors[tid] = v
def test_pnoise(test, device):
# image dim
W = 256
H = 256
N = W * H
seed = 42
# periodic perlin noise test
px = 16
py = 16
noise_values = wp.zeros(N, dtype=float, device=device)
pixel_values = wp.zeros(N, dtype=float, device=device)
wp.launch(kernel=pnoise, dim=N, inputs=[seed, W, px, py, noise_values, pixel_values], outputs=[], device=device)
# Perlin theoretical range is [-0.5*sqrt(n), 0.5*sqrt(n)] for n dimensions
n = noise_values.numpy()
# max = np.max(n)
# min = np.min(n)
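    ### Range sanity check (illustrative sketch only, not part of the golden test) ###
    # For 2D Perlin noise the theoretical bound above is 0.5*sqrt(2) ~= 0.7071, so a
    # quick check could be: assert np.all(np.abs(n) <= 0.5 * np.sqrt(2.0))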
img = pixel_values.numpy()
img = np.reshape(img, (W, H))
### Figure viewing ###
# img = img.astype(np.uint8)
# imgplot = plt.imshow(img, 'gray')
# plt.savefig("pnoise_test.png")
    ### Generating pnoise_golden.npy ###
# np.save(os.path.join(os.path.dirname(__file__), "assets/pnoise_golden.npy"), img)
### Golden image comparison ###
img_true = np.load(os.path.join(os.path.dirname(__file__), "assets/pnoise_golden.npy"))
test.assertTrue(img.shape == img_true.shape)
err = np.max(np.abs(img - img_true))
tolerance = 1.5e-3
test.assertTrue(err < tolerance, f"err is {err} which is >= {tolerance}")
def test_curlnoise(test, device):
# image dim
W = 128
H = 128
N = W * H
seed = 42
# curl noise test
quiver_coords_host = wp.zeros(N, dtype=wp.vec2, device="cpu")
quiver_coords = wp.zeros(N, dtype=wp.vec2, device=device)
quiver_arrows_host = wp.zeros(N, dtype=wp.vec2, device="cpu")
quiver_arrows = wp.zeros(N, dtype=wp.vec2, device=device)
wp.launch(kernel=curlnoise, dim=N, inputs=[seed, W, quiver_coords, quiver_arrows], outputs=[], device=device)
wp.copy(quiver_coords_host, quiver_coords)
wp.copy(quiver_arrows_host, quiver_arrows)
wp.synchronize()
xy_coords = quiver_coords_host.numpy()
uv_coords = quiver_arrows_host.numpy()
# normalize
norms = uv_coords[:, 0] * uv_coords[:, 0] + uv_coords[:, 1] * uv_coords[:, 1]
uv_coords = uv_coords / np.sqrt(np.max(norms))
X = xy_coords[:, 0]
Y = xy_coords[:, 1]
U = uv_coords[:, 0]
V = uv_coords[:, 1]
### Figure viewing ###
# fig, ax = plt.subplots(figsize=(25,25))
# ax.quiver(X, Y, U, V)
# ax.axis([0.0, 25.0, 0.0, 25.0])
# ax.set_aspect('equal')
# plt.savefig("curlnoise_test.png")
    ### Generating curlnoise_golden.npy ###
result = np.stack((xy_coords, uv_coords))
# np.save(os.path.join(os.path.dirname(__file__), "assets/curlnoise_golden.npy"), result)
### Golden image comparison ###
result_true = np.load(os.path.join(os.path.dirname(__file__), "assets/curlnoise_golden.npy"))
    test.assertEqual(result.shape, result_true.shape)
err = np.max(np.abs(result - result_true))
test.assertTrue(err < 1e-04)
@wp.kernel
def noise_loss_kernel(
kernel_seed: int,
query_positions: wp.array(dtype=wp.vec2),
noise_values: wp.array(dtype=float),
noise_loss: wp.array(dtype=float),
):
tid = wp.tid()
state = wp.rand_init(kernel_seed)
p = query_positions[tid]
n = wp.noise(state, p)
noise_values[tid] = n
wp.atomic_add(noise_loss, 0, n)
@wp.kernel
def noise_cd(kernel_seed: int, query_positions: wp.array(dtype=wp.vec2), gradients: wp.array(dtype=wp.vec2)):
tid = wp.tid()
state = wp.rand_init(kernel_seed)
p = query_positions[tid]
eps = 1.0e-3
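    # Reference gradient via a second-order central difference:
    #   d/dx noise(p) ~= (noise(p + eps*ex) - noise(p - eps*ex)) / (2 * eps), likewise for y.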
pl = wp.vec2(p[0] - eps, p[1])
pr = wp.vec2(p[0] + eps, p[1])
pd = wp.vec2(p[0], p[1] - eps)
pu = wp.vec2(p[0], p[1] + eps)
nl = wp.noise(state, pl)
nr = wp.noise(state, pr)
nd = wp.noise(state, pd)
nu = wp.noise(state, pu)
gx = (nr - nl) / (2.0 * eps)
gy = (nu - nd) / (2.0 * eps)
gradients[tid] = wp.vec2(gx, gy)
def test_adj_noise(test, device):
# grid dim
N = 9
seed = 42
tape = wp.Tape()
positions = np.array(
[
[-0.1, -0.1],
[0.0, -0.1],
[0.1, -0.1],
[-0.1, 0.0],
[0.0, 0.0],
[0.1, 0.0],
[-0.1, 0.1],
[0.0, 0.1],
[0.1, 0.1],
]
)
with tape:
query_positions = wp.array(positions, dtype=wp.vec2, device=device, requires_grad=True)
noise_values = wp.zeros(N, dtype=float, device=device)
noise_loss = wp.zeros(n=1, dtype=float, device=device, requires_grad=True)
wp.launch(
kernel=noise_loss_kernel, dim=N, inputs=[seed, query_positions, noise_values, noise_loss], device=device
)
# analytic
tape.backward(loss=noise_loss)
analytic = tape.gradients[query_positions].numpy().reshape((3, 3, 2))
# central difference
gradients = wp.zeros(N, dtype=wp.vec2, device=device)
wp.launch(kernel=noise_cd, dim=N, inputs=[seed, query_positions, gradients], device=device)
gradients_host = gradients.numpy().reshape((3, 3, 2))
diff = analytic - gradients_host
result = np.sum(diff * diff, axis=2)
err = np.where(result > 1.0e-3, result, 0).sum()
test.assertTrue(err < 1.0e-8)
devices = get_test_devices()
class TestNoise(unittest.TestCase):
pass
add_function_test(TestNoise, "test_pnoise", test_pnoise, devices=devices)
add_function_test(TestNoise, "test_curlnoise", test_curlnoise, devices=devices)
add_function_test(TestNoise, "test_adj_noise", test_adj_noise, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 6,826 | Python | 26.865306 | 120 | 0.607237 |
NVIDIA/warp/warp/tests/test_transient_module.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import tempfile
import unittest
from importlib import util
import warp as wp
from warp.tests.unittest_utils import *
CODE = """# -*- coding: utf-8 -*-
import warp as wp
@wp.struct
class Data:
x: wp.array(dtype=int)
@wp.func
def increment(x: int):
# This shouldn't be picked up.
return x + 123
@wp.func
def increment(x: int):
return x + 1
@wp.kernel
def compute(data: Data):
data.x[0] = increment(data.x[0])
"""
def load_code_as_module(code, name):
file, file_path = tempfile.mkstemp(suffix=".py")
try:
with os.fdopen(file, "w") as f:
f.write(code)
spec = util.spec_from_file_location(name, file_path)
module = util.module_from_spec(spec)
spec.loader.exec_module(module)
finally:
os.remove(file_path)
return module
def test_transient_module(test, device):
module = load_code_as_module(CODE, "")
# Loading it a second time shouldn't be an issue.
module = load_code_as_module(CODE, "")
assert len(module.compute.module.structs) == 1
assert len(module.compute.module.functions) == 1
data = module.Data()
data.x = wp.array([123], dtype=int, device=device)
wp.set_module_options({"foo": "bar"}, module=module)
assert wp.get_module_options(module=module).get("foo") == "bar"
assert module.compute.module.options.get("foo") == "bar"
wp.launch(module.compute, dim=1, inputs=[data], device=device)
assert_np_equal(data.x.numpy(), np.array([124]))
devices = get_test_devices()
class TestTransientModule(unittest.TestCase):
pass
add_function_test(TestTransientModule, "test_transient_module", test_transient_module, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 2,212 | Python | 24.732558 | 103 | 0.686709 |
NVIDIA/warp/warp/tests/test_copy.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def mul_1d(a: wp.array1d(dtype=float), s: float):
i = wp.tid()
a[i] = a[i] * s
@wp.kernel
def mul_2d(a: wp.array2d(dtype=float), s: float):
i, j = wp.tid()
a[i, j] = a[i, j] * s
@wp.kernel
def mul_3d(a: wp.array3d(dtype=float), s: float):
i, j, k = wp.tid()
a[i, j, k] = a[i, j, k] * s
@wp.kernel
def mul_4d(a: wp.array4d(dtype=float), s: float):
i, j, k, l = wp.tid()
a[i, j, k, l] = a[i, j, k, l] * s
def test_copy_strided(test, device):
with wp.ScopedDevice(device):
np_data1 = np.arange(10, dtype=np.float32)
np_data2 = np.arange(100, dtype=np.float32).reshape((10, 10))
np_data3 = np.arange(1000, dtype=np.float32).reshape((10, 10, 10))
np_data4 = np.arange(10000, dtype=np.float32).reshape((10, 10, 10, 10))
wp_data1 = wp.array(data=np_data1, copy=True)
wp_data2 = wp.array(data=np_data2, copy=True)
wp_data3 = wp.array(data=np_data3, copy=True)
wp_data4 = wp.array(data=np_data4, copy=True)
expected1 = np_data1[1::2]
expected2 = np_data2[1::2, 1::2]
expected3 = np_data3[1::2, 1::2, 1::2]
expected4 = np_data4[1::2, 1::2, 1::2, 1::2]
a1 = wp_data1[1::2]
a2 = wp_data2[1::2, 1::2]
a3 = wp_data3[1::2, 1::2, 1::2]
a4 = wp_data4[1::2, 1::2, 1::2, 1::2]
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
b1 = wp.zeros_like(a1)
b2 = wp.zeros_like(a2)
b3 = wp.zeros_like(a3)
b4 = wp.zeros_like(a4)
test.assertFalse(a1.is_contiguous)
test.assertFalse(a2.is_contiguous)
test.assertFalse(a3.is_contiguous)
test.assertFalse(a4.is_contiguous)
test.assertTrue(b1.is_contiguous)
test.assertTrue(b2.is_contiguous)
test.assertTrue(b3.is_contiguous)
test.assertTrue(b4.is_contiguous)
# copy non-contiguous to contiguous
wp.copy(b1, a1)
wp.copy(b2, a2)
wp.copy(b3, a3)
wp.copy(b4, a4)
assert_np_equal(a1.numpy(), b1.numpy())
assert_np_equal(a2.numpy(), b2.numpy())
assert_np_equal(a3.numpy(), b3.numpy())
assert_np_equal(a4.numpy(), b4.numpy())
s = 2.0
wp.launch(mul_1d, dim=b1.shape, inputs=[b1, s])
wp.launch(mul_2d, dim=b2.shape, inputs=[b2, s])
wp.launch(mul_3d, dim=b3.shape, inputs=[b3, s])
wp.launch(mul_4d, dim=b4.shape, inputs=[b4, s])
# copy contiguous to non-contiguous
wp.copy(a1, b1)
wp.copy(a2, b2)
wp.copy(a3, b3)
wp.copy(a4, b4)
assert_np_equal(a1.numpy(), b1.numpy())
assert_np_equal(a2.numpy(), b2.numpy())
assert_np_equal(a3.numpy(), b3.numpy())
assert_np_equal(a4.numpy(), b4.numpy())
assert_np_equal(a1.numpy(), expected1 * s)
assert_np_equal(a2.numpy(), expected2 * s)
assert_np_equal(a3.numpy(), expected3 * s)
assert_np_equal(a4.numpy(), expected4 * s)
def test_copy_indexed(test, device):
with wp.ScopedDevice(device):
np_data1 = np.arange(10, dtype=np.float32)
np_data2 = np.arange(100, dtype=np.float32).reshape((10, 10))
np_data3 = np.arange(1000, dtype=np.float32).reshape((10, 10, 10))
np_data4 = np.arange(10000, dtype=np.float32).reshape((10, 10, 10, 10))
wp_data1 = wp.array(data=np_data1, copy=True)
wp_data2 = wp.array(data=np_data2, copy=True)
wp_data3 = wp.array(data=np_data3, copy=True)
wp_data4 = wp.array(data=np_data4, copy=True)
np_indices = np.array([1, 5, 8, 9])
wp_indices = wp.array(data=np_indices, dtype=wp.int32)
# Note: Indexing using multiple index arrays works differently
# in Numpy and Warp, so the syntax is different.
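        # In NumPy, a[idx, idx] pairs the index arrays element-wise, whereas Warp's
        # indexed arrays apply each index array to its dimension independently, so the
        # expected values below use chained NumPy indexing (a[idx][:, idx]) instead.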
expected1 = np_data1[np_indices]
expected2 = np_data2[np_indices][:, np_indices]
expected3 = np_data3[np_indices][:, np_indices][:, :, np_indices]
expected4 = np_data4[np_indices][:, np_indices][:, :, np_indices][:, :, :, np_indices]
a1 = wp_data1[wp_indices]
a2 = wp_data2[wp_indices, wp_indices]
a3 = wp_data3[wp_indices, wp_indices, wp_indices]
a4 = wp_data4[wp_indices, wp_indices, wp_indices, wp_indices]
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
b1 = wp.zeros_like(a1)
b2 = wp.zeros_like(a2)
b3 = wp.zeros_like(a3)
b4 = wp.zeros_like(a4)
test.assertFalse(a1.is_contiguous)
test.assertFalse(a2.is_contiguous)
test.assertFalse(a3.is_contiguous)
test.assertFalse(a4.is_contiguous)
test.assertTrue(b1.is_contiguous)
test.assertTrue(b2.is_contiguous)
test.assertTrue(b3.is_contiguous)
test.assertTrue(b4.is_contiguous)
# copy non-contiguous to contiguous
wp.copy(b1, a1)
wp.copy(b2, a2)
wp.copy(b3, a3)
wp.copy(b4, a4)
assert_np_equal(a1.numpy(), b1.numpy())
assert_np_equal(a2.numpy(), b2.numpy())
assert_np_equal(a3.numpy(), b3.numpy())
assert_np_equal(a4.numpy(), b4.numpy())
s = 2.0
wp.launch(mul_1d, dim=b1.shape, inputs=[b1, s])
wp.launch(mul_2d, dim=b2.shape, inputs=[b2, s])
wp.launch(mul_3d, dim=b3.shape, inputs=[b3, s])
wp.launch(mul_4d, dim=b4.shape, inputs=[b4, s])
# copy contiguous to non-contiguous
wp.copy(a1, b1)
wp.copy(a2, b2)
wp.copy(a3, b3)
wp.copy(a4, b4)
assert_np_equal(a1.numpy(), b1.numpy())
assert_np_equal(a2.numpy(), b2.numpy())
assert_np_equal(a3.numpy(), b3.numpy())
assert_np_equal(a4.numpy(), b4.numpy())
assert_np_equal(a1.numpy(), expected1 * s)
assert_np_equal(a2.numpy(), expected2 * s)
assert_np_equal(a3.numpy(), expected3 * s)
assert_np_equal(a4.numpy(), expected4 * s)
def test_copy_adjoint(test, device):
state_in = wp.from_numpy(
np.array([1.0, 2.0, 3.0]).astype(np.float32), dtype=wp.float32, requires_grad=True, device=device
)
state_out = wp.zeros(state_in.shape, dtype=wp.float32, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.copy(state_out, state_in)
grads = {state_out: wp.from_numpy(np.array([1.0, 1.0, 1.0]).astype(np.float32), dtype=wp.float32, device=device)}
tape.backward(grads=grads)
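    # wp.copy has an identity Jacobian, so the gradient seeded on state_out is
    # expected to flow through to state_in unchanged.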
assert_np_equal(state_in.grad.numpy(), np.array([1.0, 1.0, 1.0]).astype(np.float32))
devices = get_test_devices()
class TestCopy(unittest.TestCase):
pass
add_function_test(TestCopy, "test_copy_strided", test_copy_strided, devices=devices)
add_function_test(TestCopy, "test_copy_indexed", test_copy_indexed, devices=devices)
add_function_test(TestCopy, "test_copy_adjoint", test_copy_adjoint, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 7,709 | Python | 32.376623 | 117 | 0.601245 |
NVIDIA/warp/warp/tests/test_types.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
from warp.tests.unittest_utils import *
def test_integers(test, device, dtype):
value = dtype(0)
test.assertIsInstance(bool(value), bool)
test.assertIsInstance(int(value), int)
test.assertIsInstance(float(value), float)
test.assertEqual(bool(value), False)
test.assertEqual(int(value), 0)
test.assertEqual(float(value), 0.0)
try:
ctypes.c_bool(value)
ctypes.c_int(value)
ctypes.c_float(value)
except Exception:
test.fail()
value = dtype(123)
test.assertIsInstance(bool(value), bool)
test.assertIsInstance(int(value), int)
test.assertIsInstance(float(value), float)
test.assertEqual(bool(value), True)
test.assertEqual(int(value), 123)
test.assertEqual(float(value), 123.0)
try:
ctypes.c_bool(value)
ctypes.c_int(value)
ctypes.c_float(value)
except Exception:
test.fail()
def test_floats(test, device, dtype):
value = dtype(0.0)
test.assertIsInstance(bool(value), bool)
test.assertIsInstance(int(value), int)
test.assertIsInstance(float(value), float)
test.assertEqual(bool(value), False)
test.assertEqual(int(value), 0)
test.assertEqual(float(value), 0.0)
try:
ctypes.c_bool(value)
ctypes.c_float(value)
except Exception:
test.fail()
value = dtype(1.25)
test.assertIsInstance(bool(value), bool)
test.assertIsInstance(int(value), int)
test.assertIsInstance(float(value), float)
test.assertEqual(bool(value), True)
test.assertEqual(int(value), 1)
test.assertEqual(float(value), 1.25)
try:
ctypes.c_bool(value)
ctypes.c_float(value)
except Exception:
test.fail()
def test_vector(test, device, dtype):
def make_scalar(x):
# Cast to the correct integer type to simulate wrapping.
if dtype in wp.types.int_types:
return dtype._type_(x).value
return x
def make_vec(*args):
if dtype in wp.types.int_types:
# Cast to the correct integer type to simulate wrapping.
return tuple(dtype._type_(x).value for x in args)
return args
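    # Illustrative note (values not used by the test): for integer dtypes the ctypes
    # cast wraps out-of-range inputs, e.g. an int8 cast maps 200 to -56.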
vec3_cls = wp.vec(3, dtype)
vec4_cls = wp.vec(4, dtype)
v = vec4_cls(1, 2, 3, 4)
test.assertEqual(v[0], make_scalar(1))
test.assertEqual(v.x, make_scalar(1))
test.assertEqual(v.y, make_scalar(2))
test.assertEqual(v.z, make_scalar(3))
test.assertEqual(v.w, make_scalar(4))
test.assertSequenceEqual(v[0:2], make_vec(1, 2))
test.assertSequenceEqual(v, make_vec(1, 2, 3, 4))
v[0] = -1
test.assertEqual(v[0], make_scalar(-1))
test.assertEqual(v.x, make_scalar(-1))
test.assertEqual(v.y, make_scalar(2))
test.assertEqual(v.z, make_scalar(3))
test.assertEqual(v.w, make_scalar(4))
test.assertSequenceEqual(v[0:2], make_vec(-1, 2))
test.assertSequenceEqual(v, make_vec(-1, 2, 3, 4))
v[1:3] = (-2, -3)
test.assertEqual(v[0], make_scalar(-1))
test.assertEqual(v.x, make_scalar(-1))
test.assertEqual(v.y, make_scalar(-2))
test.assertEqual(v.z, make_scalar(-3))
test.assertEqual(v.w, make_scalar(4))
test.assertSequenceEqual(v[0:2], make_vec(-1, -2))
test.assertSequenceEqual(v, make_vec(-1, -2, -3, 4))
v.x = 1
test.assertEqual(v[0], make_scalar(1))
test.assertEqual(v.x, make_scalar(1))
test.assertEqual(v.y, make_scalar(-2))
test.assertEqual(v.z, make_scalar(-3))
test.assertEqual(v.w, make_scalar(4))
test.assertSequenceEqual(v[0:2], make_vec(1, -2))
test.assertSequenceEqual(v, make_vec(1, -2, -3, 4))
v = vec3_cls(2, 4, 6)
test.assertSequenceEqual(+v, make_vec(2, 4, 6))
test.assertSequenceEqual(-v, make_vec(-2, -4, -6))
test.assertSequenceEqual(v + vec3_cls(1, 1, 1), make_vec(3, 5, 7))
test.assertSequenceEqual(v - vec3_cls(1, 1, 1), make_vec(1, 3, 5))
test.assertSequenceEqual(v * dtype(2), make_vec(4, 8, 12))
test.assertSequenceEqual(dtype(2) * v, make_vec(4, 8, 12))
test.assertSequenceEqual(v / dtype(2), make_vec(1, 2, 3))
test.assertSequenceEqual(dtype(12) / v, make_vec(6, 3, 2))
test.assertTrue(v != vec3_cls(1, 2, 3))
test.assertEqual(str(v), "[{}]".format(", ".join(str(x) for x in v)))
    # Check added purely for coverage reasons, but is this really a desired
    # behaviour? Disallowing the definition of new attributes, e.g. via
    # `__slots__`, could help improve memory usage.
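    # For illustration only (not exercised here): declaring `__slots__` on the vector
    # class would make the assignment below raise AttributeError instead of silently
    # creating a new `foo` attribute.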
v.foo = 123
test.assertEqual(v.foo, 123)
devices = [x for x in get_test_devices() if x.is_cpu]
class TestTypes(unittest.TestCase):
def test_bool(self):
value = wp.bool(False)
self.assertIsInstance(bool(value), bool)
self.assertIsInstance(int(value), int)
self.assertIsInstance(float(value), float)
self.assertEqual(bool(value), False)
self.assertEqual(int(value), 0)
self.assertEqual(float(value), 0.0)
try:
ctypes.c_bool(value)
except Exception:
self.fail()
value = wp.bool(True)
self.assertIsInstance(bool(value), bool)
self.assertIsInstance(int(value), int)
self.assertIsInstance(float(value), float)
self.assertEqual(bool(value), True)
self.assertEqual(int(value), 1)
self.assertEqual(float(value), 1.0)
try:
ctypes.c_bool(value)
except Exception:
self.fail()
value = wp.bool(0.0)
self.assertIsInstance(bool(value), bool)
self.assertIsInstance(int(value), int)
self.assertIsInstance(float(value), float)
self.assertEqual(bool(value), False)
self.assertEqual(int(value), 0)
self.assertEqual(float(value), 0.0)
try:
ctypes.c_bool(value)
except Exception:
self.fail()
value = wp.bool(123)
self.assertIsInstance(bool(value), bool)
self.assertIsInstance(int(value), int)
self.assertIsInstance(float(value), float)
self.assertEqual(bool(value), True)
self.assertEqual(int(value), 1)
self.assertEqual(float(value), 1.0)
try:
ctypes.c_bool(value)
except Exception:
self.fail()
def test_constant(self):
const = wp.constant(123)
self.assertEqual(const, 123)
const = wp.constant(1.25)
self.assertEqual(const, 1.25)
const = wp.constant(True)
self.assertEqual(const, True)
const = wp.constant(wp.float16(1.25))
self.assertEqual(const.value, 1.25)
const = wp.constant(wp.int16(123))
self.assertEqual(const.value, 123)
const = wp.constant(wp.vec3i(1, 2, 3))
self.assertEqual(const, wp.vec3i(1, 2, 3))
def test_constant_error_invalid_type(self):
with self.assertRaisesRegex(RuntimeError, r"Invalid constant type: <class 'tuple'>$"):
wp.constant((1, 2, 3))
def test_vector_assign(self):
v = wp.vec3s()
v[0] = 1
v[1] = wp.int8(2)
v[2] = np.int8(3)
self.assertEqual(v, (1, 2, 3))
v = wp.vec3h()
v[0] = 1.0
v[1] = wp.float16(2.0)
v[2] = np.float16(3.0)
self.assertEqual(v, (1.0, 2.0, 3.0))
def test_vector_error_invalid_arg_count(self):
with self.assertRaisesRegex(
ValueError, r"Invalid number of arguments in vector constructor, expected 3 elements, got 2$"
):
wp.vec3(1, 2)
def test_vector_error_invalid_ptr(self):
with self.assertRaisesRegex(RuntimeError, r"NULL pointer exception"):
wp.vec3.from_ptr(0)
def test_vector_error_invalid_get_item_key(self):
v = wp.vec3(1, 2, 3)
with self.assertRaisesRegex(KeyError, r"Invalid key None, expected int or slice"):
v[None]
def test_vector_error_invalid_set_item_key(self):
v = wp.vec3(1, 2, 3)
with self.assertRaisesRegex(KeyError, r"Invalid key None, expected int or slice"):
v[None] = 0
def test_vector_error_invalid_set_item_value(self):
v1 = wp.vec3i(1, 2, 3)
v2 = wp.vec3h(1, 2, 3)
with self.assertRaisesRegex(TypeError, r"Expected to assign a `int32` value but got `str` instead"):
v1[0] = "123.0"
with self.assertRaisesRegex(
TypeError, r"Expected to assign a slice from a sequence of values but got `int` instead"
):
v1[:] = 123
with self.assertRaisesRegex(
TypeError, r"Expected to assign a slice from a sequence of `int32` values but got `vec3i` instead"
):
v1[:1] = (v1,)
with self.assertRaisesRegex(ValueError, r"Can only assign sequence of same size"):
v1[:1] = (1, 2)
with self.assertRaisesRegex(
TypeError, r"Expected to assign a slice from a sequence of `float16` values but got `vec3h` instead"
):
v2[:1] = (v2,)
def test_matrix(self):
for dtype in tuple(wp.types.float_types) + (float,):
def make_scalar(x, dtype=dtype):
# Cast to the correct integer type to simulate wrapping.
if dtype in wp.types.int_types:
return dtype._type_(x).value
return x
def make_vec(*args, dtype=dtype):
if dtype in wp.types.int_types:
# Cast to the correct integer type to simulate wrapping.
return tuple(dtype._type_(x).value for x in args)
return args
def make_mat(*args, dtype=dtype):
if dtype in wp.types.int_types:
# Cast to the correct integer type to simulate wrapping.
return tuple(tuple(dtype._type_(x).value for x in row) for row in args)
return args
mat22_cls = wp.mat((2, 2), dtype)
mat33_cls = wp.mat((3, 3), dtype)
vec2_cls = wp.vec(2, dtype)
m = mat33_cls(((1, 2, 3), (4, 5, 6), (7, 8, 9)))
self.assertEqual(m[0][0], make_scalar(1))
self.assertEqual(m[0][1], make_scalar(2))
self.assertEqual(m[0][2], make_scalar(3))
self.assertEqual(m[1][0], make_scalar(4))
self.assertEqual(m[1][1], make_scalar(5))
self.assertEqual(m[1][2], make_scalar(6))
self.assertEqual(m[2][0], make_scalar(7))
self.assertEqual(m[2][1], make_scalar(8))
self.assertEqual(m[2][2], make_scalar(9))
self.assertEqual(m[0, 0], make_scalar(1))
self.assertEqual(m[0, 1], make_scalar(2))
self.assertEqual(m[0, 2], make_scalar(3))
self.assertEqual(m[1, 0], make_scalar(4))
self.assertEqual(m[1, 1], make_scalar(5))
self.assertEqual(m[1, 2], make_scalar(6))
self.assertEqual(m[2, 0], make_scalar(7))
self.assertEqual(m[2, 1], make_scalar(8))
self.assertEqual(m[2, 2], make_scalar(9))
self.assertSequenceEqual(m[0], make_vec(1, 2, 3))
self.assertSequenceEqual(m[1], make_vec(4, 5, 6))
self.assertSequenceEqual(m[2], make_vec(7, 8, 9))
self.assertSequenceEqual(m[0][1:3], make_vec(2, 3))
self.assertSequenceEqual(m[1][0:2], make_vec(4, 5))
self.assertSequenceEqual(m[2][0:3], make_vec(7, 8, 9))
# self.assertSequenceEqual(m[0, 1:3], make_vec(2, 3))
# self.assertSequenceEqual(m[1, 0:2], make_vec(4, 5))
# self.assertSequenceEqual(m[2, 0:3], make_vec(7, 8, 9))
self.assertSequenceEqual(m, make_mat((1, 2, 3), (4, 5, 6), (7, 8, 9)))
m[1, 0] = -4
self.assertEqual(m[0][0], make_scalar(1))
self.assertEqual(m[0][1], make_scalar(2))
self.assertEqual(m[0][2], make_scalar(3))
self.assertEqual(m[1][0], make_scalar(-4))
self.assertEqual(m[1][1], make_scalar(5))
self.assertEqual(m[1][2], make_scalar(6))
self.assertEqual(m[2][0], make_scalar(7))
self.assertEqual(m[2][1], make_scalar(8))
self.assertEqual(m[2][2], make_scalar(9))
self.assertEqual(m[0, 0], make_scalar(1))
self.assertEqual(m[0, 1], make_scalar(2))
self.assertEqual(m[0, 2], make_scalar(3))
self.assertEqual(m[1, 0], make_scalar(-4))
self.assertEqual(m[1, 1], make_scalar(5))
self.assertEqual(m[1, 2], make_scalar(6))
self.assertEqual(m[2, 0], make_scalar(7))
self.assertEqual(m[2, 1], make_scalar(8))
self.assertEqual(m[2, 2], make_scalar(9))
self.assertSequenceEqual(m[0], make_vec(1, 2, 3))
self.assertSequenceEqual(m[1], make_vec(-4, 5, 6))
self.assertSequenceEqual(m[2], make_vec(7, 8, 9))
self.assertSequenceEqual(m[0][1:3], make_vec(2, 3))
self.assertSequenceEqual(m[1][0:2], make_vec(-4, 5))
self.assertSequenceEqual(m[2][0:3], make_vec(7, 8, 9))
# self.assertSequenceEqual(m[0, 1:3], make_vec(2, 3))
# self.assertSequenceEqual(m[1, 0:2], make_vec(-4, 5))
# self.assertSequenceEqual(m[2, 0:3], make_vec(7, 8, 9))
self.assertSequenceEqual(m, make_mat((1, 2, 3), (-4, 5, 6), (7, 8, 9)))
m[2] = (-7, 8, -9)
self.assertEqual(m[0][0], make_scalar(1))
self.assertEqual(m[0][1], make_scalar(2))
self.assertEqual(m[0][2], make_scalar(3))
self.assertEqual(m[1][0], make_scalar(-4))
self.assertEqual(m[1][1], make_scalar(5))
self.assertEqual(m[1][2], make_scalar(6))
self.assertEqual(m[2][0], make_scalar(-7))
self.assertEqual(m[2][1], make_scalar(8))
self.assertEqual(m[2][2], make_scalar(-9))
self.assertEqual(m[0, 0], make_scalar(1))
self.assertEqual(m[0, 1], make_scalar(2))
self.assertEqual(m[0, 2], make_scalar(3))
self.assertEqual(m[1, 0], make_scalar(-4))
self.assertEqual(m[1, 1], make_scalar(5))
self.assertEqual(m[1, 2], make_scalar(6))
self.assertEqual(m[2, 0], make_scalar(-7))
self.assertEqual(m[2, 1], make_scalar(8))
self.assertEqual(m[2, 2], make_scalar(-9))
self.assertSequenceEqual(m[0], make_vec(1, 2, 3))
self.assertSequenceEqual(m[1], make_vec(-4, 5, 6))
self.assertSequenceEqual(m[2], make_vec(-7, 8, -9))
self.assertSequenceEqual(m[0][1:3], make_vec(2, 3))
self.assertSequenceEqual(m[1][0:2], make_vec(-4, 5))
self.assertSequenceEqual(m[2][0:3], make_vec(-7, 8, -9))
# self.assertSequenceEqual(m[0, 1:3], make_vec(2, 3))
# self.assertSequenceEqual(m[1, 0:2], make_vec(-4, 5))
# self.assertSequenceEqual(m[2, 0:3], make_vec(-7, 8, -9))
self.assertSequenceEqual(m, make_mat((1, 2, 3), (-4, 5, 6), (-7, 8, -9)))
m = mat22_cls(2, 4, 6, 8)
self.assertSequenceEqual(+m, make_mat((2, 4), (6, 8)))
self.assertSequenceEqual(-m, make_mat((-2, -4), (-6, -8)))
self.assertSequenceEqual(m + mat22_cls(1, 1, 1, 1), make_mat((3, 5), (7, 9)))
self.assertSequenceEqual(m - mat22_cls(1, 1, 1, 1), make_mat((1, 3), (5, 7)))
self.assertSequenceEqual(m * dtype(2), make_mat((4, 8), (12, 16)))
self.assertSequenceEqual(dtype(2) * m, make_mat((4, 8), (12, 16)))
self.assertSequenceEqual(m / dtype(2), make_mat((1, 2), (3, 4)))
self.assertSequenceEqual(dtype(24) / m, make_mat((12, 6), (4, 3)))
self.assertSequenceEqual(m * vec2_cls(1, 2), make_vec(10, 22))
self.assertSequenceEqual(m @ vec2_cls(1, 2), make_vec(10, 22))
self.assertSequenceEqual(vec2_cls(1, 2) * m, make_vec(14, 20))
self.assertSequenceEqual(vec2_cls(1, 2) @ m, make_vec(14, 20))
self.assertTrue(m != mat22_cls(1, 2, 3, 4))
self.assertEqual(
str(m),
"[{}]".format(",\n ".join("[{}]".format(", ".join(str(y) for y in m[x])) for x in range(m._shape_[0]))),
)
            # Check added purely for coverage reasons, but is this really a desired
            # behaviour? Disallowing the definition of new attributes, e.g. via
            # `__slots__`, could help improve memory usage.
m.foo = 123
self.assertEqual(m.foo, 123)
def test_matrix_error_invalid_arg_count(self):
with self.assertRaisesRegex(
ValueError, r"Invalid number of arguments in matrix constructor, expected 4 elements, got 3$"
):
wp.mat22(1, 2, 3)
def test_matrix_error_invalid_row_count(self):
with self.assertRaisesRegex(
TypeError, r"Invalid argument in matrix constructor, expected row of length 2, got \(1, 2, 3\)$"
):
wp.mat22((1, 2, 3), (3, 4, 5))
def test_matrix_error_invalid_ptr(self):
with self.assertRaisesRegex(RuntimeError, r"NULL pointer exception"):
wp.mat22.from_ptr(0)
def test_matrix_error_invalid_set_row_index(self):
m = wp.mat22(1, 2, 3, 4)
with self.assertRaisesRegex(IndexError, r"Invalid row index$"):
m.set_row(2, (0, 0))
def test_matrix_error_invalid_get_item_key(self):
m = wp.mat22(1, 2, 3, 4)
with self.assertRaisesRegex(KeyError, r"Invalid key None, expected int or pair of ints"):
m[None]
def test_matrix_error_invalid_get_item_key_length(self):
m = wp.mat22(1, 2, 3, 4)
with self.assertRaisesRegex(KeyError, r"Invalid key, expected one or two indices, got 3"):
m[0, 1, 2]
def test_matrix_error_invalid_set_item_key(self):
m = wp.mat22(1, 2, 3, 4)
with self.assertRaisesRegex(KeyError, r"Invalid key None, expected int or pair of ints"):
m[None] = 0
def test_matrix_error_invalid_set_item_key_length(self):
m = wp.mat22(1, 2, 3, 4)
with self.assertRaisesRegex(KeyError, r"Invalid key, expected one or two indices, got 3"):
m[0, 1, 2] = (0, 0)
def test_matrix_error_invalid_set_item_value(self):
m = wp.mat22h(1, 2, 3, 4)
with self.assertRaisesRegex(TypeError, r"Expected to assign a `float16` value but got `str` instead"):
m[0, 0] = "123.0"
with self.assertRaisesRegex(TypeError, r"Expected to assign a `float16` value but got `str` instead"):
m[0][0] = "123.0"
with self.assertRaisesRegex(
TypeError, r"Expected to assign a slice from a sequence of values but got `int` instead"
):
m[0] = 123
with self.assertRaisesRegex(
TypeError, r"Expected to assign a slice from a sequence of `float16` values but got `mat22h` instead"
):
m[0] = (m,)
with self.assertRaisesRegex(
KeyError, r"Slices are not supported when indexing matrices using the `m\[start:end\]` notation"
):
m[:] = 123
with self.assertRaisesRegex(
KeyError, r"Slices are not supported when indexing matrices using the `m\[i, j\]` notation"
):
m[0, :1] = (123,)
with self.assertRaisesRegex(ValueError, r"Can only assign sequence of same size"):
m[0][:1] = (1, 2)
def test_dtype_from_numpy(self):
import numpy as np
def test_conversions(np_type, warp_type):
self.assertEqual(wp.dtype_from_numpy(np_type), warp_type)
self.assertEqual(wp.dtype_from_numpy(np.dtype(np_type)), warp_type)
test_conversions(np.float16, wp.float16)
test_conversions(np.float32, wp.float32)
test_conversions(np.float64, wp.float64)
test_conversions(np.int8, wp.int8)
test_conversions(np.int16, wp.int16)
test_conversions(np.int32, wp.int32)
test_conversions(np.int64, wp.int64)
test_conversions(np.uint8, wp.uint8)
test_conversions(np.uint16, wp.uint16)
test_conversions(np.uint32, wp.uint32)
test_conversions(np.uint64, wp.uint64)
test_conversions(np.bool_, wp.bool)
test_conversions(np.byte, wp.int8)
test_conversions(np.ubyte, wp.uint8)
def test_dtype_to_numpy(self):
import numpy as np
def test_conversions(warp_type, np_type):
self.assertEqual(wp.dtype_to_numpy(warp_type), np_type)
test_conversions(wp.float16, np.float16)
test_conversions(wp.float32, np.float32)
test_conversions(wp.float64, np.float64)
test_conversions(wp.int8, np.int8)
test_conversions(wp.int16, np.int16)
test_conversions(wp.int32, np.int32)
test_conversions(wp.int64, np.int64)
test_conversions(wp.uint8, np.uint8)
test_conversions(wp.uint16, np.uint16)
test_conversions(wp.uint32, np.uint32)
test_conversions(wp.uint64, np.uint64)
test_conversions(wp.bool, np.bool_)
for dtype in wp.types.int_types:
add_function_test(TestTypes, f"test_integers_{dtype.__name__}", test_integers, devices=devices, dtype=dtype)
for dtype in wp.types.float_types:
add_function_test(TestTypes, f"test_floats_{dtype.__name__}", test_floats, devices=devices, dtype=dtype)
for dtype in tuple(wp.types.scalar_types) + (int, float):
add_function_test(TestTypes, f"test_vector_{dtype.__name__}", test_vector, devices=devices, dtype=dtype)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 22,115 | Python | 38.848649 | 120 | 0.588605 |
NVIDIA/warp/warp/tests/test_compile_consts.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import warp as wp
import warp.tests.aux_test_compile_consts_dummy
from warp.tests.unittest_utils import *
LOCAL_ONE = wp.constant(1)
SQRT3_OVER_3 = wp.constant(0.57735026919)
UNIT_VEC = wp.constant(wp.vec3(SQRT3_OVER_3, SQRT3_OVER_3, SQRT3_OVER_3))
ONE_FP16 = wp.constant(wp.float16(1.0))
TEST_BOOL = wp.constant(True)
class Foobar:
ONE = wp.constant(1)
TWO = wp.constant(2)
@wp.kernel
def test_bool():
if TEST_BOOL:
expect_eq(1.0, 1.0)
else:
expect_eq(1.0, -1.0)
@wp.kernel
def test_int(a: int):
if Foobar.ONE > 0:
a = 123 + Foobar.TWO + warp.tests.aux_test_compile_consts_dummy.MINUS_ONE
else:
a = 456 + LOCAL_ONE
expect_eq(a, 124)
@wp.kernel
def test_float(x: float):
x = SQRT3_OVER_3
for i in range(3):
expect_eq(UNIT_VEC[i], x)
approx_one = wp.dot(UNIT_VEC, UNIT_VEC)
expect_near(approx_one, 1.0, 1e-6)
# test casting
expect_near(wp.float32(ONE_FP16), 1.0, 1e-6)
def test_closure_capture(test, device):
def make_closure_kernel(cst):
def closure_kernel_fn(expected: int):
wp.expect_eq(cst, expected)
return wp.Kernel(func=closure_kernel_fn)
one_closure = make_closure_kernel(Foobar.ONE)
two_closure = make_closure_kernel(Foobar.TWO)
wp.launch(one_closure, dim=(1), inputs=[1], device=device)
wp.launch(two_closure, dim=(1), inputs=[2], device=device)
def test_hash_global_capture(test, device):
"""Verifies that global variables are included in the module hash"""
a = 0
wp.launch(test_int, (1,), inputs=[a], device=device)
module_constants = wp.get_module(test_int.__module__).constants
# Ensure the expected constants and values are in the dictionary used in hashing
# Depending on what's been launched already, there might be additional constants present
test.assertEqual(module_constants["Foobar.ONE"], 1)
test.assertEqual(module_constants["Foobar.TWO"], 2)
test.assertEqual(module_constants["warp.tests.aux_test_compile_consts_dummy.MINUS_ONE"], -1)
test.assertEqual(module_constants["LOCAL_ONE"], 1)
def test_hash_redefine_kernel(test, device):
"""This test defines a second ``test_function`` so that the second launch returns the correct result."""
@wp.kernel
def test_function(data: wp.array(dtype=wp.float32)):
i = wp.tid()
data[i] = TEST_CONSTANT
TEST_CONSTANT = wp.constant(1.0)
test_array = wp.empty(1, dtype=wp.float32, device=device)
wp.launch(test_function, (1,), inputs=[test_array], device=device)
test.assertEqual(test_array.numpy()[0], 1.0)
module_hash_0 = wp.get_module(test_function.__module__).hash_module()
module_constants = wp.get_module(test_function.__module__).constants
test.assertEqual(module_constants["TEST_CONSTANT"], 1.0)
@wp.kernel
def test_function(data: wp.array(dtype=wp.float32)):
i = wp.tid()
data[i] = TEST_CONSTANT
TEST_CONSTANT = wp.constant(2.0)
wp.launch(test_function, (1,), inputs=[test_array], device=device)
test.assertEqual(test_array.numpy()[0], 2.0)
module_hash_1 = wp.get_module(test_function.__module__).hash_module()
module_constants = wp.get_module(test_function.__module__).constants
test.assertEqual(module_constants["TEST_CONSTANT"], 2.0)
test.assertNotEqual(module_hash_0, module_hash_1)
def test_hash_redefine_constant_only(test, device):
"""This test does not define a second ``test_function``, so the second launch does not invalidate the cache.
    For now this is expected behavior, but we can verify that the content hash is different.
"""
@wp.kernel
def test_function(data: wp.array(dtype=wp.float32)):
i = wp.tid()
data[i] = TEST_CONSTANT
TEST_CONSTANT = wp.constant(1.0)
test_array = wp.empty(1, dtype=wp.float32, device=device)
wp.launch(test_function, (1,), inputs=[test_array], device=device)
test.assertEqual(test_array.numpy()[0], 1.0)
module_hash_0 = wp.get_module(test_function.__module__).hash_module()
module_constants = wp.get_module(test_function.__module__).constants
test.assertEqual(module_constants["TEST_CONSTANT"], 1.0)
TEST_CONSTANT = wp.constant(2.0)
module_hash_1 = wp.get_module(test_function.__module__).hash_module(recompute_content_hash=True)
module_constants = wp.get_module(test_function.__module__).constants
test.assertEqual(module_constants["TEST_CONSTANT"], 2.0)
test.assertNotEqual(module_hash_0, module_hash_1, "Module hashes should be different if TEST_CONSTANT is changed.")
TEST_CONSTANT = wp.constant(1.0)
module_hash_2 = wp.get_module(test_function.__module__).hash_module(recompute_content_hash=True)
module_constants = wp.get_module(test_function.__module__).constants
test.assertEqual(module_constants["TEST_CONSTANT"], 1.0)
test.assertEqual(module_hash_0, module_hash_2, "Module hashes should be the same if TEST_CONSTANT is the same.")
def test_hash_shadowed_var(test, device):
"""Tests to ensure shadowed variables are not mistakenly added to the module hash"""
TEST_CONSTANT_SHADOW_0 = wp.constant(1.0)
TEST_CONSTANT_SHADOW_1 = wp.constant(1.0)
TEST_CONSTANT_SHADOW_2 = wp.constant(1.0)
@wp.kernel
def test_function(data: wp.array(dtype=wp.float32)):
i = wp.tid()
TEST_CONSTANT_SHADOW_0 = 2.0
TEST_CONSTANT_SHADOW_1, TEST_CONSTANT_SHADOW_2 = 4.0, 8.0
data[i] = TEST_CONSTANT_SHADOW_0 + TEST_CONSTANT_SHADOW_1 + TEST_CONSTANT_SHADOW_2
test_array = wp.empty(1, dtype=wp.float32, device=device)
wp.launch(test_function, (1,), inputs=[test_array], device=device)
test.assertEqual(test_array.numpy()[0], 14.0)
module_hash_0 = wp.get_module(test_function.__module__).hash_module()
module_constants = wp.get_module(test_function.__module__).constants
test.assertFalse("TEST_CONSTANT_SHADOW_0" in module_constants, "Constant should not be in dictionary.")
test.assertFalse("TEST_CONSTANT_SHADOW_1" in module_constants, "Constant should not be in dictionary.")
test.assertFalse("TEST_CONSTANT_SHADOW_2" in module_constants, "Constant should not be in dictionary.")
TEST_CONSTANT_SHADOW_0 = wp.constant(0.0)
TEST_CONSTANT_SHADOW_1 = wp.constant(0.0)
TEST_CONSTANT_SHADOW_2 = wp.constant(0.0)
module_hash_1 = wp.get_module(test_function.__module__).hash_module(recompute_content_hash=True)
test.assertEqual(module_hash_0, module_hash_1, "Module hashes should be the same since all constants are shadowed.")
class TestConstants(unittest.TestCase):
def test_constant_math(self):
# test doing math with python defined constants in *python* scope
twopi = wp.pi * 2.0
import math
self.assertEqual(twopi, math.pi * 2.0)
a = 0
x = 0.0
devices = get_test_devices()
add_kernel_test(TestConstants, test_bool, dim=1, inputs=[], devices=devices)
add_kernel_test(TestConstants, test_int, dim=1, inputs=[a], devices=devices)
add_kernel_test(TestConstants, test_float, dim=1, inputs=[x], devices=devices)
add_function_test(TestConstants, "test_closure_capture", test_closure_capture, devices=devices)
add_function_test(TestConstants, "test_hash_global_capture", test_hash_global_capture, devices=devices)
add_function_test(TestConstants, "test_hash_redefine_kernel", test_hash_redefine_kernel, devices=devices)
add_function_test(TestConstants, "test_hash_redefine_constant_only", test_hash_redefine_constant_only, devices=devices)
add_function_test(TestConstants, "test_hash_shadowed_var", test_hash_shadowed_var, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 8,147 | Python | 36.376147 | 120 | 0.696821 |
NVIDIA/warp/warp/tests/test_reload.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import importlib
import os
import unittest
import numpy as np
import warp as wp
# dummy modules used for testing reload with dependencies
import warp.tests.aux_test_dependent as test_dependent
import warp.tests.aux_test_reference as test_reference
import warp.tests.aux_test_reference_reference as test_reference_reference
# dummy module used for testing reload
import warp.tests.aux_test_square as test_square
from warp.tests.unittest_utils import *
def reload_module(module):
# Clearing the .pyc file associated with a module is a necessary workaround
# for `importlib.reload` to work as expected when run from within Kit.
cache_file = importlib.util.cache_from_source(module.__file__)
os.remove(cache_file)
importlib.reload(module)
def test_redefine(test, device):
# --------------------------------------------
# first pass
@wp.kernel
def basic(x: wp.array(dtype=float)):
tid = wp.tid()
x[tid] = float(tid) * 1.0
n = 32
x = wp.zeros(n, dtype=float, device=device)
wp.launch(kernel=basic, dim=n, inputs=[x], device=device)
# --------------------------------------------
# redefine kernel, should trigger a recompile
@wp.kernel
def basic(x: wp.array(dtype=float)):
tid = wp.tid()
x[tid] = float(tid) * 2.0
y = wp.zeros(n, dtype=float, device=device)
wp.launch(kernel=basic, dim=n, inputs=[y], device=device)
assert_np_equal(np.arange(0, n, 1), x.numpy())
assert_np_equal(np.arange(0, n, 1) * 2.0, y.numpy())
square_two = """import warp as wp
@wp.func
def sqr(x: float):
return x * x
@wp.kernel
def kern(expect: float):
wp.expect_eq(sqr(2.0), expect)
def run(expect, device):
wp.launch(kern, dim=1, inputs=[expect], device=device)
"""
square_four = """import warp as wp
@wp.func
def multiply(x: float):
return x * x
@wp.kernel
def kern(expect: float):
wp.expect_eq(multiply(4.0), expect)
def run(expect, device):
wp.launch(kern, dim=1, inputs=[expect], device=device)
"""
def test_reload(test, device):
# write out the module python and import it
f = open(os.path.abspath(os.path.join(os.path.dirname(__file__), "aux_test_square.py")), "w")
f.writelines(square_two)
f.flush()
f.close()
reload_module(test_square)
test_square.run(expect=4.0, device=device) # 2*2=4
f = open(os.path.abspath(os.path.join(os.path.dirname(__file__), "aux_test_square.py")), "w")
f.writelines(square_four)
f.flush()
f.close()
# reload module, this should trigger all of the funcs / kernels to be updated
reload_module(test_square)
test_square.run(expect=16.0, device=device) # 4*4 = 16
def test_reload_class(test, device):
def test_func():
import importlib as imp
import warp.tests.aux_test_class_kernel
from warp.tests.aux_test_class_kernel import ClassKernelTest
imp.reload(warp.tests.aux_test_class_kernel)
ctest = ClassKernelTest(device)
expected = np.zeros((10, 3, 3), dtype=np.float32)
expected[:] = np.eye(3)
assert_np_equal(expected, ctest.identities.numpy())
test_func()
test_func()
template_ref = """# This file is used to test reloading module references.
import warp as wp
import warp.tests.aux_test_reference_reference as refref
@wp.func
def magic():
return {} * refref.more_magic()
"""
template_refref = """# This file is used to test reloading module references.
import warp as wp
@wp.func
def more_magic():
return {}
"""
def test_reload_references(test, device):
path_ref = os.path.abspath(os.path.join(os.path.dirname(__file__), "aux_test_reference.py"))
path_refref = os.path.abspath(os.path.join(os.path.dirname(__file__), "aux_test_reference_reference.py"))
# rewrite both dependency modules and reload them
with open(path_ref, "w") as f:
f.writelines(template_ref.format(1.0))
importlib.reload(test_reference)
with open(path_refref, "w") as f:
f.writelines(template_refref.format(1.0))
importlib.reload(test_reference_reference)
test_dependent.run(expect=1.0, device=device) # 1 * 1 = 1
# rewrite and reload the first dependency module
with open(path_ref, "w") as f:
f.writelines(template_ref.format(2.0))
importlib.reload(test_reference)
    test_dependent.run(expect=2.0, device=device)  # 2 * 1 = 2
# rewrite and reload the second dependency module
with open(path_refref, "w") as f:
f.writelines(template_refref.format(2.0))
importlib.reload(test_reference_reference)
test_dependent.run(expect=4.0, device=device) # 2 * 2 = 4
devices = get_test_devices()
class TestReload(unittest.TestCase):
pass
add_function_test(TestReload, "test_redefine", test_redefine, devices=devices)
add_function_test(TestReload, "test_reload", test_reload, devices=devices)
add_function_test(TestReload, "test_reload_class", test_reload_class, devices=devices)
add_function_test(TestReload, "test_reload_references", test_reload_references, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=False)
| 5,617 | Python | 26.009615 | 109 | 0.673135 |
NVIDIA/warp/warp/tests/test_spatial.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
np_float_types = [np.float32, np.float64, np.float16]
kernel_cache = {}
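# Kernels below are generated from closures for each scalar dtype, so they are cached
# by function name + dtype suffix to avoid re-creating identical kernels across tests.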
def getkernel(func, suffix=""):
key = func.__name__ + "_" + suffix
if key not in kernel_cache:
kernel_cache[key] = wp.Kernel(func=func, key=key)
return kernel_cache[key]
def get_select_kernel(dtype):
def output_select_kernel_fn(
input: wp.array(dtype=dtype),
index: int,
out: wp.array(dtype=dtype),
):
out[0] = input[index]
return getkernel(output_select_kernel_fn, suffix=dtype.__name__)
############################################################
def test_spatial_vector_constructors(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec3 = wp.types.vector(length=3, dtype=wptype)
spatial_vector = wp.types.vector(length=6, dtype=wptype)
def check_spatial_vector_component_constructor(
input: wp.array(dtype=wptype),
out: wp.array(dtype=wptype),
):
result = spatial_vector(input[0], input[1], input[2], input[3], input[4], input[5])
# multiply the output by 2 so we've got something to backpropagate:
out[0] = wptype(2) * result[0]
out[1] = wptype(2) * result[1]
out[2] = wptype(2) * result[2]
out[3] = wptype(2) * result[3]
out[4] = wptype(2) * result[4]
out[5] = wptype(2) * result[5]
def check_spatial_vector_vector_constructor(
input: wp.array(dtype=wptype),
out: wp.array(dtype=wptype),
):
result = spatial_vector(vec3(input[0], input[1], input[2]), vec3(input[3], input[4], input[5]))
# multiply the output by 2 so we've got something to backpropagate:
out[0] = wptype(2) * result[0]
out[1] = wptype(2) * result[1]
out[2] = wptype(2) * result[2]
out[3] = wptype(2) * result[3]
out[4] = wptype(2) * result[4]
out[5] = wptype(2) * result[5]
kernel = getkernel(check_spatial_vector_component_constructor, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
vec_kernel = getkernel(check_spatial_vector_vector_constructor, suffix=dtype.__name__)
if register_kernels:
return
input = wp.array(rng.standard_normal(size=6).astype(dtype), requires_grad=True, device=device)
output = wp.zeros_like(input)
wp.launch(kernel, dim=1, inputs=[input], outputs=[output], device=device)
assert_np_equal(output.numpy(), 2 * input.numpy(), tol=tol)
for i in range(len(input)):
cmp = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[input], outputs=[output], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[output, i], outputs=[cmp], device=device)
tape.backward(loss=cmp)
expectedgrads = np.zeros(len(input))
expectedgrads[i] = 2
assert_np_equal(tape.gradients[input].numpy(), expectedgrads)
tape.zero()
input = wp.array(rng.standard_normal(size=6).astype(dtype), requires_grad=True, device=device)
output = wp.zeros_like(input)
wp.launch(vec_kernel, dim=1, inputs=[input], outputs=[output], device=device)
assert_np_equal(output.numpy(), 2 * input.numpy(), tol=tol)
for i in range(len(input)):
cmp = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
            wp.launch(vec_kernel, dim=1, inputs=[input], outputs=[output], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[output, i], outputs=[cmp], device=device)
tape.backward(loss=cmp)
expectedgrads = np.zeros(len(input))
expectedgrads[i] = 2
assert_np_equal(tape.gradients[input].numpy(), expectedgrads)
tape.zero()
def test_spatial_vector_indexing(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
spatial_vector = wp.types.vector(length=6, dtype=wptype)
def check_spatial_vector_indexing(
input: wp.array(dtype=spatial_vector),
out: wp.array(dtype=wptype),
):
inpt = input[0]
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(6):
out[idx] = wptype(2) * inpt[i]
idx = idx + 1
kernel = getkernel(check_spatial_vector_indexing, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
input = wp.array(
rng.standard_normal(size=(1, 6)).astype(dtype), dtype=spatial_vector, requires_grad=True, device=device
)
outcmps = wp.zeros(6, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[input], outputs=[outcmps], device=device)
assert_np_equal(outcmps.numpy(), 2 * input.numpy().ravel(), tol=tol)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for i in range(6):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[input], outputs=[outcmps], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcmps, i], outputs=[out], device=device)
tape.backward(loss=out)
expectedresult = np.zeros(6, dtype=dtype)
expectedresult[i] = 2
assert_np_equal(tape.gradients[input].numpy()[0], expectedresult)
tape.zero()
def test_spatial_vector_scalar_multiplication(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
spatial_vector = wp.types.vector(length=6, dtype=wptype)
def check_spatial_vector_scalar_mul(
s: wp.array(dtype=wptype),
q: wp.array(dtype=spatial_vector),
outcmps_l: wp.array(dtype=wptype),
outcmps_r: wp.array(dtype=wptype),
):
lresult = s[0] * q[0]
rresult = q[0] * s[0]
# multiply outputs by 2 so we've got something to backpropagate:
for i in range(6):
outcmps_l[i] = wptype(2) * lresult[i]
outcmps_r[i] = wptype(2) * rresult[i]
kernel = getkernel(check_spatial_vector_scalar_mul, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
s = wp.array(rng.standard_normal(size=1).astype(dtype), requires_grad=True, device=device)
q = wp.array(
rng.standard_normal(size=(1, 6)).astype(dtype), dtype=spatial_vector, requires_grad=True, device=device
)
outcmps_l = wp.zeros(6, dtype=wptype, requires_grad=True, device=device)
outcmps_r = wp.zeros(6, dtype=wptype, requires_grad=True, device=device)
wp.launch(
kernel,
dim=1,
inputs=[s, q],
outputs=[
outcmps_l,
outcmps_r,
],
device=device,
)
assert_np_equal(outcmps_l.numpy(), 2 * s.numpy()[0] * q.numpy(), tol=tol)
assert_np_equal(outcmps_r.numpy(), 2 * s.numpy()[0] * q.numpy(), tol=tol)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for i in range(6):
# test left/right mul gradients:
for wrt in [outcmps_l, outcmps_r]:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[s, q], outputs=[outcmps_l, outcmps_r], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[wrt, i], outputs=[out], device=device)
tape.backward(loss=out)
expectedresult = np.zeros(6, dtype=dtype)
expectedresult[i] = 2 * s.numpy()[0]
assert_np_equal(tape.gradients[q].numpy()[0], expectedresult, tol=tol)
assert_np_equal(tape.gradients[s].numpy()[0], 2 * q.numpy()[0, i], tol=tol)
tape.zero()
def test_spatial_vector_add_sub(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
spatial_vector = wp.types.vector(length=6, dtype=wptype)
def check_spatial_vector_add_sub(
q: wp.array(dtype=spatial_vector),
v: wp.array(dtype=spatial_vector),
outputs_add: wp.array(dtype=wptype),
outputs_sub: wp.array(dtype=wptype),
):
addresult = q[0] + v[0]
subresult = q[0] - v[0]
for i in range(6):
outputs_add[i] = wptype(2) * addresult[i]
outputs_sub[i] = wptype(2) * subresult[i]
kernel = getkernel(check_spatial_vector_add_sub, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
q = wp.array(rng.standard_normal(size=6).astype(dtype), dtype=spatial_vector, requires_grad=True, device=device)
v = wp.array(rng.standard_normal(size=6).astype(dtype), dtype=spatial_vector, requires_grad=True, device=device)
outputs_add = wp.zeros(6, dtype=wptype, requires_grad=True, device=device)
outputs_sub = wp.zeros(6, dtype=wptype, requires_grad=True, device=device)
wp.launch(
kernel,
dim=1,
inputs=[
q,
v,
],
outputs=[outputs_add, outputs_sub],
device=device,
)
assert_np_equal(outputs_add.numpy(), 2 * (q.numpy() + v.numpy()), tol=tol)
assert_np_equal(outputs_sub.numpy(), 2 * (q.numpy() - v.numpy()), tol=tol)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for i in range(6):
# test add gradients:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[q, v], outputs=[outputs_add, outputs_sub], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs_add, i], outputs=[out], device=device)
tape.backward(loss=out)
expectedresult = np.zeros(6, dtype=dtype)
expectedresult[i] = 2
assert_np_equal(tape.gradients[q].numpy()[0], expectedresult, tol=tol)
assert_np_equal(tape.gradients[v].numpy()[0], expectedresult, tol=tol)
tape.zero()
# test subtraction gradients:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[q, v], outputs=[outputs_add, outputs_sub], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs_sub, i], outputs=[out], device=device)
tape.backward(loss=out)
expectedresult = np.zeros(6, dtype=dtype)
expectedresult[i] = 2
assert_np_equal(tape.gradients[q].numpy()[0], expectedresult, tol=tol)
assert_np_equal(tape.gradients[v].numpy()[0], -expectedresult, tol=tol)
tape.zero()
def test_spatial_dot(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
spatial_vector = wp.types.vector(length=6, dtype=wptype)
def check_spatial_dot(
s: wp.array(dtype=spatial_vector),
v: wp.array(dtype=spatial_vector),
dot: wp.array(dtype=wptype),
):
dot[0] = wptype(2) * wp.spatial_dot(v[0], s[0])
kernel = getkernel(check_spatial_dot, suffix=dtype.__name__)
if register_kernels:
return
s = wp.array(rng.standard_normal(size=6).astype(dtype), dtype=spatial_vector, requires_grad=True, device=device)
v = wp.array(rng.standard_normal(size=6).astype(dtype), dtype=spatial_vector, requires_grad=True, device=device)
dot = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
s,
v,
],
outputs=[dot],
device=device,
)
assert_np_equal(dot.numpy()[0], 2.0 * (v.numpy() * s.numpy()).sum(), tol=tol)
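    # gradient of 2 * dot(v, s): d/ds = 2 * v and d/dv = 2 * s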
tape.backward(loss=dot)
sgrads = tape.gradients[s].numpy()[0]
expected_grads = 2.0 * v.numpy()[0]
assert_np_equal(sgrads, expected_grads, tol=10 * tol)
vgrads = tape.gradients[v].numpy()[0]
expected_grads = 2.0 * s.numpy()[0]
assert_np_equal(vgrads, expected_grads, tol=tol)
def test_spatial_cross(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
spatial_vector = wp.types.vector(length=6, dtype=wptype)
def check_spatial_cross(
s: wp.array(dtype=spatial_vector),
v: wp.array(dtype=spatial_vector),
outputs: wp.array(dtype=wptype),
outputs_dual: wp.array(dtype=wptype),
outputs_wcrossw: wp.array(dtype=wptype),
outputs_vcrossw: wp.array(dtype=wptype),
outputs_wcrossv: wp.array(dtype=wptype),
outputs_vcrossv: wp.array(dtype=wptype),
):
c = wp.spatial_cross(s[0], v[0])
d = wp.spatial_cross_dual(s[0], v[0])
# multiply outputs by 2 so we've got something to backpropagate:
for i in range(6):
outputs[i] = wptype(2) * c[i]
outputs_dual[i] = wptype(2) * d[i]
sw = wp.spatial_top(s[0])
sv = wp.spatial_bottom(s[0])
vw = wp.spatial_top(v[0])
vv = wp.spatial_bottom(v[0])
wcrossw = wp.cross(sw, vw)
vcrossw = wp.cross(sv, vw)
wcrossv = wp.cross(sw, vv)
vcrossv = wp.cross(sv, vv)
for i in range(3):
outputs_wcrossw[i] = wcrossw[i]
outputs_vcrossw[i] = vcrossw[i]
outputs_wcrossv[i] = wcrossv[i]
outputs_vcrossv[i] = vcrossv[i]
kernel = getkernel(check_spatial_cross, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
s = wp.array(rng.standard_normal(size=6).astype(dtype), dtype=spatial_vector, requires_grad=True, device=device)
v = wp.array(rng.standard_normal(size=6).astype(dtype), dtype=spatial_vector, requires_grad=True, device=device)
outputs = wp.zeros(6, dtype=wptype, requires_grad=True, device=device)
outputs_dual = wp.zeros(6, dtype=wptype, requires_grad=True, device=device)
outputs_wcrossw = wp.zeros(3, dtype=wptype, requires_grad=True, device=device)
outputs_vcrossw = wp.zeros(3, dtype=wptype, requires_grad=True, device=device)
outputs_wcrossv = wp.zeros(3, dtype=wptype, requires_grad=True, device=device)
outputs_vcrossv = wp.zeros(3, dtype=wptype, requires_grad=True, device=device)
wp.launch(
kernel,
dim=1,
inputs=[
s,
v,
],
outputs=[outputs, outputs_dual, outputs_wcrossw, outputs_vcrossw, outputs_wcrossv, outputs_vcrossv],
device=device,
)
sw = s.numpy()[0, :3]
sv = s.numpy()[0, 3:]
vw = v.numpy()[0, :3]
vv = v.numpy()[0, 3:]
wcrossw = np.cross(sw, vw)
vcrossw = np.cross(sv, vw)
wcrossv = np.cross(sw, vv)
vcrossv = np.cross(sv, vv)
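    # spatial_cross = (sw x vw, sv x vw + sw x vv); spatial_cross_dual = (sw x vw + sv x vv, sw x vv):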
assert_np_equal(outputs.numpy()[:3], 2 * wcrossw, tol=tol)
assert_np_equal(outputs.numpy()[3:], 2 * (vcrossw + wcrossv), tol=tol)
assert_np_equal(outputs_dual.numpy()[:3], 2 * (wcrossw + vcrossv), tol=tol)
assert_np_equal(outputs_dual.numpy()[3:], 2 * wcrossv, tol=tol)
for i in range(3):
cmp_w = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
cmp_v = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
cmp_w_dual = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
cmp_v_dual = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
cmp_wcrossw = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
cmp_vcrossw = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
cmp_wcrossv = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
cmp_vcrossv = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
s,
v,
],
outputs=[outputs, outputs_dual, outputs_wcrossw, outputs_vcrossw, outputs_wcrossv, outputs_vcrossv],
device=device,
)
# ith w and v vector components of spatial_cross:
wp.launch(output_select_kernel, dim=1, inputs=[outputs, i], outputs=[cmp_w], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, i + 3], outputs=[cmp_v], device=device)
# ith w and v vector components of spatial_cross_dual:
wp.launch(output_select_kernel, dim=1, inputs=[outputs_dual, i], outputs=[cmp_w_dual], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs_dual, i + 3], outputs=[cmp_v_dual], device=device)
# ith vector components of some cross products:
wp.launch(output_select_kernel, dim=1, inputs=[outputs_wcrossw, i], outputs=[cmp_wcrossw], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs_vcrossw, i], outputs=[cmp_vcrossw], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs_wcrossv, i], outputs=[cmp_wcrossv], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs_vcrossv, i], outputs=[cmp_vcrossv], device=device)
def getgrads(cmp, tape=tape):
tape.backward(loss=cmp)
sgrads = 1.0 * tape.gradients[s].numpy()
vgrads = 1.0 * tape.gradients[v].numpy()
tape.zero()
return sgrads, vgrads
dcmp_w_ds, dcmp_w_dv = getgrads(cmp_w)
dcmp_v_ds, dcmp_v_dv = getgrads(cmp_v)
dcmp_w_dual_ds, dcmp_w_dual_dv = getgrads(cmp_w_dual)
dcmp_v_dual_ds, dcmp_v_dual_dv = getgrads(cmp_v_dual)
dcmp_wcrossw_ds, dcmp_wcrossw_dv = getgrads(cmp_wcrossw)
dcmp_vcrossw_ds, dcmp_vcrossw_dv = getgrads(cmp_vcrossw)
dcmp_wcrossv_ds, dcmp_wcrossv_dv = getgrads(cmp_wcrossv)
dcmp_vcrossv_ds, dcmp_vcrossv_dv = getgrads(cmp_vcrossv)
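        # the gradients of each spatial cross component should match the sums of the corresponding cross-product gradients: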
assert_np_equal(dcmp_w_ds, 2 * dcmp_wcrossw_ds, tol=tol)
assert_np_equal(dcmp_w_dv, 2 * dcmp_wcrossw_dv, tol=tol)
assert_np_equal(dcmp_v_ds, 2 * (dcmp_vcrossw_ds + dcmp_wcrossv_ds), tol=tol)
assert_np_equal(dcmp_v_dv, 2 * (dcmp_vcrossw_dv + dcmp_wcrossv_dv), tol=tol)
assert_np_equal(dcmp_w_dual_ds, 2 * (dcmp_wcrossw_ds + dcmp_vcrossv_ds), tol=tol)
assert_np_equal(dcmp_w_dual_dv, 2 * (dcmp_wcrossw_dv + dcmp_vcrossv_dv), tol=tol)
assert_np_equal(dcmp_v_dual_ds, 2 * dcmp_wcrossv_ds, tol=tol)
assert_np_equal(dcmp_v_dual_dv, 2 * dcmp_wcrossv_dv, tol=tol)
def test_spatial_top_bottom(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
spatial_vector = wp.types.vector(length=6, dtype=wptype)
def check_spatial_top_bottom(
s: wp.array(dtype=spatial_vector),
outputs: wp.array(dtype=wptype),
):
top = wp.spatial_top(s[0])
bottom = wp.spatial_bottom(s[0])
outputs[0] = wptype(2) * top[0]
outputs[1] = wptype(2) * top[1]
outputs[2] = wptype(2) * top[2]
outputs[3] = wptype(2) * bottom[0]
outputs[4] = wptype(2) * bottom[1]
outputs[5] = wptype(2) * bottom[2]
kernel = getkernel(check_spatial_top_bottom, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
s = wp.array(rng.standard_normal(size=6).astype(dtype), dtype=spatial_vector, requires_grad=True, device=device)
outputs = wp.zeros(6, dtype=wptype, requires_grad=True, device=device)
wp.launch(
kernel,
dim=1,
inputs=[
s,
],
outputs=[outputs],
device=device,
)
assert_np_equal(outputs.numpy(), 2.0 * s.numpy(), tol=tol)
for i in range(6):
cmp = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
s,
],
outputs=[outputs],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, i], outputs=[cmp], device=device)
tape.backward(loss=cmp)
expectedgrads = np.zeros(6)
expectedgrads[i] = 2
assert_np_equal(tape.gradients[s].numpy(), expectedgrads.reshape((1, 6)))
tape.zero()
def test_transform_constructors(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec3 = wp.types.vector(length=3, dtype=wptype)
transform = wp.types.transformation(dtype=wptype)
quat = wp.types.quaternion(dtype=wptype)
def check_transform_constructor(
input: wp.array(dtype=wptype),
out: wp.array(dtype=wptype),
):
result = transform(vec3(input[0], input[1], input[2]), quat(input[3], input[4], input[5], input[6]))
# multiply the output by 2 so we've got something to backpropagate:
out[0] = wptype(2) * result[0]
out[1] = wptype(2) * result[1]
out[2] = wptype(2) * result[2]
out[3] = wptype(2) * result[3]
out[4] = wptype(2) * result[4]
out[5] = wptype(2) * result[5]
out[6] = wptype(2) * result[6]
kernel = getkernel(check_transform_constructor, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
p = rng.standard_normal(size=3).astype(dtype)
q = rng.standard_normal(size=4).astype(dtype)
q /= np.linalg.norm(q)
input = wp.array(np.concatenate((p, q)), requires_grad=True, device=device)
output = wp.zeros_like(input)
wp.launch(kernel, dim=1, inputs=[input], outputs=[output], device=device)
assert_np_equal(output.numpy(), 2 * input.numpy(), tol=tol)
for i in range(len(input)):
cmp = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[input], outputs=[output], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[output, i], outputs=[cmp], device=device)
tape.backward(loss=cmp)
expectedgrads = np.zeros(len(input))
expectedgrads[i] = 2
assert_np_equal(tape.gradients[input].numpy(), expectedgrads)
tape.zero()
def test_transform_indexing(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
transform = wp.types.transformation(dtype=wptype)
def check_transform_indexing(
input: wp.array(dtype=transform),
out: wp.array(dtype=wptype),
):
inpt = input[0]
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(7):
out[idx] = wptype(2) * inpt[i]
idx = idx + 1
kernel = getkernel(check_transform_indexing, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
input = wp.array(rng.standard_normal(size=(1, 7)).astype(dtype), dtype=transform, requires_grad=True, device=device)
outcmps = wp.zeros(7, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[input], outputs=[outcmps], device=device)
assert_np_equal(outcmps.numpy(), 2 * input.numpy().ravel(), tol=tol)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for i in range(7):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[input], outputs=[outcmps], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcmps, i], outputs=[out], device=device)
tape.backward(loss=out)
expectedresult = np.zeros(7, dtype=dtype)
expectedresult[i] = 2
assert_np_equal(tape.gradients[input].numpy()[0], expectedresult)
tape.zero()
def test_transform_scalar_multiplication(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
transform = wp.types.transformation(dtype=wptype)
def check_transform_scalar_mul(
s: wp.array(dtype=wptype),
q: wp.array(dtype=transform),
outcmps_l: wp.array(dtype=wptype),
outcmps_r: wp.array(dtype=wptype),
):
lresult = s[0] * q[0]
rresult = q[0] * s[0]
# multiply outputs by 2 so we've got something to backpropagate:
for i in range(7):
outcmps_l[i] = wptype(2) * lresult[i]
outcmps_r[i] = wptype(2) * rresult[i]
kernel = getkernel(check_transform_scalar_mul, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
s = wp.array(rng.standard_normal(size=1).astype(dtype), requires_grad=True, device=device)
q = wp.array(rng.standard_normal(size=(1, 7)).astype(dtype), dtype=transform, requires_grad=True, device=device)
outcmps_l = wp.zeros(7, dtype=wptype, requires_grad=True, device=device)
outcmps_r = wp.zeros(7, dtype=wptype, requires_grad=True, device=device)
wp.launch(
kernel,
dim=1,
inputs=[s, q],
outputs=[
outcmps_l,
outcmps_r,
],
device=device,
)
assert_np_equal(outcmps_l.numpy(), 2 * s.numpy()[0] * q.numpy(), tol=tol)
assert_np_equal(outcmps_r.numpy(), 2 * s.numpy()[0] * q.numpy(), tol=tol)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for i in range(7):
# test left/right mul gradients:
for wrt in [outcmps_l, outcmps_r]:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[s, q], outputs=[outcmps_l, outcmps_r], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[wrt, i], outputs=[out], device=device)
tape.backward(loss=out)
expectedresult = np.zeros(7, dtype=dtype)
expectedresult[i] = 2 * s.numpy()[0]
assert_np_equal(tape.gradients[q].numpy()[0], expectedresult, tol=tol)
assert_np_equal(tape.gradients[s].numpy()[0], 2 * q.numpy()[0, i], tol=tol)
tape.zero()
def test_transform_add_sub(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
transform = wp.types.transformation(dtype=wptype)
def check_transform_add_sub(
q: wp.array(dtype=transform),
v: wp.array(dtype=transform),
outputs_add: wp.array(dtype=wptype),
outputs_sub: wp.array(dtype=wptype),
):
addresult = q[0] + v[0]
subresult = q[0] - v[0]
for i in range(7):
outputs_add[i] = wptype(2) * addresult[i]
outputs_sub[i] = wptype(2) * subresult[i]
kernel = getkernel(check_transform_add_sub, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
q = wp.array(rng.standard_normal(size=7).astype(dtype), dtype=transform, requires_grad=True, device=device)
v = wp.array(rng.standard_normal(size=7).astype(dtype), dtype=transform, requires_grad=True, device=device)
outputs_add = wp.zeros(7, dtype=wptype, requires_grad=True, device=device)
outputs_sub = wp.zeros(7, dtype=wptype, requires_grad=True, device=device)
wp.launch(
kernel,
dim=1,
inputs=[
q,
v,
],
outputs=[outputs_add, outputs_sub],
device=device,
)
assert_np_equal(outputs_add.numpy(), 2 * (q.numpy() + v.numpy()), tol=tol)
assert_np_equal(outputs_sub.numpy(), 2 * (q.numpy() - v.numpy()), tol=tol)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for i in range(7):
# test add gradients:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[q, v], outputs=[outputs_add, outputs_sub], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs_add, i], outputs=[out], device=device)
tape.backward(loss=out)
expectedresult = np.zeros(7, dtype=dtype)
expectedresult[i] = 2
assert_np_equal(tape.gradients[q].numpy()[0], expectedresult, tol=tol)
assert_np_equal(tape.gradients[v].numpy()[0], expectedresult, tol=tol)
tape.zero()
# test subtraction gradients:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[q, v], outputs=[outputs_add, outputs_sub], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs_sub, i], outputs=[out], device=device)
tape.backward(loss=out)
expectedresult = np.zeros(7, dtype=dtype)
expectedresult[i] = 2
assert_np_equal(tape.gradients[q].numpy()[0], expectedresult, tol=tol)
assert_np_equal(tape.gradients[v].numpy()[0], -expectedresult, tol=tol)
tape.zero()
def test_transform_get_trans_rot(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
transform = wp.types.transformation(dtype=wptype)
def check_transform_get_trans_rot(
s: wp.array(dtype=transform),
outputs: wp.array(dtype=wptype),
):
trans = wp.transform_get_translation(s[0])
q = wp.transform_get_rotation(s[0])
outputs[0] = wptype(2) * trans[0]
outputs[1] = wptype(2) * trans[1]
outputs[2] = wptype(2) * trans[2]
outputs[3] = wptype(2) * q[0]
outputs[4] = wptype(2) * q[1]
outputs[5] = wptype(2) * q[2]
outputs[6] = wptype(2) * q[3]
kernel = getkernel(check_transform_get_trans_rot, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
s = wp.array(rng.standard_normal(size=7).astype(dtype), dtype=transform, requires_grad=True, device=device)
outputs = wp.zeros(7, dtype=wptype, requires_grad=True, device=device)
wp.launch(
kernel,
dim=1,
inputs=[
s,
],
outputs=[outputs],
device=device,
)
assert_np_equal(outputs.numpy(), 2.0 * s.numpy(), tol=tol)
for i in range(7):
cmp = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
s,
],
outputs=[outputs],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, i], outputs=[cmp], device=device)
tape.backward(loss=cmp)
expectedgrads = np.zeros(7)
expectedgrads[i] = 2
assert_np_equal(tape.gradients[s].numpy(), expectedgrads.reshape((1, 7)))
tape.zero()
def test_transform_multiply(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
transform = wp.types.transformation(dtype=wptype)
def check_transform_multiply(
a: wp.array(dtype=transform),
b: wp.array(dtype=transform),
outputs: wp.array(dtype=wptype),
outputs_fn: wp.array(dtype=wptype),
outputs_manual: wp.array(dtype=wptype),
):
result = a[0] * b[0]
result_fn = wp.transform_multiply(a[0], b[0])
# let's just work out the transform multiplication manually
# and compare value/gradients with that:
atrans = wp.transform_get_translation(a[0])
arot = wp.transform_get_rotation(a[0])
btrans = wp.transform_get_translation(b[0])
brot = wp.transform_get_rotation(b[0])
trans = wp.quat_rotate(arot, btrans) + atrans
rot = arot * brot
result_manual = transform(trans, rot)
for i in range(7):
outputs[i] = wptype(2) * result[i]
outputs_fn[i] = wptype(2) * result_fn[i]
outputs_manual[i] = wptype(2) * result_manual[i]
kernel = getkernel(check_transform_multiply, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
q = rng.standard_normal(size=7)
s = rng.standard_normal(size=7)
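    # normalize the quaternion parts so q and s represent valid rigid transforms: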
q[3:] /= np.linalg.norm(q[3:])
s[3:] /= np.linalg.norm(s[3:])
q = wp.array(q.astype(dtype), dtype=transform, requires_grad=True, device=device)
s = wp.array(s.astype(dtype), dtype=transform, requires_grad=True, device=device)
outputs = wp.zeros(7, dtype=wptype, requires_grad=True, device=device)
outputs_fn = wp.zeros(7, dtype=wptype, requires_grad=True, device=device)
outputs_manual = wp.zeros(7, dtype=wptype, requires_grad=True, device=device)
wp.launch(
kernel,
dim=1,
inputs=[
q,
s,
],
outputs=[outputs, outputs_fn, outputs_manual],
device=device,
)
assert_np_equal(outputs.numpy(), outputs_fn.numpy(), tol=tol)
assert_np_equal(outputs.numpy(), outputs_manual.numpy(), tol=tol)
for i in range(7):
cmp = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
cmp_fn = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
cmp_manual = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
q,
s,
],
outputs=[outputs, outputs_fn, outputs_manual],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, i], outputs=[cmp], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs_fn, i], outputs=[cmp_fn], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs_manual, i], outputs=[cmp_manual], device=device)
tape.backward(loss=cmp)
qgrads = 1.0 * tape.gradients[q].numpy()
sgrads = 1.0 * tape.gradients[s].numpy()
tape.zero()
tape.backward(loss=cmp_fn)
qgrads_fn = 1.0 * tape.gradients[q].numpy()
sgrads_fn = 1.0 * tape.gradients[s].numpy()
tape.zero()
tape.backward(loss=cmp_manual)
qgrads_manual = 1.0 * tape.gradients[q].numpy()
sgrads_manual = 1.0 * tape.gradients[s].numpy()
tape.zero()
assert_np_equal(qgrads, qgrads_fn, tol=tol)
assert_np_equal(sgrads, sgrads_fn, tol=tol)
assert_np_equal(qgrads, qgrads_manual, tol=tol)
assert_np_equal(sgrads, sgrads_manual, tol=tol)
def test_transform_inverse(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
transform = wp.types.transformation(dtype=wptype)
def check_transform_inverse(
a: wp.array(dtype=transform),
outputs: wp.array(dtype=wptype),
outputs_shouldbeidentity: wp.array(dtype=wptype),
outputs_manual: wp.array(dtype=wptype),
):
result = wp.transform_inverse(a[0])
idt = result * a[0]
# let's just work out the transform inverse manually
# and compare value/gradients with that:
atrans = wp.transform_get_translation(a[0])
arot = wp.transform_get_rotation(a[0])
rotinv = wp.quat_inverse(arot)
result_manual = transform(-wp.quat_rotate(rotinv, atrans), rotinv)
for i in range(7):
outputs[i] = wptype(2) * result[i]
outputs_shouldbeidentity[i] = wptype(2) * idt[i]
outputs_manual[i] = wptype(2) * result_manual[i]
kernel = getkernel(check_transform_inverse, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
q = rng.standard_normal(size=7)
s = rng.standard_normal(size=7)
q[3:] /= np.linalg.norm(q[3:])
s[3:] /= np.linalg.norm(s[3:])
q = wp.array(q.astype(dtype), dtype=transform, requires_grad=True, device=device)
outputs = wp.zeros(7, dtype=wptype, requires_grad=True, device=device)
outputs_shouldbeidentity = wp.zeros(7, dtype=wptype, requires_grad=True, device=device)
outputs_manual = wp.zeros(7, dtype=wptype, requires_grad=True, device=device)
wp.launch(
kernel,
dim=1,
inputs=[
q,
],
outputs=[outputs, outputs_shouldbeidentity, outputs_manual],
device=device,
)
    # check inverse: result * a should be the identity transform (0, 0, 0, 0, 0, 0, 1), doubled by the kernel:
assert_np_equal(outputs_shouldbeidentity.numpy(), np.array([0, 0, 0, 0, 0, 0, 2]), tol=tol)
# same as manual result:
assert_np_equal(outputs.numpy(), outputs_manual.numpy(), tol=tol)
for i in range(7):
cmp = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
cmp_manual = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
q,
],
outputs=[outputs, outputs_shouldbeidentity, outputs_manual],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, i], outputs=[cmp], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs_manual, i], outputs=[cmp_manual], device=device)
tape.backward(loss=cmp)
qgrads = 1.0 * tape.gradients[q].numpy()
tape.zero()
tape.backward(loss=cmp_manual)
qgrads_manual = 1.0 * tape.gradients[q].numpy()
tape.zero()
# check gradients against manual result:
assert_np_equal(qgrads, qgrads_manual, tol=tol)
def test_transform_point_vector(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
transform = wp.types.transformation(dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
def check_transform_point_vector(
t: wp.array(dtype=transform),
v: wp.array(dtype=vec3),
outputs_pt: wp.array(dtype=wptype),
outputs_pt_manual: wp.array(dtype=wptype),
outputs_vec: wp.array(dtype=wptype),
outputs_vec_manual: wp.array(dtype=wptype),
):
result_pt = wp.transform_point(t[0], v[0])
result_pt_manual = wp.transform_get_translation(t[0]) + wp.quat_rotate(wp.transform_get_rotation(t[0]), v[0])
result_vec = wp.transform_vector(t[0], v[0])
result_vec_manual = wp.quat_rotate(wp.transform_get_rotation(t[0]), v[0])
for i in range(3):
outputs_pt[i] = wptype(2) * result_pt[i]
outputs_pt_manual[i] = wptype(2) * result_pt_manual[i]
outputs_vec[i] = wptype(2) * result_vec[i]
outputs_vec_manual[i] = wptype(2) * result_vec_manual[i]
kernel = getkernel(check_transform_point_vector, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
q = rng.standard_normal(size=7)
q[3:] /= np.linalg.norm(q[3:])
t = wp.array(q.astype(dtype), dtype=transform, requires_grad=True, device=device)
v = wp.array(rng.standard_normal(size=3), dtype=vec3, requires_grad=True, device=device)
outputs_pt = wp.zeros(3, dtype=wptype, requires_grad=True, device=device)
outputs_pt_manual = wp.zeros(3, dtype=wptype, requires_grad=True, device=device)
outputs_vec = wp.zeros(3, dtype=wptype, requires_grad=True, device=device)
outputs_vec_manual = wp.zeros(3, dtype=wptype, requires_grad=True, device=device)
wp.launch(
kernel,
dim=1,
inputs=[t, v],
outputs=[outputs_pt, outputs_pt_manual, outputs_vec, outputs_vec_manual],
device=device,
)
# same as manual results:
assert_np_equal(outputs_pt.numpy(), outputs_pt_manual.numpy(), tol=tol)
assert_np_equal(outputs_vec.numpy(), outputs_vec_manual.numpy(), tol=tol)
for i in range(3):
cmp_pt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
cmp_pt_manual = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
cmp_vec = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
cmp_vec_manual = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[t, v],
outputs=[outputs_pt, outputs_pt_manual, outputs_vec, outputs_vec_manual],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outputs_pt, i], outputs=[cmp_pt], device=device)
wp.launch(
output_select_kernel, dim=1, inputs=[outputs_pt_manual, i], outputs=[cmp_pt_manual], device=device
)
wp.launch(output_select_kernel, dim=1, inputs=[outputs_vec, i], outputs=[cmp_vec], device=device)
wp.launch(
output_select_kernel, dim=1, inputs=[outputs_vec_manual, i], outputs=[cmp_vec_manual], device=device
)
tape.backward(loss=cmp_pt)
tgrads_pt = 1.0 * tape.gradients[t].numpy()
vgrads_pt = 1.0 * tape.gradients[v].numpy()
tape.zero()
tape.backward(loss=cmp_pt_manual)
tgrads_pt_manual = 1.0 * tape.gradients[t].numpy()
vgrads_pt_manual = 1.0 * tape.gradients[v].numpy()
tape.zero()
tape.backward(loss=cmp_vec)
tgrads_vec = 1.0 * tape.gradients[t].numpy()
vgrads_vec = 1.0 * tape.gradients[v].numpy()
tape.zero()
tape.backward(loss=cmp_vec_manual)
tgrads_vec_manual = 1.0 * tape.gradients[t].numpy()
vgrads_vec_manual = 1.0 * tape.gradients[v].numpy()
tape.zero()
# check gradients against manual result:
assert_np_equal(tgrads_pt, tgrads_pt_manual, tol=tol)
assert_np_equal(vgrads_pt, vgrads_pt_manual, tol=tol)
assert_np_equal(tgrads_vec, tgrads_vec_manual, tol=tol)
assert_np_equal(vgrads_vec, vgrads_vec_manual, tol=tol)
def test_spatial_matrix_constructors(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
spatial_matrix = wp.types.matrix(shape=(6, 6), dtype=wptype)
def check_spatial_matrix_constructor(
input: wp.array(dtype=wptype),
out: wp.array(dtype=wptype),
):
# multiply the output by 2 so we've got something to backpropagate:
result0 = spatial_matrix(
input[0],
input[1],
input[2],
input[3],
input[4],
input[5],
input[6],
input[7],
input[8],
input[9],
input[10],
input[11],
input[12],
input[13],
input[14],
input[15],
input[16],
input[17],
input[18],
input[19],
input[20],
input[21],
input[22],
input[23],
input[24],
input[25],
input[26],
input[27],
input[28],
input[29],
input[30],
input[31],
input[32],
input[33],
input[34],
input[35],
)
result1 = spatial_matrix()
idx = 0
for i in range(6):
for j in range(6):
out[idx] = wptype(2) * result0[i, j]
idx = idx + 1
for i in range(6):
for j in range(6):
out[idx] = result1[i, j]
idx = idx + 1
kernel = getkernel(check_spatial_matrix_constructor, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
input = wp.array(rng.standard_normal(size=6 * 6).astype(dtype), requires_grad=True, device=device)
output = wp.zeros(2 * 6 * 6, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[input], outputs=[output], device=device)
assert_np_equal(output.numpy()[: 6 * 6], 2 * input.numpy(), tol=tol)
assert_np_equal(output.numpy()[6 * 6 :], np.zeros_like(input.numpy()), tol=tol)
for i in range(len(input)):
cmp = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[input], outputs=[output], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[output, i], outputs=[cmp], device=device)
tape.backward(loss=cmp)
expectedgrads = np.zeros(len(input))
expectedgrads[i] = 2
assert_np_equal(tape.gradients[input].numpy(), expectedgrads)
tape.zero()
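        # note: the loop exits after the first component, so only that gradient is checked here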
break
def test_spatial_matrix_indexing(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
spatial_matrix = wp.types.matrix(shape=(6, 6), dtype=wptype)
def check_spatial_matrix_indexing(
input: wp.array(dtype=spatial_matrix),
out: wp.array(dtype=wptype),
):
inpt = input[0]
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(6):
for j in range(6):
out[idx] = wptype(2) * inpt[i, j]
idx = idx + 1
kernel = getkernel(check_spatial_matrix_indexing, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
input = wp.array(
rng.standard_normal(size=(1, 6, 6)).astype(dtype), dtype=spatial_matrix, requires_grad=True, device=device
)
outcmps = wp.zeros(6 * 6, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[input], outputs=[outcmps], device=device)
assert_np_equal(outcmps.numpy(), 2 * input.numpy().ravel(), tol=tol)
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for i in range(6):
for j in range(6):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[input], outputs=[outcmps], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcmps, idx], outputs=[out], device=device)
tape.backward(loss=out)
expectedresult = np.zeros((6, 6), dtype=dtype)
expectedresult[i, j] = 2
assert_np_equal(tape.gradients[input].numpy()[0], expectedresult)
tape.zero()
idx = idx + 1
def test_spatial_matrix_scalar_multiplication(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
spatial_matrix = wp.types.matrix(shape=(6, 6), dtype=wptype)
def check_spatial_matrix_scalar_mul(
s: wp.array(dtype=wptype),
q: wp.array(dtype=spatial_matrix),
outcmps_l: wp.array(dtype=wptype),
outcmps_r: wp.array(dtype=wptype),
):
lresult = s[0] * q[0]
rresult = q[0] * s[0]
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(6):
for j in range(6):
outcmps_l[idx] = wptype(2) * lresult[i, j]
outcmps_r[idx] = wptype(2) * rresult[i, j]
idx = idx + 1
kernel = getkernel(check_spatial_matrix_scalar_mul, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
s = wp.array(rng.standard_normal(size=1).astype(dtype), requires_grad=True, device=device)
q = wp.array(
rng.standard_normal(size=(1, 6, 6)).astype(dtype), dtype=spatial_matrix, requires_grad=True, device=device
)
outcmps_l = wp.zeros(6 * 6, dtype=wptype, requires_grad=True, device=device)
outcmps_r = wp.zeros(6 * 6, dtype=wptype, requires_grad=True, device=device)
wp.launch(
kernel,
dim=1,
inputs=[s, q],
outputs=[
outcmps_l,
outcmps_r,
],
device=device,
)
assert_np_equal(outcmps_l.numpy(), 2 * s.numpy()[0] * q.numpy(), tol=tol)
assert_np_equal(outcmps_r.numpy(), 2 * s.numpy()[0] * q.numpy(), tol=tol)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
idx = 0
for i in range(6):
for j in range(6):
# test left/right mul gradients:
for wrt in [outcmps_l, outcmps_r]:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[s, q], outputs=[outcmps_l, outcmps_r], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[wrt, idx], outputs=[out], device=device)
tape.backward(loss=out)
expectedresult = np.zeros((6, 6), dtype=dtype)
expectedresult[i, j] = 2 * s.numpy()[0]
assert_np_equal(tape.gradients[q].numpy()[0], expectedresult, tol=tol)
assert_np_equal(tape.gradients[s].numpy()[0], 2 * q.numpy()[0, i, j], tol=tol)
tape.zero()
idx = idx + 1
def test_spatial_matrix_add_sub(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
spatial_matrix = wp.types.matrix(shape=(6, 6), dtype=wptype)
def check_spatial_matrix_add_sub(
q: wp.array(dtype=spatial_matrix),
v: wp.array(dtype=spatial_matrix),
outputs_add: wp.array(dtype=wptype),
outputs_sub: wp.array(dtype=wptype),
):
addresult = q[0] + v[0]
subresult = q[0] - v[0]
idx = 0
for i in range(6):
for j in range(6):
outputs_add[idx] = wptype(2) * addresult[i, j]
outputs_sub[idx] = wptype(2) * subresult[i, j]
idx = idx + 1
kernel = getkernel(check_spatial_matrix_add_sub, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
q = wp.array(
rng.standard_normal(size=(1, 6, 6)).astype(dtype), dtype=spatial_matrix, requires_grad=True, device=device
)
v = wp.array(
rng.standard_normal(size=(1, 6, 6)).astype(dtype), dtype=spatial_matrix, requires_grad=True, device=device
)
outputs_add = wp.zeros(6 * 6, dtype=wptype, requires_grad=True, device=device)
outputs_sub = wp.zeros(6 * 6, dtype=wptype, requires_grad=True, device=device)
wp.launch(
kernel,
dim=1,
inputs=[
q,
v,
],
outputs=[outputs_add, outputs_sub],
device=device,
)
assert_np_equal(outputs_add.numpy(), 2 * (q.numpy() + v.numpy()), tol=tol)
assert_np_equal(outputs_sub.numpy(), 2 * (q.numpy() - v.numpy()), tol=tol)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
idx = 0
for i in range(6):
for j in range(6):
# test add gradients:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[q, v], outputs=[outputs_add, outputs_sub], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs_add, idx], outputs=[out], device=device)
tape.backward(loss=out)
expectedresult = np.zeros((6, 6), dtype=dtype)
expectedresult[i, j] = 2
assert_np_equal(tape.gradients[q].numpy()[0], expectedresult, tol=tol)
assert_np_equal(tape.gradients[v].numpy()[0], expectedresult, tol=tol)
tape.zero()
# test subtraction gradients:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[q, v], outputs=[outputs_add, outputs_sub], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs_sub, idx], outputs=[out], device=device)
tape.backward(loss=out)
expectedresult = np.zeros((6, 6), dtype=dtype)
expectedresult[i, j] = 2
assert_np_equal(tape.gradients[q].numpy()[0], expectedresult, tol=tol)
assert_np_equal(tape.gradients[v].numpy()[0], -expectedresult, tol=tol)
tape.zero()
idx = idx + 1
def test_spatial_matvec_multiplication(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 2.0e-2,
np.float32: 5.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
spatial_matrix = wp.types.matrix(shape=(6, 6), dtype=wptype)
spatial_vector = wp.types.vector(length=6, dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_spatial_mat_vec_mul(
v: wp.array(dtype=spatial_vector),
m: wp.array(dtype=spatial_matrix),
outcomponents: wp.array(dtype=wptype),
):
result = m[0] * v[0]
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(6):
outcomponents[idx] = wptype(2) * result[i]
idx = idx + 1
kernel = getkernel(check_spatial_mat_vec_mul, suffix=dtype.__name__)
if register_kernels:
return
v = wp.array(
rng.standard_normal(size=(1, 6)).astype(dtype), dtype=spatial_vector, requires_grad=True, device=device
)
m = wp.array(
rng.standard_normal(size=(1, 6, 6)).astype(dtype), dtype=spatial_matrix, requires_grad=True, device=device
)
outcomponents = wp.zeros(6, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[v, m], outputs=[outcomponents], device=device)
assert_np_equal(outcomponents.numpy(), 2 * np.matmul(m.numpy()[0], v.numpy()[0]), tol=tol)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
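    # gradient checks: out_i = 2 * (m @ v)_i, so d(out_i)/dv = 2 * m[i, :] and d(out_i)/dm[i, :] = 2 * v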
for i in range(6):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[v, m], outputs=[outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, i], outputs=[out], device=device)
tape.backward(loss=out)
assert_np_equal(tape.gradients[v].numpy()[0], 2 * m.numpy()[0, i, :], tol=tol)
expectedresult = np.zeros((6, 6), dtype=dtype)
expectedresult[i, :] = 2 * v.numpy()[0]
assert_np_equal(tape.gradients[m].numpy()[0], expectedresult, tol=tol)
tape.zero()
def test_spatial_matmat_multiplication(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 2.0e-2,
np.float32: 5.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
spatial_matrix = wp.types.matrix(shape=(6, 6), dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_mat_mul(
v: wp.array(dtype=spatial_matrix),
m: wp.array(dtype=spatial_matrix),
outcomponents: wp.array(dtype=wptype),
):
result = m[0] * v[0]
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(6):
for j in range(6):
outcomponents[idx] = wptype(2) * result[i, j]
idx = idx + 1
kernel = getkernel(check_mat_mat_mul, suffix=dtype.__name__)
if register_kernels:
return
v = wp.array(
rng.standard_normal(size=(1, 6, 6)).astype(dtype), dtype=spatial_matrix, requires_grad=True, device=device
)
m = wp.array(
rng.standard_normal(size=(1, 6, 6)).astype(dtype), dtype=spatial_matrix, requires_grad=True, device=device
)
outcomponents = wp.zeros(6 * 6, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[v, m], outputs=[outcomponents], device=device)
assert_np_equal(outcomponents.numpy(), 2 * np.matmul(m.numpy()[0], v.numpy()[0]), tol=tol)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
idx = 0
for i in range(6):
for j in range(6):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[v, m], outputs=[outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
tape.backward(loss=out)
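            # out_idx = 2 * (m @ v)[i, j], so the v gradient is 2 * m[i, :] in column j and the m gradient is 2 * v[:, j] in row i: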
expected = np.zeros((6, 6), dtype=dtype)
expected[:, j] = 2 * m.numpy()[0, i, :]
assert_np_equal(tape.gradients[v].numpy()[0], expected, tol=10 * tol)
expected = np.zeros((6, 6), dtype=dtype)
expected[i, :] = 2 * v.numpy()[0, :, j]
assert_np_equal(tape.gradients[m].numpy()[0], expected, tol=10 * tol)
tape.zero()
idx = idx + 1
def test_spatial_mat_transpose(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
spatial_matrix = wp.types.matrix(shape=(6, 6), dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_spatial_mat_transpose(
m: wp.array(dtype=spatial_matrix),
outcomponents: wp.array(dtype=wptype),
):
# multiply outputs by 2 so we've got something to backpropagate:
mat = wptype(2) * wp.transpose(m[0])
idx = 0
for i in range(6):
for j in range(6):
outcomponents[idx] = mat[i, j]
idx = idx + 1
kernel = getkernel(check_spatial_mat_transpose, suffix=dtype.__name__)
if register_kernels:
return
m = wp.array(
rng.standard_normal(size=(1, 6, 6)).astype(dtype), dtype=spatial_matrix, requires_grad=True, device=device
)
outcomponents = wp.zeros(6 * 6, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[m], outputs=[outcomponents], device=device)
assert_np_equal(outcomponents.numpy(), 2 * m.numpy()[0].T, tol=tol)
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for i in range(6):
for j in range(6):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[m], outputs=[outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
tape.backward(loss=out)
expectedresult = np.zeros((6, 6), dtype=dtype)
expectedresult[j, i] = 2
assert_np_equal(tape.gradients[m].numpy()[0], expectedresult)
tape.zero()
idx = idx + 1
def test_spatial_outer_product(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
spatial_vector = wp.types.vector(length=6, dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_spatial_outer_product(
s: wp.array(dtype=spatial_vector),
v: wp.array(dtype=spatial_vector),
outcomponents: wp.array(dtype=wptype),
):
mresult = wptype(2) * wp.outer(s[0], v[0])
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(6):
for j in range(6):
outcomponents[idx] = mresult[i, j]
idx = idx + 1
kernel = getkernel(check_spatial_outer_product, suffix=dtype.__name__)
if register_kernels:
return
s = wp.array(
rng.standard_normal(size=(1, 6)).astype(dtype), dtype=spatial_vector, requires_grad=True, device=device
)
v = wp.array(
rng.standard_normal(size=(1, 6)).astype(dtype), dtype=spatial_vector, requires_grad=True, device=device
)
outcomponents = wp.zeros(6 * 6, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[s, v], outputs=[outcomponents], device=device)
assert_np_equal(outcomponents.numpy(), 2 * s.numpy()[0, :, None] * v.numpy()[0, None, :], tol=tol)
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for i in range(6):
for j in range(6):
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
s,
v,
],
outputs=[outcomponents],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
tape.backward(loss=out)
            # this component's gonna be s_i * v_j, so its s gradient is gonna be nonzero
            # at the ith component and its v gradient will be nonzero at the jth component:
expectedresult = np.zeros((6), dtype=dtype)
expectedresult[i] = 2 * v.numpy()[0, j]
assert_np_equal(tape.gradients[s].numpy()[0], expectedresult, tol=10 * tol)
expectedresult = np.zeros((6), dtype=dtype)
expectedresult[j] = 2 * s.numpy()[0, i]
assert_np_equal(tape.gradients[v].numpy()[0], expectedresult, tol=10 * tol)
tape.zero()
idx = idx + 1
def test_spatial_adjoint(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat3 = wp.types.matrix(shape=(3, 3), dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_spatial_adjoint(
R: wp.array(dtype=mat3),
S: wp.array(dtype=mat3),
outcomponents: wp.array(dtype=wptype),
):
mresult = wptype(2) * wp.spatial_adjoint(R[0], S[0])
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(6):
for j in range(6):
outcomponents[idx] = mresult[i, j]
idx = idx + 1
kernel = getkernel(check_spatial_adjoint, suffix=dtype.__name__)
if register_kernels:
return
R = wp.array(rng.standard_normal(size=(1, 3, 3)).astype(dtype), dtype=mat3, requires_grad=True, device=device)
S = wp.array(rng.standard_normal(size=(1, 3, 3)).astype(dtype), dtype=mat3, requires_grad=True, device=device)
outcomponents = wp.zeros(6 * 6, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[R, S], outputs=[outcomponents], device=device)
result = outcomponents.numpy().reshape(6, 6)
expected = np.zeros_like(result)
expected[:3, :3] = R.numpy()
expected[3:, 3:] = R.numpy()
expected[3:, :3] = S.numpy()
assert_np_equal(result, 2 * expected, tol=tol)
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for i in range(6):
for j in range(6):
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
R,
S,
],
outputs=[outcomponents],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
tape.backward(loss=out)
            # the adjoint matrix is [[R, 0], [S, R]], so the R gradient is nonzero when (i, j) falls in the
            # top-left or bottom-right 3x3 block, and the S gradient when it falls in the bottom-left block:
expectedresult = np.zeros((3, 3), dtype=dtype)
if (i // 3 == 0 and j // 3 == 0) or (i // 3 == 1 and j // 3 == 1):
expectedresult[i % 3, j % 3] = 2
assert_np_equal(tape.gradients[R].numpy()[0], expectedresult, tol=10 * tol)
expectedresult = np.zeros((3, 3), dtype=dtype)
if i // 3 == 1 and j // 3 == 0:
expectedresult[i % 3, j % 3] = 2
assert_np_equal(tape.gradients[S].numpy()[0], expectedresult, tol=10 * tol)
tape.zero()
idx = idx + 1
def test_transform_identity(test, device, dtype, register_kernels=False):
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
def transform_identity_test(output: wp.array(dtype=wptype)):
t = wp.transform_identity(dtype=wptype)
for i in range(7):
output[i] = t[i]
def transform_identity_test_default(output: wp.array(dtype=wp.float32)):
t = wp.transform_identity()
for i in range(7):
output[i] = t[i]
quat_identity_kernel = getkernel(transform_identity_test, suffix=dtype.__name__)
quat_identity_default_kernel = getkernel(transform_identity_test_default, suffix=np.float32.__name__)
if register_kernels:
return
output = wp.zeros(7, dtype=wptype, device=device)
wp.launch(quat_identity_kernel, dim=1, inputs=[], outputs=[output], device=device)
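    # the identity transform is zero translation with unit quaternion (0, 0, 0, 1), so only the last component is nonzero: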
expected = np.zeros_like(output.numpy())
expected[-1] = 1
assert_np_equal(output.numpy(), expected)
# let's just test that it defaults to float32:
output = wp.zeros(7, dtype=wp.float32, device=device)
wp.launch(quat_identity_default_kernel, dim=1, inputs=[], outputs=[output], device=device)
expected = np.zeros_like(output.numpy())
expected[-1] = 1
assert_np_equal(output.numpy(), expected)
def test_transform_anon_type_instance(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
def transform_create_test(input: wp.array(dtype=wptype), output: wp.array(dtype=wptype)):
t = wp.transformation(
wp.vector(input[0], input[1], input[2]), wp.quaternion(input[3], input[4], input[5], input[6])
)
for i in range(7):
output[i] = wptype(2) * t[i]
transform_create_kernel = getkernel(transform_create_test, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
input = wp.array(rng.standard_normal(size=7).astype(dtype), requires_grad=True, device=device)
output = wp.zeros(7, dtype=wptype, requires_grad=True, device=device)
wp.launch(transform_create_kernel, dim=1, inputs=[input], outputs=[output], device=device)
assert_np_equal(output.numpy(), 2 * input.numpy())
for i in range(len(input)):
cmp = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(transform_create_kernel, dim=1, inputs=[input], outputs=[output], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[output, i], outputs=[cmp], device=device)
tape.backward(loss=cmp)
expectedgrads = np.zeros(len(input))
expectedgrads[i] = 2
assert_np_equal(tape.gradients[input].numpy(), expectedgrads)
tape.zero()
devices = get_test_devices()
class TestSpatial(unittest.TestCase):
pass
for dtype in np_float_types:
add_function_test_register_kernel(
TestSpatial,
f"test_spatial_vector_constructors_{dtype.__name__}",
test_spatial_vector_constructors,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestSpatial,
f"test_spatial_vector_indexing_{dtype.__name__}",
test_spatial_vector_indexing,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestSpatial,
f"test_spatial_vector_scalar_multiplication_{dtype.__name__}",
test_spatial_vector_scalar_multiplication,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestSpatial,
f"test_spatial_vector_add_sub_{dtype.__name__}",
test_spatial_vector_add_sub,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestSpatial, f"test_spatial_dot_{dtype.__name__}", test_spatial_dot, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestSpatial, f"test_spatial_cross_{dtype.__name__}", test_spatial_cross, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestSpatial,
f"test_spatial_top_bottom_{dtype.__name__}",
test_spatial_top_bottom,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestSpatial,
f"test_transform_constructors_{dtype.__name__}",
test_transform_constructors,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestSpatial,
f"test_transform_anon_type_instance_{dtype.__name__}",
test_transform_anon_type_instance,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestSpatial,
f"test_transform_identity_{dtype.__name__}",
test_transform_identity,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestSpatial,
f"test_transform_indexing_{dtype.__name__}",
test_transform_indexing,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestSpatial,
f"test_transform_get_trans_rot_{dtype.__name__}",
test_transform_get_trans_rot,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestSpatial,
f"test_transform_multiply_{dtype.__name__}",
test_transform_multiply,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestSpatial,
f"test_transform_inverse_{dtype.__name__}",
test_transform_inverse,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestSpatial,
f"test_transform_point_vector_{dtype.__name__}",
test_transform_point_vector,
devices=devices,
dtype=dtype,
)
# are these two valid? They don't seem to be doing things you'd want to do;
# maybe they should be removed.
add_function_test_register_kernel(
TestSpatial,
f"test_transform_scalar_multiplication_{dtype.__name__}",
test_transform_scalar_multiplication,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestSpatial,
f"test_transform_add_sub_{dtype.__name__}",
test_transform_add_sub,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestSpatial,
f"test_spatial_matrix_constructors_{dtype.__name__}",
test_spatial_matrix_constructors,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestSpatial,
f"test_spatial_matrix_indexing_{dtype.__name__}",
test_spatial_matrix_indexing,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestSpatial,
f"test_spatial_matrix_scalar_multiplication_{dtype.__name__}",
test_spatial_matrix_scalar_multiplication,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestSpatial,
f"test_spatial_matrix_add_sub_{dtype.__name__}",
test_spatial_matrix_add_sub,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestSpatial,
f"test_spatial_matvec_multiplication_{dtype.__name__}",
test_spatial_matvec_multiplication,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestSpatial,
f"test_spatial_matmat_multiplication_{dtype.__name__}",
test_spatial_matmat_multiplication,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestSpatial,
f"test_spatial_outer_product_{dtype.__name__}",
test_spatial_outer_product,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestSpatial, f"test_spatial_adjoint_{dtype.__name__}", test_spatial_adjoint, devices=devices, dtype=dtype
)
# TODO: test spatial_mass and spatial_jacobian
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 76,441 | Python | 34.604099 | 120 | 0.591502 |
NVIDIA/warp/warp/tests/test_arithmetic.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
np_signed_int_types = [
np.int8,
np.int16,
np.int32,
np.int64,
np.byte,
]
np_unsigned_int_types = [
np.uint8,
np.uint16,
np.uint32,
np.uint64,
np.ubyte,
]
np_int_types = np_signed_int_types + np_unsigned_int_types
np_float_types = [np.float16, np.float32, np.float64]
np_scalar_types = np_int_types + np_float_types
def randvals(rng, shape, dtype):
if dtype in np_float_types:
return rng.standard_normal(size=shape).astype(dtype)
elif dtype in [np.int8, np.uint8, np.byte, np.ubyte]:
return rng.integers(1, high=3, size=shape, dtype=dtype)
return rng.integers(1, high=5, size=shape, dtype=dtype)
kernel_cache = {}
def getkernel(func, suffix=""):
key = func.__name__ + "_" + suffix
if key not in kernel_cache:
kernel_cache[key] = wp.Kernel(func=func, key=key)
return kernel_cache[key]
def get_select_kernel(dtype):
def output_select_kernel_fn(
input: wp.array(dtype=dtype),
index: int,
out: wp.array(dtype=dtype),
):
out[0] = input[index]
return getkernel(output_select_kernel_fn, suffix=dtype.__name__)
def get_select_kernel2(dtype):
def output_select_kernel2_fn(
input: wp.array(dtype=dtype, ndim=2),
index0: int,
index1: int,
out: wp.array(dtype=dtype),
):
out[0] = input[index0, index1]
return getkernel(output_select_kernel2_fn, suffix=dtype.__name__)
def test_arrays(test, device, dtype):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
arr_np = randvals(rng, (10, 5), dtype)
arr = wp.array(arr_np, dtype=wptype, requires_grad=True, device=device)
assert_np_equal(arr.numpy(), arr_np, tol=tol)
def test_unary_ops(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
def check_unary(
inputs: wp.array(dtype=wptype, ndim=2),
outputs: wp.array(dtype=wptype, ndim=2),
):
for i in range(10):
i0 = inputs[0, i]
i1 = inputs[1, i]
i2 = inputs[2, i]
i3 = inputs[3, i]
i4 = inputs[4, i]
# multiply outputs by 2 so we've got something to backpropagate:
outputs[0, i] = wptype(2.0) * (+i0)
outputs[1, i] = wptype(2.0) * (-i1)
outputs[2, i] = wptype(2.0) * wp.sign(i2)
outputs[3, i] = wptype(2.0) * wp.abs(i3)
outputs[4, i] = wptype(2.0) * wp.step(i4)
kernel = getkernel(check_unary, suffix=dtype.__name__)
output_select_kernel = get_select_kernel2(wptype)
if register_kernels:
return
if dtype in np_float_types:
inputs = wp.array(
rng.standard_normal(size=(5, 10)).astype(dtype), dtype=wptype, requires_grad=True, device=device
)
else:
inputs = wp.array(
rng.integers(-2, high=3, size=(5, 10), dtype=dtype), dtype=wptype, requires_grad=True, device=device
)
outputs = wp.zeros_like(inputs)
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
assert_np_equal(outputs.numpy()[0], 2 * inputs.numpy()[0], tol=tol)
assert_np_equal(outputs.numpy()[1], -2 * inputs.numpy()[1], tol=tol)
expected = 2 * np.sign(inputs.numpy()[2])
expected[expected == 0] = 2
assert_np_equal(outputs.numpy()[2], expected, tol=tol)
assert_np_equal(outputs.numpy()[3], 2 * np.abs(inputs.numpy()[3]), tol=tol)
assert_np_equal(outputs.numpy()[4], 2 * (1 - np.heaviside(inputs.numpy()[4], 1)), tol=tol)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
if dtype in np_float_types:
for i in range(10):
# grad of 2x:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 0, i], outputs=[out], device=device)
tape.backward(loss=out)
expected_grads = np.zeros_like(inputs.numpy())
expected_grads[0, i] = 2
assert_np_equal(tape.gradients[inputs].numpy(), expected_grads, tol=tol)
tape.zero()
# grad of -2x:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 1, i], outputs=[out], device=device)
tape.backward(loss=out)
expected_grads = np.zeros_like(inputs.numpy())
expected_grads[1, i] = -2
assert_np_equal(tape.gradients[inputs].numpy(), expected_grads, tol=tol)
tape.zero()
# grad of 2 * sign(x):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 2, i], outputs=[out], device=device)
tape.backward(loss=out)
expected_grads = np.zeros_like(inputs.numpy())
assert_np_equal(tape.gradients[inputs].numpy(), expected_grads, tol=tol)
tape.zero()
# grad of 2 * abs(x):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 3, i], outputs=[out], device=device)
tape.backward(loss=out)
expected_grads = np.zeros_like(inputs.numpy())
expected_grads[3, i] = 2 * np.sign(inputs.numpy()[3, i])
assert_np_equal(tape.gradients[inputs].numpy(), expected_grads, tol=tol)
tape.zero()
# grad of 2 * step(x):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 4, i], outputs=[out], device=device)
tape.backward(loss=out)
expected_grads = np.zeros_like(inputs.numpy())
assert_np_equal(tape.gradients[inputs].numpy(), expected_grads, tol=tol)
tape.zero()
def test_nonzero(test, device, dtype, register_kernels=False):
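    """Check wp.nonzero() forward values and that its gradient is zero everywhere."""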
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
def check_nonzero(
inputs: wp.array(dtype=wptype),
outputs: wp.array(dtype=wptype),
):
for i in range(10):
i0 = inputs[i]
outputs[i] = wp.nonzero(i0)
kernel = getkernel(check_nonzero, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
inputs = wp.array(rng.integers(-2, high=3, size=10).astype(dtype), dtype=wptype, requires_grad=True, device=device)
outputs = wp.zeros_like(inputs)
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
assert_np_equal(outputs.numpy(), (inputs.numpy() != 0))
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
if dtype in np_float_types:
for i in range(10):
# grad should just be zero:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, i], outputs=[out], device=device)
tape.backward(loss=out)
expected_grads = np.zeros_like(inputs.numpy())
assert_np_equal(tape.gradients[inputs].numpy(), expected_grads, tol=tol)
tape.zero()
def test_binary_ops(test, device, dtype, register_kernels=False):
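    """Check forward values and gradients of mul, div, add, sub, mod, min, max and floordiv."""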
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
def check_binary_ops(
in1: wp.array(dtype=wptype, ndim=2),
in2: wp.array(dtype=wptype, ndim=2),
outputs: wp.array(dtype=wptype, ndim=2),
):
for i in range(10):
i0 = in1[0, i]
i1 = in1[1, i]
i2 = in1[2, i]
i3 = in1[3, i]
i4 = in1[4, i]
i5 = in1[5, i]
i6 = in1[6, i]
i7 = in1[7, i]
j0 = in2[0, i]
j1 = in2[1, i]
j2 = in2[2, i]
j3 = in2[3, i]
j4 = in2[4, i]
j5 = in2[5, i]
j6 = in2[6, i]
j7 = in2[7, i]
outputs[0, i] = wptype(2) * wp.mul(i0, j0)
outputs[1, i] = wptype(2) * wp.div(i1, j1)
outputs[2, i] = wptype(2) * wp.add(i2, j2)
outputs[3, i] = wptype(2) * wp.sub(i3, j3)
outputs[4, i] = wptype(2) * wp.mod(i4, j4)
outputs[5, i] = wptype(2) * wp.min(i5, j5)
outputs[6, i] = wptype(2) * wp.max(i6, j6)
outputs[7, i] = wptype(2) * wp.floordiv(i7, j7)
kernel = getkernel(check_binary_ops, suffix=dtype.__name__)
output_select_kernel = get_select_kernel2(wptype)
if register_kernels:
return
vals1 = randvals(rng, [8, 10], dtype)
    if dtype in np_unsigned_int_types:
vals2 = vals1 + randvals(rng, [8, 10], dtype)
else:
vals2 = np.abs(randvals(rng, [8, 10], dtype))
in1 = wp.array(vals1, dtype=wptype, requires_grad=True, device=device)
in2 = wp.array(vals2, dtype=wptype, requires_grad=True, device=device)
outputs = wp.zeros_like(in1)
wp.launch(kernel, dim=1, inputs=[in1, in2], outputs=[outputs], device=device)
assert_np_equal(outputs.numpy()[0], 2 * in1.numpy()[0] * in2.numpy()[0], tol=tol)
if dtype in np_float_types:
assert_np_equal(outputs.numpy()[1], 2 * in1.numpy()[1] / (in2.numpy()[1]), tol=tol)
else:
assert_np_equal(outputs.numpy()[1], 2 * (in1.numpy()[1] // (in2.numpy()[1])), tol=tol)
assert_np_equal(outputs.numpy()[2], 2 * (in1.numpy()[2] + (in2.numpy()[2])), tol=tol)
assert_np_equal(outputs.numpy()[3], 2 * (in1.numpy()[3] - (in2.numpy()[3])), tol=tol)
    # note: wp.mod() uses truncated (C-style) modulo, where the result takes the sign of the
    # dividend, unlike Python's % operator and np.mod(), which use floored modulo:
assert_np_equal(
outputs.numpy()[4],
2
* (
(in1.numpy()[4])
- (in2.numpy()[4]) * np.sign(in1.numpy()[4]) * np.floor(np.abs(in1.numpy()[4]) / (in2.numpy()[4]))
),
tol=tol,
)
assert_np_equal(outputs.numpy()[5], 2 * np.minimum(in1.numpy()[5], in2.numpy()[5]), tol=tol)
assert_np_equal(outputs.numpy()[6], 2 * np.maximum(in1.numpy()[6], in2.numpy()[6]), tol=tol)
assert_np_equal(outputs.numpy()[7], 2 * np.floor_divide(in1.numpy()[7], in2.numpy()[7]), tol=tol)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
if dtype in np_float_types:
for i in range(10):
# multiplication:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[in1, in2], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 0, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(in1.numpy())
expected[0, i] = 2.0 * in2.numpy()[0, i]
assert_np_equal(tape.gradients[in1].numpy(), expected, tol=tol)
expected[0, i] = 2.0 * in1.numpy()[0, i]
assert_np_equal(tape.gradients[in2].numpy(), expected, tol=tol)
tape.zero()
# division:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[in1, in2], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 1, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(in1.numpy())
expected[1, i] = 2.0 / (in2.numpy()[1, i])
assert_np_equal(tape.gradients[in1].numpy(), expected, tol=tol)
# y = x1/x2
# dy/dx2 = -x1/x2^2
expected[1, i] = (-2.0) * (in1.numpy()[1, i] / (in2.numpy()[1, i] ** 2))
assert_np_equal(tape.gradients[in2].numpy(), expected, tol=tol)
tape.zero()
# addition:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[in1, in2], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 2, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(in1.numpy())
expected[2, i] = 2.0
assert_np_equal(tape.gradients[in1].numpy(), expected, tol=tol)
expected[2, i] = 2.0
assert_np_equal(tape.gradients[in2].numpy(), expected, tol=tol)
tape.zero()
# subtraction:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[in1, in2], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 3, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(in1.numpy())
expected[3, i] = 2.0
assert_np_equal(tape.gradients[in1].numpy(), expected, tol=tol)
expected[3, i] = -2.0
assert_np_equal(tape.gradients[in2].numpy(), expected, tol=tol)
tape.zero()
            # modulus: away from the discontinuities,
            # d/dx1( x1 % x2 ) == 1
            # d/dx2( x1 % x2 ) == 0
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[in1, in2], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 4, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(in1.numpy())
expected[4, i] = 2.0
assert_np_equal(tape.gradients[in1].numpy(), expected, tol=tol)
expected[4, i] = 0.0
assert_np_equal(tape.gradients[in2].numpy(), expected, tol=tol)
tape.zero()
# min
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[in1, in2], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 5, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(in1.numpy())
expected[5, i] = 2.0 if (in1.numpy()[5, i] < in2.numpy()[5, i]) else 0.0
assert_np_equal(tape.gradients[in1].numpy(), expected, tol=tol)
expected[5, i] = 2.0 if (in2.numpy()[5, i] < in1.numpy()[5, i]) else 0.0
assert_np_equal(tape.gradients[in2].numpy(), expected, tol=tol)
tape.zero()
# max
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[in1, in2], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 6, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(in1.numpy())
expected[6, i] = 2.0 if (in1.numpy()[6, i] > in2.numpy()[6, i]) else 0.0
assert_np_equal(tape.gradients[in1].numpy(), expected, tol=tol)
expected[6, i] = 2.0 if (in2.numpy()[6, i] > in1.numpy()[6, i]) else 0.0
assert_np_equal(tape.gradients[in2].numpy(), expected, tol=tol)
tape.zero()
# floor_divide. Returns integers so gradient is zero
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[in1, in2], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 7, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(in1.numpy())
assert_np_equal(tape.gradients[in1].numpy(), expected, tol=tol)
assert_np_equal(tape.gradients[in2].numpy(), expected, tol=tol)
tape.zero()
def test_special_funcs(test, device, dtype, register_kernels=False):
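    """Check forward values and gradients of the one-argument math built-ins (log, exp, trig, hyperbolic, sqrt, cbrt)."""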
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
def check_special_funcs(
inputs: wp.array(dtype=wptype, ndim=2),
outputs: wp.array(dtype=wptype, ndim=2),
):
# multiply outputs by 2 so we've got something to backpropagate:
for i in range(10):
outputs[0, i] = wptype(2) * wp.log(inputs[0, i])
outputs[1, i] = wptype(2) * wp.log2(inputs[1, i])
outputs[2, i] = wptype(2) * wp.log10(inputs[2, i])
outputs[3, i] = wptype(2) * wp.exp(inputs[3, i])
outputs[4, i] = wptype(2) * wp.atan(inputs[4, i])
outputs[5, i] = wptype(2) * wp.sin(inputs[5, i])
outputs[6, i] = wptype(2) * wp.cos(inputs[6, i])
outputs[7, i] = wptype(2) * wp.sqrt(inputs[7, i])
outputs[8, i] = wptype(2) * wp.tan(inputs[8, i])
outputs[9, i] = wptype(2) * wp.sinh(inputs[9, i])
outputs[10, i] = wptype(2) * wp.cosh(inputs[10, i])
outputs[11, i] = wptype(2) * wp.tanh(inputs[11, i])
outputs[12, i] = wptype(2) * wp.acos(inputs[12, i])
outputs[13, i] = wptype(2) * wp.asin(inputs[13, i])
outputs[14, i] = wptype(2) * wp.cbrt(inputs[14, i])
kernel = getkernel(check_special_funcs, suffix=dtype.__name__)
output_select_kernel = get_select_kernel2(wptype)
if register_kernels:
return
invals = rng.normal(size=(15, 10)).astype(dtype)
invals[[0, 1, 2, 7, 14]] = 0.1 + np.abs(invals[[0, 1, 2, 7, 14]])
invals[12] = np.clip(invals[12], -0.9, 0.9)
invals[13] = np.clip(invals[13], -0.9, 0.9)
inputs = wp.array(invals, dtype=wptype, requires_grad=True, device=device)
outputs = wp.zeros_like(inputs)
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
assert_np_equal(outputs.numpy()[0], 2 * np.log(inputs.numpy()[0]), tol=tol)
assert_np_equal(outputs.numpy()[1], 2 * np.log2(inputs.numpy()[1]), tol=tol)
assert_np_equal(outputs.numpy()[2], 2 * np.log10(inputs.numpy()[2]), tol=tol)
assert_np_equal(outputs.numpy()[3], 2 * np.exp(inputs.numpy()[3]), tol=tol)
assert_np_equal(outputs.numpy()[4], 2 * np.arctan(inputs.numpy()[4]), tol=tol)
assert_np_equal(outputs.numpy()[5], 2 * np.sin(inputs.numpy()[5]), tol=tol)
assert_np_equal(outputs.numpy()[6], 2 * np.cos(inputs.numpy()[6]), tol=tol)
assert_np_equal(outputs.numpy()[7], 2 * np.sqrt(inputs.numpy()[7]), tol=tol)
assert_np_equal(outputs.numpy()[8], 2 * np.tan(inputs.numpy()[8]), tol=tol)
assert_np_equal(outputs.numpy()[9], 2 * np.sinh(inputs.numpy()[9]), tol=tol)
assert_np_equal(outputs.numpy()[10], 2 * np.cosh(inputs.numpy()[10]), tol=tol)
assert_np_equal(outputs.numpy()[11], 2 * np.tanh(inputs.numpy()[11]), tol=tol)
assert_np_equal(outputs.numpy()[12], 2 * np.arccos(inputs.numpy()[12]), tol=tol)
assert_np_equal(outputs.numpy()[13], 2 * np.arcsin(inputs.numpy()[13]), tol=tol)
assert_np_equal(outputs.numpy()[14], 2 * np.cbrt(inputs.numpy()[14]), tol=tol)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
if dtype in np_float_types:
for i in range(10):
# log:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 0, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(inputs.numpy())
expected[0, i] = 2.0 / inputs.numpy()[0, i]
assert_np_equal(tape.gradients[inputs].numpy(), expected, tol=tol)
tape.zero()
# log2:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 1, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(inputs.numpy())
expected[1, i] = 2.0 / (inputs.numpy()[1, i] * np.log(2.0))
assert_np_equal(tape.gradients[inputs].numpy(), expected, tol=tol)
tape.zero()
# log10:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 2, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(inputs.numpy())
expected[2, i] = 2.0 / (inputs.numpy()[2, i] * np.log(10.0))
assert_np_equal(tape.gradients[inputs].numpy(), expected, tol=tol)
tape.zero()
# exp:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 3, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(inputs.numpy())
expected[3, i] = outputs.numpy()[3, i]
assert_np_equal(tape.gradients[inputs].numpy(), expected, tol=tol)
tape.zero()
# arctan:
            # d/dx atan(x) = 1 / (1 + x^2)
            # (an earlier autodiff formula in Warp incorrectly used (1 + x^2) instead of its reciprocal)
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 4, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(inputs.numpy())
expected[4, i] = 2.0 / (inputs.numpy()[4, i] ** 2 + 1)
assert_np_equal(tape.gradients[inputs].numpy(), expected, tol=tol)
tape.zero()
# sin:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 5, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(inputs.numpy())
expected[5, i] = np.cos(inputs.numpy()[5, i]) * 2
assert_np_equal(tape.gradients[inputs].numpy(), expected, tol=tol)
tape.zero()
# cos:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 6, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(inputs.numpy())
expected[6, i] = -np.sin(inputs.numpy()[6, i]) * 2.0
assert_np_equal(tape.gradients[inputs].numpy(), expected, tol=tol)
tape.zero()
# sqrt:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 7, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(inputs.numpy())
expected[7, i] = 1.0 / (np.sqrt(inputs.numpy()[7, i]))
assert_np_equal(tape.gradients[inputs].numpy(), expected, tol=tol)
tape.zero()
# tan:
            # d/dx tan(x) = 1 / cos(x)^2; the gradient is only undefined where cos(x) == 0
            # (an earlier autodiff formula guarded this with "cos(x) > 0" instead of "cos(x) != 0")
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 8, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(inputs.numpy())
expected[8, i] = 2.0 / (np.cos(inputs.numpy()[8, i]) ** 2)
assert_np_equal(tape.gradients[inputs].numpy(), expected, tol=200 * tol)
tape.zero()
# sinh:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 9, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(inputs.numpy())
expected[9, i] = 2.0 * np.cosh(inputs.numpy()[9, i])
assert_np_equal(tape.gradients[inputs].numpy(), expected, tol=tol)
tape.zero()
# cosh:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 10, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(inputs.numpy())
expected[10, i] = 2.0 * np.sinh(inputs.numpy()[10, i])
assert_np_equal(tape.gradients[inputs].numpy(), expected, tol=tol)
tape.zero()
# tanh:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 11, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(inputs.numpy())
expected[11, i] = 2.0 / (np.cosh(inputs.numpy()[11, i]) ** 2)
assert_np_equal(tape.gradients[inputs].numpy(), expected, tol=tol)
tape.zero()
# arccos:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 12, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(inputs.numpy())
expected[12, i] = -2.0 / np.sqrt(1 - inputs.numpy()[12, i] ** 2)
assert_np_equal(tape.gradients[inputs].numpy(), expected, tol=tol)
tape.zero()
# arcsin:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 13, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(inputs.numpy())
expected[13, i] = 2.0 / np.sqrt(1 - inputs.numpy()[13, i] ** 2)
assert_np_equal(tape.gradients[inputs].numpy(), expected, tol=6 * tol)
tape.zero()
# cbrt:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 14, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(inputs.numpy())
cbrt = np.cbrt(inputs.numpy()[14, i], dtype=np.dtype(dtype))
expected[14, i] = (2.0 / 3.0) * (1.0 / (cbrt * cbrt))
assert_np_equal(tape.gradients[inputs].numpy(), expected, tol=tol)
tape.zero()
def test_special_funcs_2arg(test, device, dtype, register_kernels=False):
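    """Check forward values and gradients of the two-argument built-ins pow and atan2."""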
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
def check_special_funcs_2arg(
in1: wp.array(dtype=wptype, ndim=2),
in2: wp.array(dtype=wptype, ndim=2),
outputs: wp.array(dtype=wptype, ndim=2),
):
# multiply outputs by 2 so we've got something to backpropagate:
for i in range(10):
outputs[0, i] = wptype(2) * wp.pow(in1[0, i], in2[0, i])
outputs[1, i] = wptype(2) * wp.atan2(in1[1, i], in2[1, i])
kernel = getkernel(check_special_funcs_2arg, suffix=dtype.__name__)
output_select_kernel = get_select_kernel2(wptype)
if register_kernels:
return
in1 = wp.array(np.abs(randvals(rng, [2, 10], dtype)), dtype=wptype, requires_grad=True, device=device)
in2 = wp.array(randvals(rng, [2, 10], dtype), dtype=wptype, requires_grad=True, device=device)
outputs = wp.zeros_like(in1)
wp.launch(kernel, dim=1, inputs=[in1, in2], outputs=[outputs], device=device)
assert_np_equal(outputs.numpy()[0], 2.0 * np.power(in1.numpy()[0], in2.numpy()[0]), tol=tol)
assert_np_equal(outputs.numpy()[1], 2.0 * np.arctan2(in1.numpy()[1], in2.numpy()[1]), tol=tol)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
if dtype in np_float_types:
for i in range(10):
# pow:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[in1, in2], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 0, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(in1.numpy())
expected[0, i] = 2.0 * in2.numpy()[0, i] * np.power(in1.numpy()[0, i], in2.numpy()[0, i] - 1)
assert_np_equal(tape.gradients[in1].numpy(), expected, tol=5 * tol)
expected[0, i] = 2.0 * np.power(in1.numpy()[0, i], in2.numpy()[0, i]) * np.log(in1.numpy()[0, i])
assert_np_equal(tape.gradients[in2].numpy(), expected, tol=tol)
tape.zero()
# atan2:
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[in1, in2], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 1, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(in1.numpy())
expected[1, i] = 2.0 * in2.numpy()[1, i] / (in1.numpy()[1, i] ** 2 + in2.numpy()[1, i] ** 2)
assert_np_equal(tape.gradients[in1].numpy(), expected, tol=tol)
expected[1, i] = -2.0 * in1.numpy()[1, i] / (in1.numpy()[1, i] ** 2 + in2.numpy()[1, i] ** 2)
assert_np_equal(tape.gradients[in2].numpy(), expected, tol=tol)
tape.zero()
def test_float_to_int(test, device, dtype, register_kernels=False):
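    """Check round, rint, trunc, floor, ceil and frac, and that the piecewise-constant ones have zero gradients."""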
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
def check_float_to_int(
inputs: wp.array(dtype=wptype, ndim=2),
outputs: wp.array(dtype=wptype, ndim=2),
):
for i in range(10):
outputs[0, i] = wp.round(inputs[0, i])
outputs[1, i] = wp.rint(inputs[1, i])
outputs[2, i] = wp.trunc(inputs[2, i])
outputs[3, i] = wp.floor(inputs[3, i])
outputs[4, i] = wp.ceil(inputs[4, i])
outputs[5, i] = wp.frac(inputs[5, i])
kernel = getkernel(check_float_to_int, suffix=dtype.__name__)
output_select_kernel = get_select_kernel2(wptype)
if register_kernels:
return
inputs = wp.array(rng.standard_normal(size=(6, 10)).astype(dtype), dtype=wptype, requires_grad=True, device=device)
outputs = wp.zeros_like(inputs)
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
assert_np_equal(outputs.numpy()[0], np.round(inputs.numpy()[0]))
assert_np_equal(outputs.numpy()[1], np.rint(inputs.numpy()[1]))
assert_np_equal(outputs.numpy()[2], np.trunc(inputs.numpy()[2]))
assert_np_equal(outputs.numpy()[3], np.floor(inputs.numpy()[3]))
assert_np_equal(outputs.numpy()[4], np.ceil(inputs.numpy()[4]))
assert_np_equal(outputs.numpy()[5], np.modf(inputs.numpy()[5])[0])
    # round, rint, trunc, floor and ceil are piecewise constant, so their gradients should be zero:
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for i in range(10):
for j in range(5):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[inputs], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, j, i], outputs=[out], device=device)
tape.backward(loss=out)
assert_np_equal(tape.gradients[inputs].numpy(), np.zeros_like(inputs.numpy()), tol=tol)
tape.zero()
def test_interp(test, device, dtype, register_kernels=False):
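    """Check forward values and gradients of smoothstep and lerp."""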
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 5.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
def check_interp(
in1: wp.array(dtype=wptype, ndim=2),
in2: wp.array(dtype=wptype, ndim=2),
in3: wp.array(dtype=wptype, ndim=2),
outputs: wp.array(dtype=wptype, ndim=2),
):
# multiply outputs by 2 so we've got something to backpropagate:
for i in range(10):
outputs[0, i] = wptype(2) * wp.smoothstep(in1[0, i], in2[0, i], in3[0, i])
outputs[1, i] = wptype(2) * wp.lerp(in1[1, i], in2[1, i], in3[1, i])
kernel = getkernel(check_interp, suffix=dtype.__name__)
output_select_kernel = get_select_kernel2(wptype)
if register_kernels:
return
e0 = randvals(rng, [2, 10], dtype)
e1 = e0 + randvals(rng, [2, 10], dtype) + 0.1
in1 = wp.array(e0, dtype=wptype, requires_grad=True, device=device)
in2 = wp.array(e1, dtype=wptype, requires_grad=True, device=device)
in3 = wp.array(randvals(rng, [2, 10], dtype), dtype=wptype, requires_grad=True, device=device)
outputs = wp.zeros_like(in1)
wp.launch(kernel, dim=1, inputs=[in1, in2, in3], outputs=[outputs], device=device)
edge0 = in1.numpy()[0]
edge1 = in2.numpy()[0]
t_smoothstep = in3.numpy()[0]
x = np.clip((t_smoothstep - edge0) / (edge1 - edge0), 0, 1)
smoothstep_expected = 2.0 * x * x * (3 - 2 * x)
assert_np_equal(outputs.numpy()[0], smoothstep_expected, tol=tol)
a = in1.numpy()[1]
b = in2.numpy()[1]
t = in3.numpy()[1]
assert_np_equal(outputs.numpy()[1], 2.0 * (a * (1 - t) + b * t), tol=tol)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
if dtype in np_float_types:
for i in range(10):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[in1, in2, in3], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 0, i], outputs=[out], device=device)
tape.backward(loss=out)
# e0 = in1
# e1 = in2
# t = in3
# x = clamp((t - e0) / (e1 - e0), 0,1)
# dx/dt = 1 / (e1 - e0) if e0 < t < e1 else 0
# y = x * x * (3 - 2 * x)
# y = 3 * x * x - 2 * x * x * x
# dy/dx = 6 * ( x - x^2 )
dydx = 6 * x * (1 - x)
# dy/in1 = dy/dx dx/de0 de0/din1
dxde0 = (t_smoothstep - edge1) / ((edge1 - edge0) ** 2)
dxde0[x == 0] = 0
dxde0[x == 1] = 0
expected_grads = np.zeros_like(in1.numpy())
expected_grads[0, i] = 2.0 * dydx[i] * dxde0[i]
assert_np_equal(tape.gradients[in1].numpy(), expected_grads, tol=tol)
# dy/in2 = dy/dx dx/de1 de1/din2
dxde1 = (edge0 - t_smoothstep) / ((edge1 - edge0) ** 2)
dxde1[x == 0] = 0
dxde1[x == 1] = 0
expected_grads = np.zeros_like(in1.numpy())
expected_grads[0, i] = 2.0 * dydx[i] * dxde1[i]
assert_np_equal(tape.gradients[in2].numpy(), expected_grads, tol=tol)
# dy/in3 = dy/dx dx/dt dt/din3
dxdt = 1.0 / (edge1 - edge0)
dxdt[x == 0] = 0
dxdt[x == 1] = 0
expected_grads = np.zeros_like(in1.numpy())
expected_grads[0, i] = 2.0 * dydx[i] * dxdt[i]
assert_np_equal(tape.gradients[in3].numpy(), expected_grads, tol=tol)
tape.zero()
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[in1, in2, in3], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, 1, i], outputs=[out], device=device)
tape.backward(loss=out)
# y = a*(1-t) + b*t
# a = in1
# b = in2
# t = in3
# y = in1*( 1 - in3 ) + in2*in3
# dy/din1 = (1-in3)
expected_grads = np.zeros_like(in1.numpy())
expected_grads[1, i] = 2.0 * (1 - in3.numpy()[1, i])
assert_np_equal(tape.gradients[in1].numpy(), expected_grads, tol=tol)
# dy/din2 = in3
expected_grads = np.zeros_like(in1.numpy())
expected_grads[1, i] = 2.0 * in3.numpy()[1, i]
assert_np_equal(tape.gradients[in2].numpy(), expected_grads, tol=tol)
            # dy/din3 = in2 - in1
expected_grads = np.zeros_like(in1.numpy())
expected_grads[1, i] = 2.0 * (in2.numpy()[1, i] - in1.numpy()[1, i])
assert_np_equal(tape.gradients[in3].numpy(), expected_grads, tol=tol)
tape.zero()
def test_clamp(test, device, dtype, register_kernels=False):
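    """Check wp.clamp() forward values and that the gradient flows to exactly one of its three arguments."""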
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-6,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
def check_clamp(
in1: wp.array(dtype=wptype),
in2: wp.array(dtype=wptype),
in3: wp.array(dtype=wptype),
outputs: wp.array(dtype=wptype),
):
for i in range(100):
# multiply output by 2 so we've got something to backpropagate:
outputs[i] = wptype(2) * wp.clamp(in1[i], in2[i], in3[i])
kernel = getkernel(check_clamp, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
in1 = wp.array(randvals(rng, [100], dtype), dtype=wptype, requires_grad=True, device=device)
starts = randvals(rng, [100], dtype)
diffs = np.abs(randvals(rng, [100], dtype))
in2 = wp.array(starts, dtype=wptype, requires_grad=True, device=device)
in3 = wp.array(starts + diffs, dtype=wptype, requires_grad=True, device=device)
outputs = wp.zeros_like(in1)
wp.launch(kernel, dim=1, inputs=[in1, in2, in3], outputs=[outputs], device=device)
assert_np_equal(2 * np.clip(in1.numpy(), in2.numpy(), in3.numpy()), outputs.numpy(), tol=tol)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
if dtype in np_float_types:
for i in range(100):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[in1, in2, in3], outputs=[outputs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, i], outputs=[out], device=device)
tape.backward(loss=out)
t = in1.numpy()[i]
lower = in2.numpy()[i]
upper = in3.numpy()[i]
expected = np.zeros_like(in1.numpy())
if t < lower:
expected[i] = 2.0
assert_np_equal(tape.gradients[in2].numpy(), expected, tol=tol)
expected[i] = 0.0
assert_np_equal(tape.gradients[in1].numpy(), expected, tol=tol)
assert_np_equal(tape.gradients[in3].numpy(), expected, tol=tol)
elif t > upper:
expected[i] = 2.0
assert_np_equal(tape.gradients[in3].numpy(), expected, tol=tol)
expected[i] = 0.0
assert_np_equal(tape.gradients[in1].numpy(), expected, tol=tol)
assert_np_equal(tape.gradients[in2].numpy(), expected, tol=tol)
else:
expected[i] = 2.0
assert_np_equal(tape.gradients[in1].numpy(), expected, tol=tol)
expected[i] = 0.0
assert_np_equal(tape.gradients[in2].numpy(), expected, tol=tol)
assert_np_equal(tape.gradients[in3].numpy(), expected, tol=tol)
tape.zero()
devices = get_test_devices()
class TestArithmetic(unittest.TestCase):
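    """Container class; per-dtype tests are attached below via the add_function_test* helpers."""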
pass
# these unary ops only make sense for signed values:
for dtype in np_signed_int_types + np_float_types:
add_function_test_register_kernel(
TestArithmetic, f"test_unary_ops_{dtype.__name__}", test_unary_ops, devices=devices, dtype=dtype
)
for dtype in np_float_types:
add_function_test_register_kernel(
TestArithmetic, f"test_special_funcs_{dtype.__name__}", test_special_funcs, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestArithmetic,
f"test_special_funcs_2arg_{dtype.__name__}",
test_special_funcs_2arg,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestArithmetic, f"test_interp_{dtype.__name__}", test_interp, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestArithmetic, f"test_float_to_int_{dtype.__name__}", test_float_to_int, devices=devices, dtype=dtype
)
for dtype in np_scalar_types:
add_function_test_register_kernel(
TestArithmetic, f"test_clamp_{dtype.__name__}", test_clamp, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestArithmetic, f"test_nonzero_{dtype.__name__}", test_nonzero, devices=devices, dtype=dtype
)
add_function_test(TestArithmetic, f"test_arrays_{dtype.__name__}", test_arrays, devices=devices, dtype=dtype)
add_function_test_register_kernel(
TestArithmetic, f"test_binary_ops_{dtype.__name__}", test_binary_ops, devices=devices, dtype=dtype
)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=False)
| 43,846 | Python | 39.263545 | 119 | 0.559504 |
NVIDIA/warp/warp/tests/test_builtins_resolution.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import contextlib
import unittest
import numpy as np
from warp.tests.unittest_utils import *
def nps(dtype, value):
"""Creates a NumPy scalar value based on the given data type."""
# Workaround to avoid deprecation warning messages for integer overflows.
return np.array((value,)).astype(dtype)[0]
def npv(dtype, values):
"""Creates a vector of NumPy scalar values based on the given data type."""
return tuple(nps(dtype, x) for x in values)
def npm(dtype, dim, values):
"""Creates a matrix of NumPy scalar values based on the given data type."""
return tuple(npv(dtype, values[i * dim : (i + 1) * dim]) for i in range(dim))
def wpv(dtype, values):
"""Creates a vector of Warp scalar values based on the given data type."""
return tuple(dtype(x) for x in values)
def wpm(dtype, dim, values):
"""Creates a matrix of Warp scalar values based on the given data type."""
return tuple(wpv(dtype, values[i * dim : (i + 1) * dim]) for i in range(dim))
def test_int_arg_support(test, device, dtype):
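    """Check that a one-integer built-in (wp.invert) also resolves when passed a NumPy scalar of matching width."""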
np_type = wp.types.warp_type_to_np_dtype[dtype]
value = -1234567890123456789
expected = wp.invert(dtype(value))
test.assertEqual(wp.invert(nps(np_type, value)), expected)
def test_float_arg_support(test, device, dtype):
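    """Check that a one-float built-in (wp.sin) also resolves when passed a NumPy scalar of matching width."""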
np_type = wp.types.warp_type_to_np_dtype[dtype]
value = 1.23
expected = wp.sin(dtype(value))
test.assertEqual(wp.sin(nps(np_type, value)), expected)
def test_int_int_args_support(test, device, dtype):
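    """Check wp.mul() resolution for Warp/NumPy integer scalar mixes; plain Python ints only match the int32 overload."""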
np_type = wp.types.warp_type_to_np_dtype[dtype]
value = -1234567890
expected = wp.mul(dtype(value), dtype(value))
test.assertEqual(wp.mul(dtype(value), dtype(value)), expected)
test.assertEqual(wp.mul(dtype(value), nps(np_type, value)), expected)
test.assertEqual(wp.mul(nps(np_type, value), dtype(value)), expected)
test.assertEqual(wp.mul(nps(np_type, value), nps(np_type, value)), expected)
if dtype is wp.int32:
test.assertEqual(wp.mul(dtype(value), value), expected)
test.assertEqual(wp.mul(nps(np_type, value), value), expected)
test.assertEqual(wp.mul(value, value), expected)
test.assertEqual(wp.mul(value, dtype(value)), expected)
test.assertEqual(wp.mul(value, nps(np_type, value)), expected)
else:
with test.assertRaisesRegex(
RuntimeError,
rf"Couldn't find a function 'mul' compatible with " rf"the arguments '{dtype.__name__}, int'$",
):
wp.mul(dtype(value), value)
with test.assertRaisesRegex(
RuntimeError,
rf"Couldn't find a function 'mul' compatible with " rf"the arguments '{np_type.__name__}, int'$",
):
wp.mul(nps(np_type, value), value)
with test.assertRaisesRegex(
RuntimeError,
rf"Couldn't find a function 'mul' compatible with " rf"the arguments 'int, {dtype.__name__}'$",
):
wp.mul(value, dtype(value))
with test.assertRaisesRegex(
RuntimeError,
rf"Couldn't find a function 'mul' compatible with " rf"the arguments 'int, {np_type.__name__}'$",
):
wp.mul(value, nps(np_type, value))
def test_mat_arg_support(test, device, dtype):
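    """Check that a matrix built-in (wp.trace) resolves for Warp matrices, flat/nested tuples and NumPy arrays."""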
np_type = wp.types.warp_type_to_np_dtype[dtype]
mat_cls = wp.types.matrix((3, 3), dtype)
values = (1.23, 2.34, 3.45, 4.56, 5.67, 6.78, 7.89, 8.90, 9.01)
expected = wp.trace(mat_cls(*values))
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
test.assertEqual(wp.trace(wpv(dtype, values)), expected)
test.assertEqual(wp.trace(wpm(dtype, 3, values)), expected)
test.assertEqual(wp.trace(npv(np_type, values)), expected)
test.assertEqual(wp.trace(npm(np_type, 3, values)), expected)
test.assertEqual(wp.trace(np.array(npv(np_type, values))), expected)
def test_mat_mat_args_support(test, device, dtype):
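    """Check wp.ddot() resolution for Warp/tuple/NumPy matrix operands; plain float tuples only match the float32 overload."""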
np_type = wp.types.warp_type_to_np_dtype[dtype]
mat_cls = wp.types.matrix((3, 3), dtype)
a_values = (0.12, 1.23, 2.34, 0.12, 1.23, 2.34, 0.12, 1.23, 2.34)
b_values = (2.34, 1.23, 0.12, 2.34, 1.23, 0.12, 2.34, 1.23, 0.12)
expected = wp.ddot(mat_cls(*a_values), mat_cls(*b_values))
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
test.assertEqual(wp.ddot(mat_cls(*a_values), mat_cls(*b_values)), expected)
test.assertEqual(wp.ddot(mat_cls(*a_values), wpv(dtype, b_values)), expected)
test.assertEqual(wp.ddot(mat_cls(*a_values), wpm(dtype, 3, b_values)), expected)
test.assertEqual(wp.ddot(mat_cls(*a_values), npv(np_type, b_values)), expected)
test.assertEqual(wp.ddot(mat_cls(*a_values), npm(np_type, 3, b_values)), expected)
test.assertEqual(wp.ddot(mat_cls(*a_values), np.array(npv(np_type, b_values))), expected)
test.assertEqual(wp.ddot(wpv(dtype, a_values), mat_cls(*b_values)), expected)
test.assertEqual(wp.ddot(wpv(dtype, a_values), wpv(dtype, b_values)), expected)
test.assertEqual(wp.ddot(wpv(dtype, a_values), wpm(dtype, 3, b_values)), expected)
test.assertEqual(wp.ddot(wpv(dtype, a_values), npv(np_type, b_values)), expected)
test.assertEqual(wp.ddot(wpv(dtype, a_values), npm(np_type, 3, b_values)), expected)
test.assertEqual(wp.ddot(wpv(dtype, a_values), np.array(npv(np_type, b_values))), expected)
test.assertEqual(wp.ddot(wpm(dtype, 3, a_values), mat_cls(*b_values)), expected)
test.assertEqual(wp.ddot(wpm(dtype, 3, a_values), wpv(dtype, b_values)), expected)
test.assertEqual(wp.ddot(wpm(dtype, 3, a_values), wpm(dtype, 3, b_values)), expected)
test.assertEqual(wp.ddot(wpm(dtype, 3, a_values), npv(np_type, b_values)), expected)
test.assertEqual(wp.ddot(wpm(dtype, 3, a_values), npm(np_type, 3, b_values)), expected)
test.assertEqual(wp.ddot(wpm(dtype, 3, a_values), np.array(npv(np_type, b_values))), expected)
test.assertEqual(wp.ddot(npv(np_type, a_values), mat_cls(*b_values)), expected)
test.assertEqual(wp.ddot(npv(np_type, a_values), wpv(dtype, b_values)), expected)
test.assertEqual(wp.ddot(npv(np_type, a_values), wpm(dtype, 3, b_values)), expected)
test.assertEqual(wp.ddot(npv(np_type, a_values), npv(np_type, b_values)), expected)
test.assertEqual(wp.ddot(npv(np_type, a_values), npm(np_type, 3, b_values)), expected)
test.assertEqual(wp.ddot(npv(np_type, a_values), np.array(npv(np_type, b_values))), expected)
test.assertEqual(wp.ddot(npm(np_type, 3, a_values), mat_cls(*b_values)), expected)
test.assertEqual(wp.ddot(npm(np_type, 3, a_values), wpv(dtype, b_values)), expected)
test.assertEqual(wp.ddot(npm(np_type, 3, a_values), wpm(dtype, 3, b_values)), expected)
test.assertEqual(wp.ddot(npm(np_type, 3, a_values), npv(np_type, b_values)), expected)
test.assertEqual(wp.ddot(npm(np_type, 3, a_values), npm(np_type, 3, b_values)), expected)
test.assertEqual(wp.ddot(npm(np_type, 3, a_values), np.array(npv(np_type, b_values))), expected)
test.assertEqual(wp.ddot(np.array(npv(np_type, a_values)), mat_cls(*b_values)), expected)
test.assertEqual(wp.ddot(np.array(npv(np_type, a_values)), wpv(dtype, b_values)), expected)
test.assertEqual(wp.ddot(np.array(npv(np_type, a_values)), wpm(dtype, 3, b_values)), expected)
test.assertEqual(wp.ddot(np.array(npv(np_type, a_values)), npv(np_type, b_values)), expected)
test.assertEqual(wp.ddot(np.array(npv(np_type, a_values)), npm(np_type, 3, b_values)), expected)
test.assertEqual(wp.ddot(np.array(npv(np_type, a_values)), np.array(npv(np_type, b_values))), expected)
if dtype is wp.float32:
test.assertEqual(wp.ddot(mat_cls(*a_values), b_values), expected)
test.assertEqual(wp.ddot(wpv(dtype, a_values), b_values), expected)
test.assertEqual(wp.ddot(wpm(dtype, 3, a_values), b_values), expected)
test.assertEqual(wp.ddot(npv(np_type, a_values), b_values), expected)
test.assertEqual(wp.ddot(npm(np_type, 3, a_values), b_values), expected)
test.assertEqual(wp.ddot(a_values, b_values), expected)
test.assertEqual(wp.ddot(np.array(npv(np_type, a_values)), b_values), expected)
test.assertEqual(wp.ddot(a_values, mat_cls(*b_values)), expected)
test.assertEqual(wp.ddot(a_values, wpv(dtype, b_values)), expected)
test.assertEqual(wp.ddot(a_values, wpm(dtype, 3, b_values)), expected)
test.assertEqual(wp.ddot(a_values, npv(np_type, b_values)), expected)
test.assertEqual(wp.ddot(a_values, npm(np_type, 3, b_values)), expected)
test.assertEqual(wp.ddot(a_values, np.array(npv(np_type, b_values))), expected)
else:
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'ddot' compatible with " r"the arguments 'mat_t, tuple'$",
):
wp.ddot(mat_cls(*a_values), b_values)
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'ddot' compatible with " r"the arguments 'tuple, tuple'$",
):
wp.ddot(wpv(dtype, a_values), b_values)
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'ddot' compatible with " r"the arguments 'tuple, tuple'$",
):
wp.ddot(wpm(dtype, 3, a_values), b_values)
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'ddot' compatible with " r"the arguments 'tuple, tuple'$",
):
wp.ddot(npv(np_type, a_values), b_values)
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'ddot' compatible with " r"the arguments 'tuple, tuple'$",
):
wp.ddot(npm(np_type, 3, a_values), b_values)
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'ddot' compatible with " r"the arguments 'ndarray, tuple'$",
):
wp.ddot(np.array(npv(np_type, a_values)), b_values)
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'ddot' compatible with " r"the arguments 'tuple, mat_t'$",
):
wp.ddot(a_values, mat_cls(*b_values))
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'ddot' compatible with " r"the arguments 'tuple, tuple'$",
):
wp.ddot(a_values, wpv(dtype, b_values))
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'ddot' compatible with " r"the arguments 'tuple, tuple'$",
):
wp.ddot(a_values, wpm(dtype, 3, b_values))
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'ddot' compatible with " r"the arguments 'tuple, tuple'$",
):
wp.ddot(a_values, npv(np_type, b_values))
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'ddot' compatible with " r"the arguments 'tuple, tuple'$",
):
wp.ddot(a_values, npm(np_type, 3, b_values))
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'ddot' compatible with " r"the arguments 'tuple, ndarray'$",
):
wp.ddot(a_values, np.array(npv(np_type, b_values)))
def test_mat_float_args_support(test, device, dtype):
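    """Check matrix-times-scalar resolution of wp.mul(); plain Python floats only match the float32 overload."""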
np_type = wp.types.warp_type_to_np_dtype[dtype]
mat_cls = wp.types.matrix((3, 3), dtype)
a_values = (1.23, 2.34, 3.45, 4.56, 5.67, 6.78, 7.89, 8.90, 9.01)
b_value = 0.12
expected = wp.mul(mat_cls(*a_values), dtype(b_value))
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
test.assertEqual(wp.mul(mat_cls(*a_values), dtype(b_value)), expected)
test.assertEqual(wp.mul(mat_cls(*a_values), nps(np_type, b_value)), expected)
test.assertEqual(wp.mul(wpv(dtype, a_values), dtype(b_value)), expected)
test.assertEqual(wp.mul(wpv(dtype, a_values), nps(np_type, b_value)), expected)
test.assertEqual(wp.mul(wpm(dtype, 3, a_values), dtype(b_value)), expected)
test.assertEqual(wp.mul(wpm(dtype, 3, a_values), nps(np_type, b_value)), expected)
test.assertEqual(wp.mul(npv(np_type, a_values), dtype(b_value)), expected)
test.assertEqual(wp.mul(npv(np_type, a_values), nps(np_type, b_value)), expected)
test.assertEqual(wp.mul(npm(np_type, 3, a_values), dtype(b_value)), expected)
test.assertEqual(wp.mul(npm(np_type, 3, a_values), nps(np_type, b_value)), expected)
test.assertEqual(wp.mul(np.array(npv(np_type, a_values)), dtype(b_value)), expected)
test.assertEqual(wp.mul(np.array(npv(np_type, a_values)), nps(np_type, b_value)), expected)
if dtype is wp.float32:
test.assertEqual(wp.mul(mat_cls(*a_values), b_value), expected)
test.assertEqual(wp.mul(wpv(dtype, a_values), b_value), expected)
test.assertEqual(wp.mul(wpm(dtype, 3, a_values), b_value), expected)
test.assertEqual(wp.mul(npv(np_type, a_values), b_value), expected)
test.assertEqual(wp.mul(npm(np_type, 3, a_values), b_value), expected)
test.assertEqual(wp.mul(a_values, b_value), expected)
test.assertEqual(wp.mul(np.array(npv(np_type, a_values)), b_value), expected)
test.assertEqual(wp.mul(a_values, dtype(b_value)), expected)
test.assertEqual(wp.mul(a_values, nps(np_type, b_value)), expected)
else:
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'mul' compatible with " r"the arguments 'mat_t, float'$",
):
wp.mul(mat_cls(*a_values), b_value)
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'mul' compatible with " r"the arguments 'tuple, float'$",
):
wp.mul(wpv(dtype, a_values), b_value)
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'mul' compatible with " r"the arguments 'tuple, float'$",
):
wp.mul(wpm(dtype, 3, a_values), b_value)
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'mul' compatible with " r"the arguments 'tuple, float'$",
):
wp.mul(npv(np_type, a_values), b_value)
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'mul' compatible with " r"the arguments 'tuple, float'$",
):
wp.mul(npm(np_type, 3, a_values), b_value)
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'mul' compatible with " r"the arguments 'ndarray, float'$",
):
wp.mul(np.array(npv(np_type, a_values)), b_value)
with test.assertRaisesRegex(
RuntimeError,
rf"Couldn't find a function 'mul' compatible with " rf"the arguments 'tuple, {dtype.__name__}'$",
):
wp.mul(a_values, dtype(b_value))
with test.assertRaisesRegex(
RuntimeError,
rf"Couldn't find a function 'mul' compatible with " rf"the arguments 'tuple, {np_type.__name__}'$",
):
wp.mul(a_values, nps(np_type, b_value))
def test_vec_arg_support(test, device, dtype):
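    """Check that a vector built-in (wp.length) resolves for Warp vectors, tuples and NumPy arrays."""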
np_type = wp.types.warp_type_to_np_dtype[dtype]
vec_cls = wp.types.vector(3, dtype)
values = (1.23, 2.34, 3.45)
expected = wp.length(vec_cls(*values))
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
test.assertAlmostEqual(wp.length(wpv(dtype, values)), expected)
test.assertAlmostEqual(wp.length(npv(np_type, values)), expected)
test.assertAlmostEqual(wp.length(np.array(npv(np_type, values))), expected)
def test_vec_vec_args_support(test, device, dtype):
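    """Check wp.dot() resolution for Warp/tuple/NumPy vector operands; plain float tuples only match the float32 overload."""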
np_type = wp.types.warp_type_to_np_dtype[dtype]
vec_cls = wp.types.vector(3, dtype)
a_values = (1.23, 2.34, 3.45)
b_values = (4.56, 5.67, 6.78)
expected = wp.dot(vec_cls(*a_values), vec_cls(*b_values))
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
test.assertEqual(wp.dot(vec_cls(*a_values), vec_cls(*b_values)), expected)
test.assertEqual(wp.dot(vec_cls(*a_values), wpv(dtype, b_values)), expected)
test.assertEqual(wp.dot(vec_cls(*a_values), npv(np_type, b_values)), expected)
test.assertEqual(wp.dot(vec_cls(*a_values), np.array(npv(np_type, b_values))), expected)
test.assertEqual(wp.dot(wpv(dtype, a_values), vec_cls(*b_values)), expected)
test.assertEqual(wp.dot(wpv(dtype, a_values), wpv(dtype, b_values)), expected)
test.assertEqual(wp.dot(wpv(dtype, a_values), npv(np_type, b_values)), expected)
test.assertEqual(wp.dot(wpv(dtype, a_values), np.array(npv(np_type, b_values))), expected)
test.assertEqual(wp.dot(npv(np_type, a_values), vec_cls(*b_values)), expected)
test.assertEqual(wp.dot(npv(np_type, a_values), wpv(dtype, b_values)), expected)
test.assertEqual(wp.dot(npv(np_type, a_values), npv(np_type, b_values)), expected)
test.assertEqual(wp.dot(npv(np_type, a_values), np.array(npv(np_type, b_values))), expected)
test.assertEqual(wp.dot(np.array(npv(np_type, a_values)), vec_cls(*b_values)), expected)
test.assertEqual(wp.dot(np.array(npv(np_type, a_values)), wpv(dtype, b_values)), expected)
test.assertEqual(wp.dot(np.array(npv(np_type, a_values)), npv(np_type, b_values)), expected)
test.assertEqual(wp.dot(np.array(npv(np_type, a_values)), np.array(npv(np_type, b_values))), expected)
if dtype is wp.float32:
test.assertEqual(wp.dot(vec_cls(*a_values), b_values), expected)
test.assertEqual(wp.dot(wpv(dtype, a_values), b_values), expected)
test.assertEqual(wp.dot(npv(np_type, a_values), b_values), expected)
test.assertEqual(wp.dot(a_values, b_values), expected)
test.assertEqual(wp.dot(np.array(npv(np_type, a_values)), b_values), expected)
test.assertEqual(wp.dot(a_values, vec_cls(*b_values)), expected)
test.assertEqual(wp.dot(a_values, wpv(dtype, b_values)), expected)
test.assertEqual(wp.dot(a_values, npv(np_type, b_values)), expected)
test.assertEqual(wp.dot(a_values, np.array(npv(np_type, b_values))), expected)
else:
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'dot' compatible with " r"the arguments 'vec_t, tuple'$",
):
wp.dot(vec_cls(*a_values), b_values)
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'dot' compatible with " r"the arguments 'tuple, tuple'$",
):
wp.dot(wpv(dtype, a_values), b_values)
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'dot' compatible with " r"the arguments 'tuple, tuple'$",
):
wp.dot(npv(np_type, a_values), b_values)
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'dot' compatible with " r"the arguments 'ndarray, tuple'$",
):
wp.dot(np.array(npv(np_type, a_values)), b_values)
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'dot' compatible with " r"the arguments 'tuple, vec_t'$",
):
wp.dot(a_values, vec_cls(*b_values))
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'dot' compatible with " r"the arguments 'tuple, tuple'$",
):
wp.dot(a_values, wpv(dtype, b_values))
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'dot' compatible with " r"the arguments 'tuple, tuple'$",
):
wp.dot(a_values, npv(np_type, b_values))
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'dot' compatible with " r"the arguments 'tuple, ndarray'$",
):
wp.dot(a_values, np.array(npv(np_type, b_values)))
def test_vec_float_args_support(test, device, dtype):
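    """Check vector-times-scalar resolution of wp.mul(); plain Python floats only match the float32 overload."""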
np_type = wp.types.warp_type_to_np_dtype[dtype]
vec_cls = wp.types.vector(3, dtype)
a_values = (1.23, 2.34, 3.45)
b_value = 4.56
expected = wp.mul(vec_cls(*a_values), dtype(b_value))
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
test.assertEqual(wp.mul(vec_cls(*a_values), dtype(b_value)), expected)
test.assertEqual(wp.mul(vec_cls(*a_values), nps(np_type, b_value)), expected)
test.assertEqual(wp.mul(wpv(dtype, a_values), dtype(b_value)), expected)
test.assertEqual(wp.mul(wpv(dtype, a_values), nps(np_type, b_value)), expected)
test.assertEqual(wp.mul(npv(np_type, a_values), dtype(b_value)), expected)
test.assertEqual(wp.mul(npv(np_type, a_values), nps(np_type, b_value)), expected)
test.assertEqual(wp.mul(np.array(npv(np_type, a_values)), dtype(b_value)), expected)
test.assertEqual(wp.mul(np.array(npv(np_type, a_values)), nps(np_type, b_value)), expected)
if dtype is wp.float32:
test.assertEqual(wp.mul(vec_cls(*a_values), b_value), expected)
test.assertEqual(wp.mul(wpv(dtype, a_values), b_value), expected)
test.assertEqual(wp.mul(npv(np_type, a_values), b_value), expected)
test.assertEqual(wp.mul(a_values, b_value), expected)
test.assertEqual(wp.mul(np.array(npv(np_type, a_values)), b_value), expected)
test.assertEqual(wp.mul(a_values, dtype(b_value)), expected)
test.assertEqual(wp.mul(a_values, nps(np_type, b_value)), expected)
else:
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'mul' compatible with " r"the arguments 'vec_t, float'$",
):
wp.mul(vec_cls(*a_values), b_value)
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'mul' compatible with " r"the arguments 'tuple, float'$",
):
wp.mul(wpv(dtype, a_values), b_value)
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'mul' compatible with " r"the arguments 'tuple, float'$",
):
wp.mul(npv(np_type, a_values), b_value)
with test.assertRaisesRegex(
RuntimeError,
r"Couldn't find a function 'mul' compatible with " r"the arguments 'ndarray, float'$",
):
wp.mul(np.array(npv(np_type, a_values)), b_value)
with test.assertRaisesRegex(
RuntimeError,
rf"Couldn't find a function 'mul' compatible with " rf"the arguments 'tuple, {dtype.__name__}'$",
):
wp.mul(a_values, dtype(b_value))
with test.assertRaisesRegex(
RuntimeError,
rf"Couldn't find a function 'mul' compatible with " rf"the arguments 'tuple, {np_type.__name__}'$",
):
wp.mul(a_values, nps(np_type, b_value))
class TestBuiltinsResolution(unittest.TestCase):
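    """Tests that call built-ins directly from Python, checking overflow and precision behaviour of each scalar-width overload."""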
def test_int_arg_overflow(self):
value = -1234567890123456789
self.assertEqual(wp.invert(wp.int8(value)), 20)
self.assertEqual(wp.invert(wp.int16(value)), -32492)
self.assertEqual(wp.invert(wp.int32(value)), 2112454932)
self.assertEqual(wp.invert(wp.int64(value)), 1234567890123456788)
self.assertEqual(wp.invert(wp.uint8(value)), 20)
self.assertEqual(wp.invert(wp.uint16(value)), 33044)
self.assertEqual(wp.invert(wp.uint32(value)), 2112454932)
self.assertEqual(wp.invert(wp.uint64(value)), 1234567890123456788)
self.assertEqual(wp.invert(value), wp.invert(wp.int32(value)))
def test_float_arg_precision(self):
value = 1.23
expected = 0.94248880193169748409
result = wp.sin(wp.float64(value))
self.assertAlmostEqual(result, expected, places=12)
result = wp.sin(wp.float32(value))
self.assertNotAlmostEqual(result, expected, places=12)
self.assertAlmostEqual(result, expected, places=5)
result = wp.sin(wp.float16(value))
self.assertNotAlmostEqual(result, expected, places=5)
self.assertAlmostEqual(result, expected, places=1)
self.assertEqual(wp.sin(value), wp.sin(wp.float32(value)))
def test_int_int_args_overflow(self):
value = -1234567890
self.assertEqual(wp.mul(wp.int8(value), wp.int8(value)), 68)
self.assertEqual(wp.mul(wp.int16(value), wp.int16(value)), -3004)
self.assertEqual(wp.mul(wp.int32(value), wp.int32(value)), 304084036)
self.assertEqual(wp.mul(wp.int64(value), wp.int64(value)), 1524157875019052100)
self.assertEqual(wp.mul(wp.uint8(value), wp.uint8(value)), 68)
self.assertEqual(wp.mul(wp.uint16(value), wp.uint16(value)), 62532)
self.assertEqual(wp.mul(wp.uint32(value), wp.uint32(value)), 304084036)
self.assertEqual(wp.mul(wp.uint64(value), wp.uint64(value)), 1524157875019052100)
self.assertEqual(wp.mul(value, value), wp.mul(wp.int32(value), wp.int32(value)))
def test_mat22_arg_precision(self):
values = (1.23, 2.34, 3.45, 4.56)
values_2d = (values[0:2], values[2:4])
expected = 5.78999999999999914735
result = wp.trace(wp.mat22d(*values))
self.assertAlmostEqual(result, expected, places=12)
result = wp.trace(wp.mat22f(*values))
self.assertNotAlmostEqual(result, expected, places=12)
self.assertAlmostEqual(result, expected, places=5)
result = wp.trace(wp.mat22h(*values))
self.assertNotAlmostEqual(result, expected, places=5)
self.assertAlmostEqual(result, expected, places=1)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
self.assertEqual(wp.trace(values), wp.trace(wp.mat22f(*values)))
self.assertEqual(wp.trace(values_2d), wp.trace(wp.mat22f(*values)))
def test_mat33_arg_precision(self):
values = (1.23, 2.34, 3.45, 4.56, 5.67, 6.78, 7.89, 8.90, 9.01)
values_2d = (values[0:3], values[3:6], values[6:9])
expected = 15.91000000000000014211
result = wp.trace(wp.mat33d(*values))
self.assertAlmostEqual(result, expected, places=12)
result = wp.trace(wp.mat33f(*values))
self.assertNotAlmostEqual(result, expected, places=12)
self.assertAlmostEqual(result, expected, places=5)
result = wp.trace(wp.mat33h(*values))
self.assertNotAlmostEqual(result, expected, places=5)
self.assertAlmostEqual(result, expected, places=1)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
self.assertEqual(wp.trace(values), wp.trace(wp.mat33f(*values)))
self.assertEqual(wp.trace(values_2d), wp.trace(wp.mat33f(*values)))
def test_mat44_arg_precision(self):
values = (1.23, 2.34, 3.45, 4.56, 5.67, 6.78, 7.89, 8.90, 9.01, 10.12, 11.23, 12.34, 13.45, 14.56, 15.67, 16.78)
values_2d = (values[0:4], values[4:8], values[8:12], values[12:16])
expected = 36.02000000000000312639
result = wp.trace(wp.mat44d(*values))
self.assertAlmostEqual(result, expected, places=12)
result = wp.trace(wp.mat44f(*values))
self.assertNotAlmostEqual(result, expected, places=12)
self.assertAlmostEqual(result, expected, places=5)
result = wp.trace(wp.mat44h(*values))
self.assertNotAlmostEqual(result, expected, places=5)
self.assertAlmostEqual(result, expected, places=1)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
self.assertEqual(wp.trace(values), wp.trace(wp.mat44f(*values)))
self.assertEqual(wp.trace(values_2d), wp.trace(wp.mat44f(*values)))
def test_mat22_mat22_args_precision(self):
a_values = (0.12, 1.23, 0.12, 1.23)
a_values_2d = (a_values[0:2], a_values[2:4])
b_values = (1.23, 0.12, 1.23, 0.12)
b_values_2d = (b_values[0:2], b_values[2:4])
expected = 0.59039999999999992486
result = wp.ddot(wp.mat22d(*a_values), wp.mat22d(*b_values))
self.assertAlmostEqual(result, expected, places=12)
result = wp.ddot(wp.mat22f(*a_values), wp.mat22f(*b_values))
self.assertNotAlmostEqual(result, expected, places=12)
self.assertAlmostEqual(result, expected, places=5)
result = wp.ddot(wp.mat22h(*a_values), wp.mat22h(*b_values))
self.assertNotAlmostEqual(result, expected, places=5)
self.assertAlmostEqual(result, expected, places=1)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
self.assertEqual(wp.ddot(a_values, b_values), wp.ddot(wp.mat22f(*a_values), wp.mat22f(*b_values)))
self.assertEqual(wp.ddot(a_values_2d, b_values_2d), wp.ddot(wp.mat22f(*a_values), wp.mat22f(*b_values)))
self.assertEqual(wp.ddot(a_values, b_values_2d), wp.ddot(wp.mat22f(*a_values), wp.mat22f(*b_values)))
self.assertEqual(wp.ddot(a_values_2d, b_values), wp.ddot(wp.mat22f(*a_values), wp.mat22f(*b_values)))
def test_mat33_mat33_args_precision(self):
a_values = (0.12, 1.23, 2.34, 0.12, 1.23, 2.34, 0.12, 1.23, 2.34)
a_values_2d = (a_values[0:3], a_values[3:6], a_values[6:9])
b_values = (2.34, 1.23, 0.12, 2.34, 1.23, 0.12, 2.34, 1.23, 0.12)
b_values_2d = (b_values[0:3], b_values[3:6], b_values[6:9])
expected = 6.22350000000000047606
result = wp.ddot(wp.mat33d(*a_values), wp.mat33d(*b_values))
self.assertAlmostEqual(result, expected, places=12)
result = wp.ddot(wp.mat33f(*a_values), wp.mat33f(*b_values))
self.assertNotAlmostEqual(result, expected, places=12)
self.assertAlmostEqual(result, expected, places=5)
result = wp.ddot(wp.mat33h(*a_values), wp.mat33h(*b_values))
self.assertNotAlmostEqual(result, expected, places=5)
self.assertAlmostEqual(result, expected, places=1)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
self.assertEqual(wp.ddot(a_values, b_values), wp.ddot(wp.mat33f(*a_values), wp.mat33f(*b_values)))
self.assertEqual(wp.ddot(a_values_2d, b_values_2d), wp.ddot(wp.mat33f(*a_values), wp.mat33f(*b_values)))
self.assertEqual(wp.ddot(a_values, b_values_2d), wp.ddot(wp.mat33f(*a_values), wp.mat33f(*b_values)))
self.assertEqual(wp.ddot(a_values_2d, b_values), wp.ddot(wp.mat33f(*a_values), wp.mat33f(*b_values)))
    def test_mat44_mat44_args_precision(self):
a_values = (0.12, 1.23, 2.34, 3.45, 0.12, 1.23, 2.34, 3.45, 0.12, 1.23, 2.34, 3.45, 0.12, 1.23, 2.34, 3.45)
a_values_2d = (a_values[0:4], a_values[4:8], a_values[8:12], a_values[12:16])
b_values = (3.45, 2.34, 1.23, 0.12, 3.45, 2.34, 1.23, 0.12, 3.45, 2.34, 1.23, 0.12, 3.45, 2.34, 1.23, 0.12)
b_values_2d = (b_values[0:4], b_values[4:8], b_values[8:12], b_values[12:16])
expected = 26.33760000000000189857
result = wp.ddot(wp.mat44d(*a_values), wp.mat44d(*b_values))
self.assertAlmostEqual(result, expected, places=12)
result = wp.ddot(wp.mat44f(*a_values), wp.mat44f(*b_values))
self.assertNotAlmostEqual(result, expected, places=12)
self.assertAlmostEqual(result, expected, places=5)
result = wp.ddot(wp.mat44h(*a_values), wp.mat44h(*b_values))
self.assertNotAlmostEqual(result, expected, places=5)
self.assertAlmostEqual(result, expected, places=1)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
self.assertEqual(wp.ddot(a_values, b_values), wp.ddot(wp.mat44f(*a_values), wp.mat44f(*b_values)))
self.assertEqual(wp.ddot(a_values_2d, b_values_2d), wp.ddot(wp.mat44f(*a_values), wp.mat44f(*b_values)))
self.assertEqual(wp.ddot(a_values, b_values_2d), wp.ddot(wp.mat44f(*a_values), wp.mat44f(*b_values)))
self.assertEqual(wp.ddot(a_values_2d, b_values), wp.ddot(wp.mat44f(*a_values), wp.mat44f(*b_values)))
def test_mat22_float_args_precision(self):
a_values = (1.23, 2.34, 3.45, 4.56)
a_values_2d = (a_values[0:2], a_values[2:4])
b_value = 0.12
expected_00 = 0.14759999999999998122
expected_01 = 0.28079999999999999405
expected_10 = 0.41399999999999997913
expected_11 = 0.54719999999999990870
result = wp.mul(wp.mat22d(*a_values), wp.float64(b_value))
self.assertAlmostEqual(result[0][0], expected_00, places=12)
self.assertAlmostEqual(result[0][1], expected_01, places=12)
self.assertAlmostEqual(result[1][0], expected_10, places=12)
self.assertAlmostEqual(result[1][1], expected_11, places=12)
result = wp.mul(wp.mat22f(*a_values), wp.float32(b_value))
self.assertNotAlmostEqual(result[0][0], expected_00, places=12)
self.assertNotAlmostEqual(result[0][1], expected_01, places=12)
self.assertNotAlmostEqual(result[1][0], expected_10, places=12)
self.assertNotAlmostEqual(result[1][1], expected_11, places=12)
self.assertAlmostEqual(result[0][0], expected_00, places=5)
self.assertAlmostEqual(result[0][1], expected_01, places=5)
self.assertAlmostEqual(result[1][0], expected_10, places=5)
self.assertAlmostEqual(result[1][1], expected_11, places=5)
result = wp.mul(wp.mat22h(*a_values), wp.float16(b_value))
self.assertNotAlmostEqual(result[0][0], expected_00, places=5)
self.assertNotAlmostEqual(result[0][1], expected_01, places=5)
self.assertNotAlmostEqual(result[1][0], expected_10, places=5)
self.assertNotAlmostEqual(result[1][1], expected_11, places=5)
self.assertAlmostEqual(result[0][0], expected_00, places=1)
self.assertAlmostEqual(result[0][1], expected_01, places=1)
self.assertAlmostEqual(result[1][0], expected_10, places=1)
self.assertAlmostEqual(result[1][1], expected_11, places=1)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
# Multiplying a 1-D tuple of length 4 is ambiguous because it could match
# either the `vec4f` or `mat22f` overload. As a result, only the 2-D variant
# of the tuple is expected to resolve correctly.
self.assertEqual(wp.mul(a_values_2d, b_value), wp.mul(wp.mat22f(*a_values), wp.float32(b_value)))
def test_mat33_float_args_precision(self):
a_values = (1.23, 2.34, 3.45, 4.56, 5.67, 6.78, 7.89, 8.90, 9.01)
a_values_2d = (a_values[0:3], a_values[3:6], a_values[6:9])
b_value = 0.12
expected_00 = 0.14759999999999998122
expected_01 = 0.28079999999999999405
expected_02 = 0.41399999999999997913
expected_10 = 0.54719999999999990870
expected_11 = 0.68040000000000000480
expected_12 = 0.81359999999999998987
expected_20 = 0.94679999999999997495
expected_21 = 1.06800000000000006040
expected_22 = 1.08119999999999993889
result = wp.mul(wp.mat33d(*a_values), wp.float64(b_value))
self.assertAlmostEqual(result[0][0], expected_00, places=12)
self.assertAlmostEqual(result[0][1], expected_01, places=12)
self.assertAlmostEqual(result[0][2], expected_02, places=12)
self.assertAlmostEqual(result[1][0], expected_10, places=12)
self.assertAlmostEqual(result[1][1], expected_11, places=12)
self.assertAlmostEqual(result[1][2], expected_12, places=12)
self.assertAlmostEqual(result[2][0], expected_20, places=12)
self.assertAlmostEqual(result[2][1], expected_21, places=12)
self.assertAlmostEqual(result[2][2], expected_22, places=12)
result = wp.mul(wp.mat33f(*a_values), wp.float32(b_value))
self.assertNotAlmostEqual(result[0][0], expected_00, places=12)
self.assertNotAlmostEqual(result[0][1], expected_01, places=12)
self.assertNotAlmostEqual(result[0][2], expected_02, places=12)
self.assertNotAlmostEqual(result[1][0], expected_10, places=12)
self.assertNotAlmostEqual(result[1][1], expected_11, places=12)
self.assertNotAlmostEqual(result[1][2], expected_12, places=12)
self.assertNotAlmostEqual(result[2][0], expected_20, places=12)
self.assertNotAlmostEqual(result[2][1], expected_21, places=12)
self.assertNotAlmostEqual(result[2][2], expected_22, places=12)
self.assertAlmostEqual(result[0][0], expected_00, places=5)
self.assertAlmostEqual(result[0][1], expected_01, places=5)
self.assertAlmostEqual(result[0][2], expected_02, places=5)
self.assertAlmostEqual(result[1][0], expected_10, places=5)
self.assertAlmostEqual(result[1][1], expected_11, places=5)
self.assertAlmostEqual(result[1][2], expected_12, places=5)
self.assertAlmostEqual(result[2][0], expected_20, places=5)
self.assertAlmostEqual(result[2][1], expected_21, places=5)
self.assertAlmostEqual(result[2][2], expected_22, places=5)
result = wp.mul(wp.mat33h(*a_values), wp.float16(b_value))
self.assertNotAlmostEqual(result[0][0], expected_00, places=5)
self.assertNotAlmostEqual(result[0][1], expected_01, places=5)
self.assertNotAlmostEqual(result[0][2], expected_02, places=5)
self.assertNotAlmostEqual(result[1][0], expected_10, places=5)
self.assertNotAlmostEqual(result[1][1], expected_11, places=5)
self.assertNotAlmostEqual(result[1][2], expected_12, places=5)
self.assertNotAlmostEqual(result[2][0], expected_20, places=5)
self.assertNotAlmostEqual(result[2][1], expected_21, places=5)
self.assertNotAlmostEqual(result[2][2], expected_22, places=5)
self.assertAlmostEqual(result[0][0], expected_00, places=1)
self.assertAlmostEqual(result[0][1], expected_01, places=1)
self.assertAlmostEqual(result[0][2], expected_02, places=1)
self.assertAlmostEqual(result[1][0], expected_10, places=1)
self.assertAlmostEqual(result[1][1], expected_11, places=1)
self.assertAlmostEqual(result[1][2], expected_12, places=1)
self.assertAlmostEqual(result[2][0], expected_20, places=1)
self.assertAlmostEqual(result[2][1], expected_21, places=1)
self.assertAlmostEqual(result[2][2], expected_22, places=1)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
self.assertEqual(wp.mul(a_values, b_value), wp.mul(wp.mat33f(*a_values), wp.float32(b_value)))
self.assertEqual(wp.mul(a_values_2d, b_value), wp.mul(wp.mat33f(*a_values), wp.float32(b_value)))
def test_mat44_float_args_precision(self):
a_values = (
1.23,
2.34,
3.45,
4.56,
5.67,
6.78,
7.89,
8.90,
9.01,
10.12,
11.23,
12.34,
13.45,
14.56,
15.67,
16.78,
)
a_values_2d = (a_values[0:4], a_values[4:8], a_values[8:12], a_values[12:16])
b_value = 0.12
expected_00 = 0.14759999999999998122
expected_01 = 0.28079999999999999405
expected_02 = 0.41399999999999997913
expected_03 = 0.54719999999999990870
expected_10 = 0.68040000000000000480
expected_11 = 0.81359999999999998987
expected_12 = 0.94679999999999997495
expected_13 = 1.06800000000000006040
expected_20 = 1.08119999999999993889
expected_21 = 1.21439999999999992397
expected_22 = 1.34759999999999990905
expected_23 = 1.48079999999999989413
expected_30 = 1.61399999999999987921
expected_31 = 1.74720000000000008633
expected_32 = 1.88039999999999984936
expected_33 = 2.01360000000000027853
result = wp.mul(wp.mat44d(*a_values), wp.float64(b_value))
self.assertAlmostEqual(result[0][0], expected_00, places=12)
self.assertAlmostEqual(result[0][1], expected_01, places=12)
self.assertAlmostEqual(result[0][2], expected_02, places=12)
self.assertAlmostEqual(result[0][3], expected_03, places=12)
self.assertAlmostEqual(result[1][0], expected_10, places=12)
self.assertAlmostEqual(result[1][1], expected_11, places=12)
self.assertAlmostEqual(result[1][2], expected_12, places=12)
self.assertAlmostEqual(result[1][3], expected_13, places=12)
self.assertAlmostEqual(result[2][0], expected_20, places=12)
self.assertAlmostEqual(result[2][1], expected_21, places=12)
self.assertAlmostEqual(result[2][2], expected_22, places=12)
self.assertAlmostEqual(result[2][3], expected_23, places=12)
self.assertAlmostEqual(result[3][0], expected_30, places=12)
self.assertAlmostEqual(result[3][1], expected_31, places=12)
self.assertAlmostEqual(result[3][2], expected_32, places=12)
self.assertAlmostEqual(result[3][3], expected_33, places=12)
result = wp.mul(wp.mat44f(*a_values), wp.float32(b_value))
self.assertNotAlmostEqual(result[0][0], expected_00, places=12)
self.assertNotAlmostEqual(result[0][1], expected_01, places=12)
self.assertNotAlmostEqual(result[0][2], expected_02, places=12)
self.assertNotAlmostEqual(result[0][3], expected_03, places=12)
self.assertNotAlmostEqual(result[1][0], expected_10, places=12)
self.assertNotAlmostEqual(result[1][1], expected_11, places=12)
self.assertNotAlmostEqual(result[1][2], expected_12, places=12)
self.assertNotAlmostEqual(result[1][3], expected_13, places=12)
self.assertNotAlmostEqual(result[2][0], expected_20, places=12)
self.assertNotAlmostEqual(result[2][1], expected_21, places=12)
self.assertNotAlmostEqual(result[2][2], expected_22, places=12)
self.assertNotAlmostEqual(result[2][3], expected_23, places=12)
self.assertNotAlmostEqual(result[3][0], expected_30, places=12)
self.assertNotAlmostEqual(result[3][1], expected_31, places=12)
self.assertNotAlmostEqual(result[3][2], expected_32, places=12)
self.assertNotAlmostEqual(result[3][3], expected_33, places=12)
self.assertAlmostEqual(result[0][0], expected_00, places=5)
self.assertAlmostEqual(result[0][1], expected_01, places=5)
self.assertAlmostEqual(result[0][2], expected_02, places=5)
self.assertAlmostEqual(result[0][3], expected_03, places=5)
self.assertAlmostEqual(result[1][0], expected_10, places=5)
self.assertAlmostEqual(result[1][1], expected_11, places=5)
self.assertAlmostEqual(result[1][2], expected_12, places=5)
self.assertAlmostEqual(result[1][3], expected_13, places=5)
self.assertAlmostEqual(result[2][0], expected_20, places=5)
self.assertAlmostEqual(result[2][1], expected_21, places=5)
self.assertAlmostEqual(result[2][2], expected_22, places=5)
self.assertAlmostEqual(result[2][3], expected_23, places=5)
self.assertAlmostEqual(result[3][0], expected_30, places=5)
self.assertAlmostEqual(result[3][1], expected_31, places=5)
self.assertAlmostEqual(result[3][2], expected_32, places=5)
self.assertAlmostEqual(result[3][3], expected_33, places=5)
result = wp.mul(wp.mat44h(*a_values), wp.float16(b_value))
self.assertNotAlmostEqual(result[0][0], expected_00, places=5)
self.assertNotAlmostEqual(result[0][1], expected_01, places=5)
self.assertNotAlmostEqual(result[0][2], expected_02, places=5)
self.assertNotAlmostEqual(result[0][3], expected_03, places=5)
self.assertNotAlmostEqual(result[1][0], expected_10, places=5)
self.assertNotAlmostEqual(result[1][1], expected_11, places=5)
self.assertNotAlmostEqual(result[1][2], expected_12, places=5)
self.assertNotAlmostEqual(result[1][3], expected_13, places=5)
self.assertNotAlmostEqual(result[2][0], expected_20, places=5)
self.assertNotAlmostEqual(result[2][1], expected_21, places=5)
self.assertNotAlmostEqual(result[2][2], expected_22, places=5)
self.assertNotAlmostEqual(result[2][3], expected_23, places=5)
self.assertNotAlmostEqual(result[3][0], expected_30, places=5)
self.assertNotAlmostEqual(result[3][1], expected_31, places=5)
self.assertNotAlmostEqual(result[3][2], expected_32, places=5)
self.assertNotAlmostEqual(result[3][3], expected_33, places=5)
self.assertAlmostEqual(result[0][0], expected_00, places=1)
self.assertAlmostEqual(result[0][1], expected_01, places=1)
self.assertAlmostEqual(result[0][2], expected_02, places=1)
self.assertAlmostEqual(result[0][3], expected_03, places=1)
self.assertAlmostEqual(result[1][0], expected_10, places=1)
self.assertAlmostEqual(result[1][1], expected_11, places=1)
self.assertAlmostEqual(result[1][2], expected_12, places=1)
self.assertAlmostEqual(result[1][3], expected_13, places=1)
self.assertAlmostEqual(result[2][0], expected_20, places=1)
self.assertAlmostEqual(result[2][1], expected_21, places=1)
self.assertAlmostEqual(result[2][2], expected_22, places=1)
self.assertAlmostEqual(result[2][3], expected_23, places=1)
self.assertAlmostEqual(result[3][0], expected_30, places=1)
self.assertAlmostEqual(result[3][1], expected_31, places=1)
self.assertAlmostEqual(result[3][2], expected_32, places=1)
self.assertAlmostEqual(result[3][3], expected_33, places=1)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
self.assertEqual(wp.mul(a_values, b_value), wp.mul(wp.mat44f(*a_values), wp.float32(b_value)))
self.assertEqual(wp.mul(a_values_2d, b_value), wp.mul(wp.mat44f(*a_values), wp.float32(b_value)))
def test_vec2_arg_precision(self):
values = (1.23, 2.34)
expected = 2.64357712200722438922
result = wp.length(wp.vec2d(*values))
self.assertAlmostEqual(result, expected, places=12)
result = wp.length(wp.vec2f(*values))
self.assertNotAlmostEqual(result, expected, places=12)
self.assertAlmostEqual(result, expected, places=5)
result = wp.length(wp.vec2h(*values))
self.assertNotAlmostEqual(result, expected, places=5)
self.assertAlmostEqual(result, expected, places=1)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
self.assertEqual(wp.length(values), wp.length(wp.vec2f(*values)))
def test_vec2_arg_overflow(self):
values = (-1234567890, -1234567890)
self.assertEqual(wp.length_sq(wp.vec2b(*values)), -120)
self.assertEqual(wp.length_sq(wp.vec2s(*values)), -6008)
self.assertEqual(wp.length_sq(wp.vec2i(*values)), 608168072)
self.assertEqual(wp.length_sq(wp.vec2l(*values)), 3048315750038104200)
self.assertEqual(wp.length_sq(wp.vec2ub(*values)), 136)
self.assertEqual(wp.length_sq(wp.vec2us(*values)), 59528)
self.assertEqual(wp.length_sq(wp.vec2ui(*values)), 608168072)
self.assertEqual(wp.length_sq(wp.vec2ul(*values)), 3048315750038104200)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
self.assertEqual(wp.length_sq(values), wp.length_sq(wp.vec2i(*values)))
def test_vec3_arg_precision(self):
values = (1.23, 2.34, 3.45)
expected = 4.34637780226247727455
result = wp.length(wp.vec3d(*values))
self.assertAlmostEqual(result, expected, places=12)
result = wp.length(wp.vec3f(*values))
self.assertNotAlmostEqual(result, expected, places=12)
self.assertAlmostEqual(result, expected, places=5)
result = wp.length(wp.vec3h(*values))
self.assertNotAlmostEqual(result, expected, places=5)
self.assertAlmostEqual(result, expected, places=1)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
self.assertEqual(wp.length(values), wp.length(wp.vec3f(*values)))
def test_vec3_arg_overflow(self):
values = (-1234567890, -1234567890, -1234567890)
self.assertEqual(wp.length_sq(wp.vec3b(*values)), -52)
self.assertEqual(wp.length_sq(wp.vec3s(*values)), -9012)
self.assertEqual(wp.length_sq(wp.vec3i(*values)), 912252108)
self.assertEqual(wp.length_sq(wp.vec3l(*values)), 4572473625057156300)
self.assertEqual(wp.length_sq(wp.vec3ub(*values)), 204)
self.assertEqual(wp.length_sq(wp.vec3us(*values)), 56524)
self.assertEqual(wp.length_sq(wp.vec3ui(*values)), 912252108)
self.assertEqual(wp.length_sq(wp.vec3ul(*values)), 4572473625057156300)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
self.assertEqual(wp.length_sq(values), wp.length_sq(wp.vec3i(*values)))
def test_vec4_arg_precision(self):
values = (1.23, 2.34, 3.45, 4.56)
expected = 6.29957141399317777086
result = wp.length(wp.vec4d(*values))
self.assertAlmostEqual(result, expected, places=12)
result = wp.length(wp.vec4f(*values))
self.assertNotAlmostEqual(result, expected, places=12)
self.assertAlmostEqual(result, expected, places=5)
result = wp.length(wp.vec4h(*values))
self.assertNotAlmostEqual(result, expected, places=5)
self.assertAlmostEqual(result, expected, places=1)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
self.assertEqual(wp.length(values), wp.length(wp.vec4f(*values)))
def test_vec4_arg_overflow(self):
values = (-1234567890, -1234567890, -1234567890, -1234567890)
self.assertEqual(wp.length_sq(wp.vec4b(*values)), 16)
self.assertEqual(wp.length_sq(wp.vec4s(*values)), -12016)
self.assertEqual(wp.length_sq(wp.vec4i(*values)), 1216336144)
self.assertEqual(wp.length_sq(wp.vec4l(*values)), 6096631500076208400)
self.assertEqual(wp.length_sq(wp.vec4ub(*values)), 16)
self.assertEqual(wp.length_sq(wp.vec4us(*values)), 53520)
self.assertEqual(wp.length_sq(wp.vec4ui(*values)), 1216336144)
self.assertEqual(wp.length_sq(wp.vec4ul(*values)), 6096631500076208400)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
self.assertEqual(wp.length_sq(values), wp.length_sq(wp.vec4i(*values)))
def test_vec2_vec2_args_precision(self):
a_values = (1.23, 2.34)
b_values = (3.45, 4.56)
expected = 14.91389999999999815827
result = wp.dot(wp.vec2d(*a_values), wp.vec2d(*b_values))
self.assertAlmostEqual(result, expected, places=12)
result = wp.dot(wp.vec2f(*a_values), wp.vec2f(*b_values))
self.assertNotAlmostEqual(result, expected, places=12)
self.assertAlmostEqual(result, expected, places=5)
result = wp.dot(wp.vec2h(*a_values), wp.vec2h(*b_values))
self.assertNotAlmostEqual(result, expected, places=5)
self.assertAlmostEqual(result, expected, places=1)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
self.assertEqual(wp.dot(a_values, b_values), wp.dot(wp.vec2f(*a_values), wp.vec2f(*b_values)))
def test_vec2_vec2_args_overflow(self):
values = (-1234567890, -1234567890)
self.assertEqual(wp.dot(wp.vec2b(*values), wp.vec2b(*values)), -120)
self.assertEqual(wp.dot(wp.vec2s(*values), wp.vec2s(*values)), -6008)
self.assertEqual(wp.dot(wp.vec2i(*values), wp.vec2i(*values)), 608168072)
self.assertEqual(wp.dot(wp.vec2l(*values), wp.vec2l(*values)), 3048315750038104200)
self.assertEqual(wp.dot(wp.vec2ub(*values), wp.vec2ub(*values)), 136)
self.assertEqual(wp.dot(wp.vec2us(*values), wp.vec2us(*values)), 59528)
self.assertEqual(wp.dot(wp.vec2ui(*values), wp.vec2ui(*values)), 608168072)
self.assertEqual(wp.dot(wp.vec2ul(*values), wp.vec2ul(*values)), 3048315750038104200)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
self.assertEqual(wp.dot(values, values), wp.dot(wp.vec2i(*values), wp.vec2i(*values)))
def test_vec3_vec3_args_precision(self):
a_values = (1.23, 2.34, 3.45)
b_values = (4.56, 5.67, 6.78)
expected = 42.26760000000000161435
result = wp.dot(wp.vec3d(*a_values), wp.vec3d(*b_values))
self.assertAlmostEqual(result, expected, places=12)
result = wp.dot(wp.vec3f(*a_values), wp.vec3f(*b_values))
self.assertNotAlmostEqual(result, expected, places=12)
self.assertAlmostEqual(result, expected, places=5)
result = wp.dot(wp.vec3h(*a_values), wp.vec3h(*b_values))
self.assertNotAlmostEqual(result, expected, places=5)
self.assertAlmostEqual(result, expected, places=1)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
self.assertEqual(wp.dot(a_values, b_values), wp.dot(wp.vec3f(*a_values), wp.vec3f(*b_values)))
def test_vec3_vec3_args_overflow(self):
values = (-1234567890, -1234567890, -1234567890)
self.assertEqual(wp.dot(wp.vec3b(*values), wp.vec3b(*values)), -52)
self.assertEqual(wp.dot(wp.vec3s(*values), wp.vec3s(*values)), -9012)
self.assertEqual(wp.dot(wp.vec3i(*values), wp.vec3i(*values)), 912252108)
self.assertEqual(wp.dot(wp.vec3l(*values), wp.vec3l(*values)), 4572473625057156300)
self.assertEqual(wp.dot(wp.vec3ub(*values), wp.vec3ub(*values)), 204)
self.assertEqual(wp.dot(wp.vec3us(*values), wp.vec3us(*values)), 56524)
self.assertEqual(wp.dot(wp.vec3ui(*values), wp.vec3ui(*values)), 912252108)
self.assertEqual(wp.dot(wp.vec3ul(*values), wp.vec3ul(*values)), 4572473625057156300)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
self.assertEqual(wp.dot(values, values), wp.dot(wp.vec3i(*values), wp.vec3i(*values)))
def test_vec4_vec4_args_precision(self):
a_values = (1.23, 2.34, 3.45, 4.56)
b_values = (5.67, 6.78, 7.89, 8.90)
expected = 90.64379999999999881766
result = wp.dot(wp.vec4d(*a_values), wp.vec4d(*b_values))
self.assertAlmostEqual(result, expected, places=12)
result = wp.dot(wp.vec4f(*a_values), wp.vec4f(*b_values))
self.assertNotAlmostEqual(result, expected, places=12)
self.assertAlmostEqual(result, expected, places=5)
result = wp.dot(wp.vec4h(*a_values), wp.vec4h(*b_values))
self.assertNotAlmostEqual(result, expected, places=5)
self.assertAlmostEqual(result, expected, places=1)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
self.assertEqual(wp.dot(a_values, b_values), wp.dot(wp.vec4f(*a_values), wp.vec4f(*b_values)))
def test_vec4_vec4_args_overflow(self):
values = (-1234567890, -1234567890, -1234567890, -1234567890)
self.assertEqual(wp.dot(wp.vec4b(*values), wp.vec4b(*values)), 16)
self.assertEqual(wp.dot(wp.vec4s(*values), wp.vec4s(*values)), -12016)
self.assertEqual(wp.dot(wp.vec4i(*values), wp.vec4i(*values)), 1216336144)
self.assertEqual(wp.dot(wp.vec4l(*values), wp.vec4l(*values)), 6096631500076208400)
self.assertEqual(wp.dot(wp.vec4ub(*values), wp.vec4ub(*values)), 16)
self.assertEqual(wp.dot(wp.vec4us(*values), wp.vec4us(*values)), 53520)
self.assertEqual(wp.dot(wp.vec4ui(*values), wp.vec4ui(*values)), 1216336144)
self.assertEqual(wp.dot(wp.vec4ul(*values), wp.vec4ul(*values)), 6096631500076208400)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
self.assertEqual(wp.dot(values, values), wp.dot(wp.vec4i(*values), wp.vec4i(*values)))
def test_vec2_float_args_precision(self):
a_values = (1.23, 2.34)
b_value = 3.45
expected_x = 4.24350000000000004974
expected_y = 8.07300000000000039790
result = wp.mul(wp.vec2d(*a_values), wp.float64(b_value))
self.assertAlmostEqual(result[0], expected_x, places=12)
self.assertAlmostEqual(result[1], expected_y, places=12)
result = wp.mul(wp.vec2f(*a_values), wp.float32(b_value))
self.assertNotAlmostEqual(result[0], expected_x, places=12)
self.assertNotAlmostEqual(result[1], expected_y, places=12)
self.assertAlmostEqual(result[0], expected_x, places=5)
self.assertAlmostEqual(result[1], expected_y, places=5)
result = wp.mul(wp.vec2h(*a_values), wp.float16(b_value))
self.assertNotAlmostEqual(result[0], expected_x, places=5)
self.assertNotAlmostEqual(result[1], expected_y, places=5)
self.assertAlmostEqual(result[0], expected_x, places=1)
self.assertAlmostEqual(result[1], expected_y, places=1)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
self.assertEqual(wp.mul(a_values, b_value), wp.mul(wp.vec2f(*a_values), wp.float32(b_value)))
def test_vec3_float_args_precision(self):
a_values = (1.23, 2.34, 3.45)
b_value = 4.56
expected_x = 5.60879999999999956373
expected_y = 10.67039999999999899671
expected_z = 15.73199999999999931788
result = wp.mul(wp.vec3d(*a_values), wp.float64(b_value))
self.assertAlmostEqual(result[0], expected_x, places=12)
self.assertAlmostEqual(result[1], expected_y, places=12)
self.assertAlmostEqual(result[2], expected_z, places=12)
result = wp.mul(wp.vec3f(*a_values), wp.float32(b_value))
self.assertNotAlmostEqual(result[0], expected_x, places=12)
self.assertNotAlmostEqual(result[1], expected_y, places=12)
self.assertNotAlmostEqual(result[2], expected_z, places=12)
self.assertAlmostEqual(result[0], expected_x, places=5)
self.assertAlmostEqual(result[1], expected_y, places=5)
self.assertAlmostEqual(result[2], expected_z, places=5)
result = wp.mul(wp.vec3h(*a_values), wp.float16(b_value))
self.assertNotAlmostEqual(result[0], expected_x, places=5)
self.assertNotAlmostEqual(result[1], expected_y, places=5)
self.assertNotAlmostEqual(result[2], expected_z, places=5)
self.assertAlmostEqual(result[0], expected_x, places=1)
self.assertAlmostEqual(result[1], expected_y, places=1)
self.assertAlmostEqual(result[2], expected_z, places=1)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
self.assertEqual(wp.mul(a_values, b_value), wp.mul(wp.vec3f(*a_values), wp.float32(b_value)))
def test_vec4_float_args_precision(self):
a_values = (1.23, 2.34, 3.45, 4.56)
b_value = 5.67
expected_x = 6.97409999999999996589
expected_y = 13.26779999999999937188
expected_z = 19.56150000000000233058
expected_w = 25.85519999999999640750
result = wp.mul(wp.vec4d(*a_values), wp.float64(b_value))
self.assertAlmostEqual(result[0], expected_x, places=12)
self.assertAlmostEqual(result[1], expected_y, places=12)
self.assertAlmostEqual(result[2], expected_z, places=12)
self.assertAlmostEqual(result[3], expected_w, places=12)
result = wp.mul(wp.vec4f(*a_values), wp.float32(b_value))
self.assertNotAlmostEqual(result[0], expected_x, places=12)
self.assertNotAlmostEqual(result[1], expected_y, places=12)
self.assertNotAlmostEqual(result[2], expected_z, places=12)
self.assertNotAlmostEqual(result[3], expected_w, places=12)
self.assertAlmostEqual(result[0], expected_x, places=5)
self.assertAlmostEqual(result[1], expected_y, places=5)
self.assertAlmostEqual(result[2], expected_z, places=5)
self.assertAlmostEqual(result[3], expected_w, places=5)
result = wp.mul(wp.vec4h(*a_values), wp.float16(b_value))
self.assertNotAlmostEqual(result[0], expected_x, places=5)
self.assertNotAlmostEqual(result[1], expected_y, places=5)
self.assertNotAlmostEqual(result[2], expected_z, places=5)
self.assertNotAlmostEqual(result[3], expected_w, places=5)
self.assertAlmostEqual(result[0], expected_x, places=1)
self.assertAlmostEqual(result[1], expected_y, places=1)
self.assertAlmostEqual(result[2], expected_z, places=1)
self.assertAlmostEqual(result[3], expected_w, places=1)
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
self.assertEqual(wp.mul(a_values, b_value), wp.mul(wp.vec4f(*a_values), wp.float32(b_value)))
for dtype in wp.types.int_types:
add_function_test(
TestBuiltinsResolution,
f"test_int_arg_support_{dtype.__name__}",
test_int_arg_support,
dtype=dtype,
)
add_function_test(
TestBuiltinsResolution,
f"test_int_int_args_support_{dtype.__name__}",
test_int_int_args_support,
dtype=dtype,
)
for dtype in wp.types.float_types:
add_function_test(
TestBuiltinsResolution,
f"test_float_arg_support_{dtype.__name__}",
test_float_arg_support,
dtype=dtype,
)
add_function_test(
TestBuiltinsResolution,
f"test_mat_arg_support_{dtype.__name__}",
test_mat_arg_support,
dtype=dtype,
)
add_function_test(
TestBuiltinsResolution,
f"test_mat_mat_args_support_{dtype.__name__}",
test_mat_mat_args_support,
dtype=dtype,
)
add_function_test(
TestBuiltinsResolution,
f"test_mat_float_args_support_{dtype.__name__}",
test_mat_float_args_support,
dtype=dtype,
)
add_function_test(
TestBuiltinsResolution,
f"test_vec_arg_support_{dtype.__name__}",
test_vec_arg_support,
dtype=dtype,
)
add_function_test(
TestBuiltinsResolution,
f"test_vec_vec_args_support_{dtype.__name__}",
test_vec_vec_args_support,
dtype=dtype,
)
add_function_test(
TestBuiltinsResolution,
f"test_vec_float_args_support_{dtype.__name__}",
test_vec_float_args_support,
dtype=dtype,
)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 63,995 | Python | 48.570875 | 120 | 0.632503 |
NVIDIA/warp/warp/tests/test_hash_grid.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
num_points = 4096
dim_x = 128
dim_y = 128
dim_z = 128
scale = 150.0
cell_radius = 8.0
query_radius = 8.0
num_runs = 4
print_enabled = False
@wp.kernel
def count_neighbors(grid: wp.uint64, radius: float, points: wp.array(dtype=wp.vec3), counts: wp.array(dtype=int)):
tid = wp.tid()
# order threads by cell
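    # hash_grid_point_id maps the launch thread index to a point index sorted by grid cell,
    # so neighboring threads process spatially nearby points for better memory coherence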
i = wp.hash_grid_point_id(grid, tid)
# query point
p = points[i]
count = int(0)
# construct query around point p
for index in wp.hash_grid_query(grid, p, radius):
# compute distance to point
d = wp.length(p - points[index])
if d <= radius:
count += 1
counts[i] = count
@wp.kernel
def count_neighbors_reference(
radius: float, points: wp.array(dtype=wp.vec3), counts: wp.array(dtype=int), num_points: int
):
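    # brute-force O(N^2) reference: one thread per (i, j) pair of points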
tid = wp.tid()
i = tid % num_points
j = tid // num_points
# query point
p = points[i]
q = points[j]
# compute distance to point
d = wp.length(p - q)
if d <= radius:
wp.atomic_add(counts, i, 1)
def particle_grid(dim_x, dim_y, dim_z, lower, radius, jitter):
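    # build a regular dim_x * dim_y * dim_z grid of points spaced roughly 2 * radius apart,
    # offset by `lower` and perturbed by a deterministic random jitter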
rng = np.random.default_rng(123)
points = np.meshgrid(np.linspace(0, dim_x, dim_x), np.linspace(0, dim_y, dim_y), np.linspace(0, dim_z, dim_z))
points_t = np.array((points[0], points[1], points[2])).T * radius * 2.0 + np.array(lower)
points_t = points_t + rng.random(size=points_t.shape) * radius * jitter
return points_t.reshape((-1, 3))
def test_hashgrid_query(test, device):
wp.load_module(device=device)
grid = wp.HashGrid(dim_x, dim_y, dim_z, device)
for i in range(num_runs):
if print_enabled:
print(f"Run: {i+1}")
print("---------")
points = particle_grid(16, 32, 16, (0.0, 0.3, 0.0), cell_radius * 0.25, 0.1)
points_arr = wp.array(points, dtype=wp.vec3, device=device)
counts_arr = wp.zeros(len(points), dtype=int, device=device)
counts_arr_ref = wp.zeros(len(points), dtype=int, device=device)
profiler = {}
with wp.ScopedTimer("grid operations", print=print_enabled, dict=profiler, synchronize=True):
with wp.ScopedTimer("brute", print=print_enabled, dict=profiler, synchronize=True):
wp.launch(
kernel=count_neighbors_reference,
dim=len(points) * len(points),
inputs=[query_radius, points_arr, counts_arr_ref, len(points)],
device=device,
)
wp.synchronize_device(device)
with wp.ScopedTimer("grid build", print=print_enabled, dict=profiler, synchronize=True):
grid.build(points_arr, cell_radius)
with wp.ScopedTimer("grid query", print=print_enabled, dict=profiler, synchronize=True):
wp.launch(
kernel=count_neighbors,
dim=len(points),
inputs=[grid.id, query_radius, points_arr, counts_arr],
device=device,
)
counts = counts_arr.numpy()
counts_ref = counts_arr_ref.numpy()
if print_enabled:
print(f"Grid min: {np.min(counts)} max: {np.max(counts)} avg: {np.mean(counts)}")
print(f"Ref min: {np.min(counts_ref)} max: {np.max(counts_ref)} avg: {np.mean(counts_ref)}")
print(f"Passed: {np.array_equal(counts, counts_ref)}")
assert_np_equal(counts, counts_ref)
def test_hashgrid_inputs(test, device):
points = particle_grid(16, 32, 16, (0.0, 0.3, 0.0), cell_radius * 0.25, 0.1)
points_ref = wp.array(points, dtype=wp.vec3, device=device)
counts_ref = wp.zeros(len(points), dtype=int, device=device)
grid = wp.HashGrid(dim_x, dim_y, dim_z, device)
grid.build(points_ref, cell_radius)
# get reference counts
wp.launch(
kernel=count_neighbors, dim=len(points), inputs=[grid.id, query_radius, points_ref, counts_ref], device=device
)
# test with strided 1d input arrays
for stride in [2, 3]:
with test.subTest(msg=f"stride_{stride}"):
points_buffer = wp.zeros(len(points) * stride, dtype=wp.vec3, device=device)
points_strided = points_buffer[::stride]
wp.copy(points_strided, points_ref)
counts_strided = wp.zeros(len(points), dtype=int, device=device)
grid = wp.HashGrid(dim_x, dim_y, dim_z, device)
grid.build(points_strided, cell_radius)
wp.launch(
kernel=count_neighbors,
dim=len(points),
inputs=[grid.id, query_radius, points_ref, counts_strided],
device=device,
)
assert_array_equal(counts_strided, counts_ref)
# test with multidimensional input arrays
for ndim in [2, 3, 4]:
with test.subTest(msg=f"ndim_{ndim}"):
shape = (len(points) // (2 ** (ndim - 1)), *((ndim - 1) * (2,)))
points_ndim = wp.zeros(shape, dtype=wp.vec3, device=device)
wp.copy(points_ndim, points_ref)
counts_ndim = wp.zeros(len(points), dtype=int, device=device)
grid = wp.HashGrid(dim_x, dim_y, dim_z, device)
grid.build(points_ndim, cell_radius)
wp.launch(
kernel=count_neighbors,
dim=len(points),
inputs=[grid.id, query_radius, points_ref, counts_ndim],
device=device,
)
assert_array_equal(counts_ndim, counts_ref)
devices = get_test_devices()
class TestHashGrid(unittest.TestCase):
def test_hashgrid_codegen_adjoints_with_select(self):
def kernel_fn(grid: wp.uint64):
v = wp.vec3(0.0, 0.0, 0.0)
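            # both branches are intentionally identical; building the kernel is enough to
            # exercise adjoint code generation for hash grid queries behind a select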
if True:
query = wp.hash_grid_query(grid, v, 0.0)
else:
query = wp.hash_grid_query(grid, v, 0.0)
wp.Kernel(func=kernel_fn)
add_function_test(TestHashGrid, "test_hashgrid_query", test_hashgrid_query, devices=devices)
add_function_test(TestHashGrid, "test_hashgrid_inputs", test_hashgrid_inputs, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=False)
| 6,741 | Python | 31.258373 | 118 | 0.598873 |
NVIDIA/warp/warp/tests/test_peer.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import warp as wp
from warp.tests.unittest_utils import *
def get_device_pair_with_peer_access_support():
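    # find an ordered pair of distinct CUDA devices for which peer access is supported, or None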
devices = wp.get_cuda_devices()
for target_device in devices:
for peer_device in devices:
if target_device != peer_device:
if wp.is_peer_access_supported(target_device, peer_device):
return (target_device, peer_device)
return None
def get_device_pair_without_peer_access_support():
devices = wp.get_cuda_devices()
for target_device in devices:
for peer_device in devices:
if target_device != peer_device:
if not wp.is_peer_access_supported(target_device, peer_device):
return (target_device, peer_device)
return None
def test_peer_access_self(test, device):
device = wp.get_device(device)
assert device.is_cuda
# device can access self
can_access = wp.is_peer_access_supported(device, device)
test.assertTrue(can_access)
# setting peer access to self is a no-op
wp.set_peer_access_enabled(device, device, True)
wp.set_peer_access_enabled(device, device, False)
# should always be enabled
enabled = wp.is_peer_access_enabled(device, device)
test.assertTrue(enabled)
@unittest.skipUnless(get_device_pair_with_peer_access_support(), "Requires devices with peer access support")
def test_peer_access(test, _):
target_device, peer_device = get_device_pair_with_peer_access_support()
was_enabled = wp.is_peer_access_enabled(target_device, peer_device)
if was_enabled:
# try disabling
wp.set_peer_access_enabled(target_device, peer_device, False)
is_enabled = wp.is_peer_access_enabled(target_device, peer_device)
test.assertFalse(is_enabled)
# try re-enabling
wp.set_peer_access_enabled(target_device, peer_device, True)
is_enabled = wp.is_peer_access_enabled(target_device, peer_device)
test.assertTrue(is_enabled)
else:
# try enabling
wp.set_peer_access_enabled(target_device, peer_device, True)
is_enabled = wp.is_peer_access_enabled(target_device, peer_device)
test.assertTrue(is_enabled)
# try re-disabling
wp.set_peer_access_enabled(target_device, peer_device, False)
is_enabled = wp.is_peer_access_enabled(target_device, peer_device)
test.assertFalse(is_enabled)
@unittest.skipUnless(get_device_pair_without_peer_access_support(), "Requires devices without peer access support")
def test_peer_access_exceptions_unsupported(test, _):
# get a CUDA device pair without peer access support
target_device, peer_device = get_device_pair_without_peer_access_support()
# querying is ok, but must return False
test.assertFalse(wp.is_peer_access_enabled(target_device, peer_device))
# enabling should raise RuntimeError
with test.assertRaises(RuntimeError):
wp.set_peer_access_enabled(target_device, peer_device, True)
# disabling should not raise an error
wp.set_peer_access_enabled(target_device, peer_device, False)
@unittest.skipUnless(wp.is_cpu_available() and wp.is_cuda_available(), "Requires both CUDA and CPU devices")
def test_peer_access_exceptions_cpu(test, _):
# querying is ok, but must return False
test.assertFalse(wp.is_peer_access_enabled("cuda:0", "cpu"))
test.assertFalse(wp.is_peer_access_enabled("cpu", "cuda:0"))
# enabling should raise ValueError
with test.assertRaises(ValueError):
wp.set_peer_access_enabled("cpu", "cuda:0", True)
with test.assertRaises(ValueError):
wp.set_peer_access_enabled("cuda:0", "cpu", True)
# disabling should not raise an error
wp.set_peer_access_enabled("cpu", "cuda:0", False)
wp.set_peer_access_enabled("cuda:0", "cpu", False)
class TestPeer(unittest.TestCase):
pass
cuda_test_devices = get_cuda_test_devices()
add_function_test(TestPeer, "test_peer_access_self", test_peer_access_self, devices=cuda_test_devices)
# peer access tests
add_function_test(TestPeer, "test_peer_access", test_peer_access)
# peer access exceptions
add_function_test(TestPeer, "test_peer_access_exceptions_unsupported", test_peer_access_exceptions_unsupported)
add_function_test(TestPeer, "test_peer_access_exceptions_cpu", test_peer_access_exceptions_cpu)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 4,873 | Python | 35.924242 | 115 | 0.704699 |
NVIDIA/warp/warp/tests/test_fem.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import unittest
import numpy as np
import warp as wp
import warp.fem as fem
from warp.fem import Coords, D, Domain, Field, Sample, curl, div, grad, integrand, normal
from warp.fem.cache import dynamic_kernel
from warp.fem.geometry import DeformedGeometry
from warp.fem.geometry.closest_point import project_on_tet_at_origin, project_on_tri_at_origin
from warp.fem.space import shape
from warp.fem.types import make_free_sample
from warp.fem.utils import grid_to_hexes, grid_to_quads, grid_to_tets, grid_to_tris
from warp.tests.unittest_utils import *
@integrand
def linear_form(s: Sample, u: Field):
return u(s)
def test_integrate_gradient(test, device):
with wp.ScopedDevice(device):
# Grid geometry
geo = fem.Grid2D(res=wp.vec2i(5))
# Domain and function spaces
domain = fem.Cells(geometry=geo)
quadrature = fem.RegularQuadrature(domain=domain, order=3)
scalar_space = fem.make_polynomial_space(geo, degree=3)
u = scalar_space.make_field()
u.dof_values = wp.zeros_like(u.dof_values, requires_grad=True)
result = wp.empty(dtype=wp.float64, shape=(1), requires_grad=True)
tape = wp.Tape()
# forward pass
with tape:
fem.integrate(linear_form, quadrature=quadrature, fields={"u": u}, output=result)
tape.backward(result)
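        # the gradient of integrate(u) with respect to the dof values of u is the load vector
        # of the same linear form assembled against test functions, so u.grad should match rhs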
test_field = fem.make_test(space=scalar_space, domain=domain)
rhs = fem.integrate(linear_form, quadrature=quadrature, fields={"u": test_field})
err = np.linalg.norm(rhs.numpy() - u.dof_values.grad.numpy())
test.assertLess(err, 1.0e-8)
@fem.integrand
def bilinear_field(s: fem.Sample, domain: fem.Domain):
x = domain(s)
return x[0] * x[1]
@fem.integrand
def grad_field(s: fem.Sample, p: fem.Field):
return fem.grad(p, s)
def test_interpolate_gradient(test, device):
with wp.ScopedDevice(device):
# Quad mesh with single element
# so we can test gradient with respect to vertex positions
positions = wp.array([[0.0, 0.0], [0.0, 2.0], [2.0, 0.0], [2.0, 2.0]], dtype=wp.vec2, requires_grad=True)
quads = wp.array([[0, 2, 3, 1]], dtype=int)
geo = fem.Quadmesh2D(quads, positions)
# Quadratic scalar space
scalar_space = fem.make_polynomial_space(geo, degree=2)
# Point-based vector space
        # so we can test the gradient with respect to the interpolation point position
point_coords = wp.array([[[0.5, 0.5, 0.0]]], dtype=fem.Coords, requires_grad=True)
interpolation_nodes = fem.PointBasisSpace(
fem.ExplicitQuadrature(domain=fem.Cells(geo), points=point_coords, weights=wp.array([[1.0]], dtype=float))
)
vector_space = fem.make_collocated_function_space(interpolation_nodes, dtype=wp.vec2)
# Initialize scalar field with known function
scalar_field = scalar_space.make_field()
scalar_field.dof_values.requires_grad = True
fem.interpolate(bilinear_field, dest=scalar_field)
# Interpolate gradient at center point
vector_field = vector_space.make_field()
vector_field.dof_values.requires_grad = True
vector_field_restriction = fem.make_restriction(vector_field)
tape = wp.Tape()
with tape:
fem.interpolate(
grad_field,
dest=vector_field_restriction,
fields={"p": scalar_field},
kernel_options={"enable_backward": True},
)
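        # scalar_field interpolates f(x, y) = x * y, so its gradient (y, x) at the cell center (1, 1) is (1, 1)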
assert_np_equal(vector_field.dof_values.numpy(), np.array([[1.0, 1.0]]))
vector_field.dof_values.grad.assign([1.0, 0.0])
tape.backward()
assert_np_equal(scalar_field.dof_values.grad.numpy(), np.array([0.0, 0.0, 0.0, 0.0, 0.0, -0.5, 0.0, 0.5, 0.0]))
assert_np_equal(
geo.positions.grad.numpy(),
np.array(
[
[0.25, 0.25],
[0.25, 0.25],
[-0.25, -0.25],
[-0.25, -0.25],
]
),
)
assert_np_equal(point_coords.grad.numpy(), np.array([[[0.0, 2.0, 0.0]]]))
tape.zero()
scalar_field.dof_values.grad.zero_()
geo.positions.grad.zero_()
point_coords.grad.zero_()
vector_field.dof_values.grad.assign([0.0, 1.0])
tape.backward()
assert_np_equal(scalar_field.dof_values.grad.numpy(), np.array([0.0, 0.0, 0.0, 0.0, -0.5, 0.0, 0.5, 0.0, 0.0]))
assert_np_equal(
geo.positions.grad.numpy(),
np.array(
[
[0.25, 0.25],
[-0.25, -0.25],
[0.25, 0.25],
[-0.25, -0.25],
]
),
)
assert_np_equal(point_coords.grad.numpy(), np.array([[[2.0, 0.0, 0.0]]]))
@integrand
def vector_divergence_form(s: Sample, u: Field, q: Field):
return div(u, s) * q(s)
@integrand
def vector_grad_form(s: Sample, u: Field, q: Field):
return wp.dot(u(s), grad(q, s))
@integrand
def vector_boundary_form(domain: Domain, s: Sample, u: Field, q: Field):
return wp.dot(u(s) * q(s), normal(domain, s))
def test_vector_divergence_theorem(test, device):
rng = np.random.default_rng(123)
with wp.ScopedDevice(device):
# Grid geometry
geo = fem.Grid2D(res=wp.vec2i(5))
# Domain and function spaces
interior = fem.Cells(geometry=geo)
boundary = fem.BoundarySides(geometry=geo)
vector_space = fem.make_polynomial_space(geo, degree=2, dtype=wp.vec2)
scalar_space = fem.make_polynomial_space(geo, degree=1, dtype=float)
u = vector_space.make_field()
u.dof_values = rng.random(size=(u.dof_values.shape[0], 2))
# Divergence theorem
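        # with q == 1, the cell integral of div(u) should equal the boundary integral of u . n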
constant_one = scalar_space.make_field()
constant_one.dof_values.fill_(1.0)
interior_quadrature = fem.RegularQuadrature(domain=interior, order=vector_space.degree)
boundary_quadrature = fem.RegularQuadrature(domain=boundary, order=vector_space.degree)
div_int = fem.integrate(
vector_divergence_form,
quadrature=interior_quadrature,
fields={"u": u, "q": constant_one},
kernel_options={"enable_backward": False},
)
boundary_int = fem.integrate(
vector_boundary_form,
quadrature=boundary_quadrature,
fields={"u": u.trace(), "q": constant_one.trace()},
kernel_options={"enable_backward": False},
)
test.assertAlmostEqual(div_int, boundary_int, places=5)
# Integration by parts
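        # the cell integrals of div(u) q and u . grad(q) should sum to the boundary integral of (u . n) q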
q = scalar_space.make_field()
q.dof_values = rng.random(size=q.dof_values.shape[0])
interior_quadrature = fem.RegularQuadrature(domain=interior, order=vector_space.degree + scalar_space.degree)
boundary_quadrature = fem.RegularQuadrature(domain=boundary, order=vector_space.degree + scalar_space.degree)
div_int = fem.integrate(
vector_divergence_form,
quadrature=interior_quadrature,
fields={"u": u, "q": q},
kernel_options={"enable_backward": False},
)
grad_int = fem.integrate(
vector_grad_form,
quadrature=interior_quadrature,
fields={"u": u, "q": q},
kernel_options={"enable_backward": False},
)
boundary_int = fem.integrate(
vector_boundary_form,
quadrature=boundary_quadrature,
fields={"u": u.trace(), "q": q.trace()},
kernel_options={"enable_backward": False},
)
test.assertAlmostEqual(div_int + grad_int, boundary_int, places=5)
@integrand
def tensor_divergence_form(s: Sample, tau: Field, v: Field):
return wp.dot(div(tau, s), v(s))
@integrand
def tensor_grad_form(s: Sample, tau: Field, v: Field):
return wp.ddot(wp.transpose(tau(s)), grad(v, s))
@integrand
def tensor_boundary_form(domain: Domain, s: Sample, tau: Field, v: Field):
return wp.dot(tau(s) * v(s), normal(domain, s))
def test_tensor_divergence_theorem(test, device):
rng = np.random.default_rng(123)
with wp.ScopedDevice(device):
# Grid geometry
geo = fem.Grid2D(res=wp.vec2i(5))
# Domain and function spaces
interior = fem.Cells(geometry=geo)
boundary = fem.BoundarySides(geometry=geo)
tensor_space = fem.make_polynomial_space(geo, degree=2, dtype=wp.mat22)
vector_space = fem.make_polynomial_space(geo, degree=1, dtype=wp.vec2)
tau = tensor_space.make_field()
tau.dof_values = rng.random(size=(tau.dof_values.shape[0], 2, 2))
# Divergence theorem
constant_vec = vector_space.make_field()
constant_vec.dof_values.fill_(wp.vec2(0.5, 2.0))
interior_quadrature = fem.RegularQuadrature(domain=interior, order=tensor_space.degree)
boundary_quadrature = fem.RegularQuadrature(domain=boundary, order=tensor_space.degree)
div_int = fem.integrate(
tensor_divergence_form,
quadrature=interior_quadrature,
fields={"tau": tau, "v": constant_vec},
kernel_options={"enable_backward": False},
)
boundary_int = fem.integrate(
tensor_boundary_form,
quadrature=boundary_quadrature,
fields={"tau": tau.trace(), "v": constant_vec.trace()},
kernel_options={"enable_backward": False},
)
test.assertAlmostEqual(div_int, boundary_int, places=5)
# Integration by parts
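        # tensor analogue of integration by parts: the divergence and gradient forms above
        # should sum to the boundary form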
v = vector_space.make_field()
v.dof_values = rng.random(size=(v.dof_values.shape[0], 2))
interior_quadrature = fem.RegularQuadrature(domain=interior, order=tensor_space.degree + vector_space.degree)
boundary_quadrature = fem.RegularQuadrature(domain=boundary, order=tensor_space.degree + vector_space.degree)
div_int = fem.integrate(
tensor_divergence_form,
quadrature=interior_quadrature,
fields={"tau": tau, "v": v},
kernel_options={"enable_backward": False},
)
grad_int = fem.integrate(
tensor_grad_form,
quadrature=interior_quadrature,
fields={"tau": tau, "v": v},
kernel_options={"enable_backward": False},
)
boundary_int = fem.integrate(
tensor_boundary_form,
quadrature=boundary_quadrature,
fields={"tau": tau.trace(), "v": v.trace()},
kernel_options={"enable_backward": False},
)
test.assertAlmostEqual(div_int + grad_int, boundary_int, places=5)
@integrand
def grad_decomposition(s: Sample, u: Field, v: Field):
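    # pointwise residual of splitting grad(u) * v into its symmetric-gradient (D) and curl
    # contributions; the test below checks that this residual integrates to (near) zero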
return wp.length_sq(grad(u, s) * v(s) - D(u, s) * v(s) - wp.cross(curl(u, s), v(s)))
def test_grad_decomposition(test, device):
rng = np.random.default_rng(123)
with wp.ScopedDevice(device):
# Grid geometry
geo = fem.Grid3D(res=wp.vec3i(5))
# Domain and function spaces
domain = fem.Cells(geometry=geo)
quadrature = fem.RegularQuadrature(domain=domain, order=4)
vector_space = fem.make_polynomial_space(geo, degree=2, dtype=wp.vec3)
u = vector_space.make_field()
u.dof_values = rng.random(size=(u.dof_values.shape[0], 3))
err = fem.integrate(grad_decomposition, quadrature=quadrature, fields={"u": u, "v": u})
test.assertLess(err, 1.0e-8)
def _gen_trimesh(N):
x = np.linspace(0.0, 1.0, N + 1)
y = np.linspace(0.0, 1.0, N + 1)
positions = np.transpose(np.meshgrid(x, y, indexing="ij")).reshape(-1, 2)
vidx = grid_to_tris(N, N)
return wp.array(positions, dtype=wp.vec2), wp.array(vidx, dtype=int)
def _gen_quadmesh(N):
x = np.linspace(0.0, 1.0, N + 1)
y = np.linspace(0.0, 1.0, N + 1)
positions = np.transpose(np.meshgrid(x, y, indexing="ij")).reshape(-1, 2)
vidx = grid_to_quads(N, N)
return wp.array(positions, dtype=wp.vec2), wp.array(vidx, dtype=int)
def _gen_tetmesh(N):
x = np.linspace(0.0, 1.0, N + 1)
y = np.linspace(0.0, 1.0, N + 1)
z = np.linspace(0.0, 1.0, N + 1)
positions = np.transpose(np.meshgrid(x, y, z, indexing="ij")).reshape(-1, 3)
vidx = grid_to_tets(N, N, N)
return wp.array(positions, dtype=wp.vec3), wp.array(vidx, dtype=int)
def _gen_hexmesh(N):
x = np.linspace(0.0, 1.0, N + 1)
y = np.linspace(0.0, 1.0, N + 1)
z = np.linspace(0.0, 1.0, N + 1)
positions = np.transpose(np.meshgrid(x, y, z, indexing="ij")).reshape(-1, 3)
vidx = grid_to_hexes(N, N, N)
return wp.array(positions, dtype=wp.vec3), wp.array(vidx, dtype=int)
def _launch_test_geometry_kernel(geo: fem.Geometry, device):
@dynamic_kernel(suffix=geo.name, kernel_options={"enable_backward": False})
def test_geo_cells_kernel(
cell_arg: geo.CellArg,
qps: wp.array(dtype=Coords),
qp_weights: wp.array(dtype=float),
cell_measures: wp.array(dtype=float),
):
cell_index, q = wp.tid()
coords = qps[q]
s = make_free_sample(cell_index, coords)
wp.atomic_add(cell_measures, cell_index, geo.cell_measure(cell_arg, s) * qp_weights[q])
REF_MEASURE = geo.reference_side().measure()
@dynamic_kernel(suffix=geo.name, kernel_options={"enable_backward": False, "max_unroll": 1})
def test_geo_sides_kernel(
side_arg: geo.SideArg,
qps: wp.array(dtype=Coords),
qp_weights: wp.array(dtype=float),
side_measures: wp.array(dtype=float),
):
side_index, q = wp.tid()
coords = qps[q]
s = make_free_sample(side_index, coords)
cell_arg = geo.side_to_cell_arg(side_arg)
inner_cell_index = geo.side_inner_cell_index(side_arg, side_index)
outer_cell_index = geo.side_outer_cell_index(side_arg, side_index)
inner_cell_coords = geo.side_inner_cell_coords(side_arg, side_index, coords)
outer_cell_coords = geo.side_outer_cell_coords(side_arg, side_index, coords)
inner_s = make_free_sample(inner_cell_index, inner_cell_coords)
outer_s = make_free_sample(outer_cell_index, outer_cell_coords)
pos_side = geo.side_position(side_arg, s)
pos_inner = geo.cell_position(cell_arg, inner_s)
pos_outer = geo.cell_position(cell_arg, outer_s)
for k in range(type(pos_side).length):
wp.expect_near(pos_side[k], pos_inner[k], 0.0001)
wp.expect_near(pos_side[k], pos_outer[k], 0.0001)
inner_side_coords = geo.side_from_cell_coords(side_arg, side_index, inner_cell_index, inner_cell_coords)
outer_side_coords = geo.side_from_cell_coords(side_arg, side_index, outer_cell_index, outer_cell_coords)
wp.expect_near(coords, inner_side_coords, 0.0001)
wp.expect_near(coords, outer_side_coords, 0.0001)
area = geo.side_measure(side_arg, s)
wp.atomic_add(side_measures, side_index, area * qp_weights[q])
# test consistency of side normal, measure, and deformation gradient
F = geo.side_deformation_gradient(side_arg, s)
F_det = DeformedGeometry._side_measure(F)
wp.expect_near(F_det * REF_MEASURE, area)
nor = geo.side_normal(side_arg, s)
F_cross = DeformedGeometry._side_normal(F)
for k in range(type(pos_side).length):
wp.expect_near(F_cross[k], nor[k], 0.0001)
cell_measures = wp.zeros(dtype=float, device=device, shape=geo.cell_count())
cell_quadrature = fem.RegularQuadrature(fem.Cells(geo), order=2)
cell_qps = wp.array(cell_quadrature.points, dtype=Coords, device=device)
cell_qp_weights = wp.array(cell_quadrature.weights, dtype=float, device=device)
wp.launch(
kernel=test_geo_cells_kernel,
dim=(geo.cell_count(), cell_qps.shape[0]),
inputs=[geo.cell_arg_value(device), cell_qps, cell_qp_weights, cell_measures],
device=device,
)
side_measures = wp.zeros(dtype=float, device=device, shape=geo.side_count())
side_quadrature = fem.RegularQuadrature(fem.Sides(geo), order=2)
side_qps = wp.array(side_quadrature.points, dtype=Coords, device=device)
side_qp_weights = wp.array(side_quadrature.weights, dtype=float, device=device)
wp.launch(
kernel=test_geo_sides_kernel,
dim=(geo.side_count(), side_qps.shape[0]),
inputs=[geo.side_arg_value(device), side_qps, side_qp_weights, side_measures],
device=device,
)
return side_measures, cell_measures
def test_grid_2d(test, device):
N = 3
geo = fem.Grid2D(res=wp.vec2i(N))
test.assertEqual(geo.cell_count(), N**2)
test.assertEqual(geo.vertex_count(), (N + 1) ** 2)
test.assertEqual(geo.side_count(), 2 * (N + 1) * N)
test.assertEqual(geo.boundary_side_count(), 4 * N)
side_measures, cell_measures = _launch_test_geometry_kernel(geo, device)
assert_np_equal(side_measures.numpy(), np.full(side_measures.shape, 1.0 / (N)), tol=1.0e-4)
assert_np_equal(cell_measures.numpy(), np.full(cell_measures.shape, 1.0 / (N**2)), tol=1.0e-4)
def test_triangle_mesh(test, device):
N = 3
with wp.ScopedDevice(device):
positions, tri_vidx = _gen_trimesh(N)
geo = fem.Trimesh2D(tri_vertex_indices=tri_vidx, positions=positions)
test.assertEqual(geo.cell_count(), 2 * (N) ** 2)
test.assertEqual(geo.vertex_count(), (N + 1) ** 2)
test.assertEqual(geo.side_count(), 2 * (N + 1) * N + (N**2))
test.assertEqual(geo.boundary_side_count(), 4 * N)
side_measures, cell_measures = _launch_test_geometry_kernel(geo, device)
assert_np_equal(cell_measures.numpy(), np.full(cell_measures.shape, 0.5 / (N**2)), tol=1.0e-4)
test.assertAlmostEqual(np.sum(side_measures.numpy()), 2 * (N + 1) + N * math.sqrt(2.0), places=4)
def test_quad_mesh(test, device):
N = 3
with wp.ScopedDevice(device):
positions, quad_vidx = _gen_quadmesh(N)
geo = fem.Quadmesh2D(quad_vertex_indices=quad_vidx, positions=positions)
test.assertEqual(geo.cell_count(), N**2)
test.assertEqual(geo.vertex_count(), (N + 1) ** 2)
test.assertEqual(geo.side_count(), 2 * (N + 1) * N)
test.assertEqual(geo.boundary_side_count(), 4 * N)
side_measures, cell_measures = _launch_test_geometry_kernel(geo, device)
assert_np_equal(side_measures.numpy(), np.full(side_measures.shape, 1.0 / (N)), tol=1.0e-4)
assert_np_equal(cell_measures.numpy(), np.full(cell_measures.shape, 1.0 / (N**2)), tol=1.0e-4)
def test_grid_3d(test, device):
N = 3
geo = fem.Grid3D(res=wp.vec3i(N))
test.assertEqual(geo.cell_count(), (N) ** 3)
test.assertEqual(geo.vertex_count(), (N + 1) ** 3)
test.assertEqual(geo.side_count(), 3 * (N + 1) * N**2)
test.assertEqual(geo.boundary_side_count(), 6 * N * N)
test.assertEqual(geo.edge_count(), 3 * N * (N + 1) ** 2)
side_measures, cell_measures = _launch_test_geometry_kernel(geo, device)
assert_np_equal(side_measures.numpy(), np.full(side_measures.shape, 1.0 / (N**2)), tol=1.0e-4)
assert_np_equal(cell_measures.numpy(), np.full(cell_measures.shape, 1.0 / (N**3)), tol=1.0e-4)
def test_tet_mesh(test, device):
N = 3
with wp.ScopedDevice(device):
positions, tet_vidx = _gen_tetmesh(N)
geo = fem.Tetmesh(tet_vertex_indices=tet_vidx, positions=positions)
test.assertEqual(geo.cell_count(), 5 * (N) ** 3)
test.assertEqual(geo.vertex_count(), (N + 1) ** 3)
test.assertEqual(geo.side_count(), 6 * (N + 1) * N**2 + (N**3) * 4)
test.assertEqual(geo.boundary_side_count(), 12 * N * N)
test.assertEqual(geo.edge_count(), 3 * N * (N + 1) * (2 * N + 1))
side_measures, cell_measures = _launch_test_geometry_kernel(geo, device)
test.assertAlmostEqual(np.sum(cell_measures.numpy()), 1.0, places=4)
test.assertAlmostEqual(np.sum(side_measures.numpy()), 0.5 * 6 * (N + 1) + N * 2 * math.sqrt(3.0), places=4)
def test_hex_mesh(test, device):
N = 3
with wp.ScopedDevice(device):
positions, hex_vidx = _gen_hexmesh(N)
geo = fem.Hexmesh(hex_vertex_indices=hex_vidx, positions=positions)
test.assertEqual(geo.cell_count(), (N) ** 3)
test.assertEqual(geo.vertex_count(), (N + 1) ** 3)
test.assertEqual(geo.side_count(), 3 * (N + 1) * N**2)
test.assertEqual(geo.boundary_side_count(), 6 * N * N)
test.assertEqual(geo.edge_count(), 3 * N * (N + 1) ** 2)
side_measures, cell_measures = _launch_test_geometry_kernel(geo, device)
assert_np_equal(side_measures.numpy(), np.full(side_measures.shape, 1.0 / (N**2)), tol=1.0e-4)
assert_np_equal(cell_measures.numpy(), np.full(cell_measures.shape, 1.0 / (N**3)), tol=1.0e-4)
def test_nanogrid(test, device):
N = 8
points = wp.array([[0.5, 0.5, 0.5]], dtype=float, device=device)
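# allocate_by_tiles creates one 8x8x8-voxel tile per requested point; with
# voxel_size = 1/N and N = 8, the single tile spans the unit cube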
volume = wp.Volume.allocate_by_tiles(
tile_points=points, voxel_size=1.0 / N, translation=(0.0, 0.0, 0.0), bg_value=None, device=device
)
geo = fem.Nanogrid(volume)
test.assertEqual(geo.cell_count(), (N) ** 3)
test.assertEqual(geo.vertex_count(), (N + 1) ** 3)
test.assertEqual(geo.side_count(), 3 * (N + 1) * N**2)
test.assertEqual(geo.boundary_side_count(), 6 * N * N)
test.assertEqual(geo.edge_count(), 3 * N * (N + 1) ** 2)
side_measures, cell_measures = _launch_test_geometry_kernel(geo, device)
assert_np_equal(side_measures.numpy(), np.full(side_measures.shape, 1.0 / (N**2)), tol=1.0e-4)
assert_np_equal(cell_measures.numpy(), np.full(cell_measures.shape, 1.0 / (N**3)), tol=1.0e-4)
@integrand
def _rigid_deformation_field(s: Sample, domain: Domain, translation: wp.vec3, rotation: wp.vec3, scale: float):
q = wp.quat_from_axis_angle(wp.normalize(rotation), wp.length(rotation))
return translation + scale * wp.quat_rotate(q, domain(s)) - domain(s)
def test_deformed_geometry(test, device):
N = 3
with wp.ScopedDevice(device):
positions, tet_vidx = _gen_tetmesh(N)
geo = fem.Tetmesh(tet_vertex_indices=tet_vidx, positions=positions)
translation = [1.0, 2.0, 3.0]
rotation = [0.0, math.pi / 4.0, 0.0]
scale = 2.0
vector_space = fem.make_polynomial_space(geo, dtype=wp.vec3, degree=2)
pos_field = vector_space.make_field()
fem.interpolate(
_rigid_deformation_field,
dest=pos_field,
values={"translation": translation, "rotation": rotation, "scale": scale},
)
deformed_geo = pos_field.make_deformed_geometry()
# the rigidly-deformed geometry shares the base mesh topology, so element counts are unchanged
test.assertEqual(geo.cell_count(), 5 * (N) ** 3)
test.assertEqual(geo.vertex_count(), (N + 1) ** 3)
test.assertEqual(geo.side_count(), 6 * (N + 1) * N**2 + (N**3) * 4)
test.assertEqual(geo.boundary_side_count(), 12 * N * N)
side_measures, cell_measures = _launch_test_geometry_kernel(deformed_geo, wp.get_device())
test.assertAlmostEqual(
np.sum(cell_measures.numpy()), scale**3, places=4, msg=f"cell_measures = {cell_measures.numpy()}"
)
test.assertAlmostEqual(
np.sum(side_measures.numpy()), scale**2 * (0.5 * 6 * (N + 1) + N * 2 * math.sqrt(3.0)), places=4
)
@wp.kernel
def _test_deformed_geometry_normal(
geo_index_arg: geo.SideIndexArg, geo_arg: geo.SideArg, def_arg: deformed_geo.SideArg, rotation: wp.vec3
):
i = wp.tid()
side_index = deformed_geo.boundary_side_index(geo_index_arg, i)
s = make_free_sample(side_index, Coords(0.5, 0.5, 0.0))
geo_n = geo.side_normal(geo_arg, s)
def_n = deformed_geo.side_normal(def_arg, s)
q = wp.quat_from_axis_angle(wp.normalize(rotation), wp.length(rotation))
wp.expect_near(wp.quat_rotate(q, geo_n), def_n, 0.001)
wp.launch(
_test_deformed_geometry_normal,
dim=geo.boundary_side_count(),
inputs=[
geo.side_index_arg_value(wp.get_device()),
geo.side_arg_value(wp.get_device()),
deformed_geo.side_arg_value(wp.get_device()),
rotation,
],
)
wp.synchronize()
@wp.kernel
def _test_closest_point_on_tri_kernel(
e0: wp.vec2,
e1: wp.vec2,
points: wp.array(dtype=wp.vec2),
sq_dist: wp.array(dtype=float),
coords: wp.array(dtype=Coords),
):
i = wp.tid()
d2, c = project_on_tri_at_origin(points[i], e0, e1)
sq_dist[i] = d2
coords[i] = c
@wp.kernel
def _test_closest_point_on_tet_kernel(
e0: wp.vec3,
e1: wp.vec3,
e2: wp.vec3,
points: wp.array(dtype=wp.vec3),
sq_dist: wp.array(dtype=float),
coords: wp.array(dtype=Coords),
):
i = wp.tid()
d2, c = project_on_tet_at_origin(points[i], e0, e1, e2)
sq_dist[i] = d2
coords[i] = c
def test_closest_point_queries(test, device):
# Test some simple closest-point queries against a triangle
e0 = wp.vec2(2.0, 0.0)
e1 = wp.vec2(0.0, 2.0)
points = wp.array(
(
[-1.0, -1.0],
[0.5, 0.5],
[1.0, 1.0],
[2.0, 2.0],
),
dtype=wp.vec2,
device=device,
)
expected_sq_dist = np.array([2.0, 0.0, 0.0, 2.0])
expected_coords = np.array([[1.0, 0.0, 0.0], [0.5, 0.25, 0.25], [0.0, 0.5, 0.5], [0.0, 0.5, 0.5]])
sq_dist = wp.empty(shape=points.shape, dtype=float, device=device)
coords = wp.empty(shape=points.shape, dtype=Coords, device=device)
wp.launch(
_test_closest_point_on_tri_kernel, dim=points.shape, device=device, inputs=[e0, e1, points, sq_dist, coords]
)
assert_np_equal(coords.numpy(), expected_coords)
assert_np_equal(sq_dist.numpy(), expected_sq_dist)
# Tet
e0 = wp.vec3(3.0, 0.0, 0.0)
e1 = wp.vec3(0.0, 3.0, 0.0)
e2 = wp.vec3(0.0, 0.0, 3.0)
points = wp.array(
(
[-1.0, -1.0, -1.0],
[0.5, 0.5, 0.5],
[1.0, 1.0, 1.0],
[2.0, 2.0, 2.0],
),
dtype=wp.vec3,
device=device,
)
expected_sq_dist = np.array([3.0, 0.0, 0.0, 3.0])
expected_coords = np.array(
[
[0.0, 0.0, 0.0],
[1.0 / 6.0, 1.0 / 6.0, 1.0 / 6.0],
[1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0],
[1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0],
]
)
sq_dist = wp.empty(shape=points.shape, dtype=float, device=device)
coords = wp.empty(shape=points.shape, dtype=Coords, device=device)
wp.launch(
_test_closest_point_on_tet_kernel, dim=points.shape, device=device, inputs=[e0, e1, e2, points, sq_dist, coords]
)
assert_np_equal(coords.numpy(), expected_coords, tol=1.0e-4)
assert_np_equal(sq_dist.numpy(), expected_sq_dist, tol=1.0e-4)
def test_regular_quadrature(test, device):
from warp.fem.geometry.element import LinearEdge, Polynomial, Triangle
for family in Polynomial:
# test integrating monomials
for degree in range(8):
coords, weights = LinearEdge().instantiate_quadrature(degree, family=family)
res = sum(w * pow(c[0], degree) for w, c in zip(weights, coords))
ref = 1.0 / (degree + 1)
test.assertAlmostEqual(ref, res, places=4)
# test integrating y^k1 (1 - x)^k2 on triangle using transformation to square
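# reference value: over the unit triangle {x >= 0, y >= 0, x + y <= 1},
# int (1 - x)^x_degree * y^y_degree dA
#   = int_0^1 (1 - x)^x_degree * (1 - x)^(y_degree + 1) / (y_degree + 1) dx
#   = 1 / ((x_degree + y_degree + 2) * (y_degree + 1))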
for x_degree in range(4):
for y_degree in range(4):
coords, weights = Triangle().instantiate_quadrature(x_degree + y_degree, family=family)
res = 0.5 * sum(w * pow(1.0 - c[1], x_degree) * pow(c[2], y_degree) for w, c in zip(weights, coords))
ref = 1.0 / ((x_degree + y_degree + 2) * (y_degree + 1))
# print(x_degree, y_degree, family, len(coords), res, ref)
test.assertAlmostEqual(ref, res, places=4)
# test integrating y^k1 (1 - x)^k2 on triangle using direct formulas
for x_degree in range(5):
for y_degree in range(5):
coords, weights = Triangle().instantiate_quadrature(x_degree + y_degree, family=None)
res = 0.5 * sum(w * pow(1.0 - c[1], x_degree) * pow(c[2], y_degree) for w, c in zip(weights, coords))
ref = 1.0 / ((x_degree + y_degree + 2) * (y_degree + 1))
test.assertAlmostEqual(ref, res, places=4)
def test_dof_mapper(test, device):
matrix_types = [wp.mat22, wp.mat33]
# Symmetric mapper
for mapping in fem.SymmetricTensorMapper.Mapping:
for dtype in matrix_types:
mapper = fem.SymmetricTensorMapper(dtype, mapping=mapping)
dof_dtype = mapper.dof_dtype
for k in range(dof_dtype._length_):
elem = np.array(dof_dtype(0.0))
elem[k] = 1.0
dof_vec = dof_dtype(elem)
mat = mapper.dof_to_value(dof_vec)
dof_round_trip = mapper.value_to_dof(mat)
# Check that value_to_dof(dof_to_value(.)) round-trips to the identity
assert_np_equal(np.array(dof_round_trip), np.array(dof_vec))
# Check that the mapped value has unit norm w.r.t. the inner product 0.5 * tau:tau
frob_norm2 = 0.5 * wp.ddot(mat, mat)
test.assertAlmostEqual(frob_norm2, 1.0, places=6)
# Skew-symmetric mapper
for dtype in matrix_types:
mapper = fem.SkewSymmetricTensorMapper(dtype)
dof_dtype = mapper.dof_dtype
if hasattr(dof_dtype, "_length_"):
for k in range(dof_dtype._length_):
elem = np.array(dof_dtype(0.0))
elem[k] = 1.0
dof_vec = dof_dtype(elem)
mat = mapper.dof_to_value(dof_vec)
dof_round_trip = mapper.value_to_dof(mat)
# Check that value_to_dof(dof_to_value(.)) round-trips to the identity
assert_np_equal(np.array(dof_round_trip), np.array(dof_vec))
# Check that the mapped value has unit norm w.r.t. the inner product 0.5 * tau:tau
frob_norm2 = 0.5 * wp.ddot(mat, mat)
test.assertAlmostEqual(frob_norm2, 1.0, places=6)
else:
dof_val = 1.0
mat = mapper.dof_to_value(dof_val)
dof_round_trip = mapper.value_to_dof(mat)
test.assertAlmostEqual(dof_round_trip, dof_val)
# Check that the mapped value has unit norm w.r.t. the inner product 0.5 * tau:tau
frob_norm2 = 0.5 * wp.ddot(mat, mat)
test.assertAlmostEqual(frob_norm2, 1.0, places=6)
def test_shape_function_weight(test, shape: shape.ShapeFunction, coord_sampler, CENTER_COORDS):
NODE_COUNT = shape.NODES_PER_ELEMENT
weight_fn = shape.make_element_inner_weight()
node_coords_fn = shape.make_node_coords_in_element()
# Weight at node should be 1
@dynamic_kernel(suffix=shape.name, kernel_options={"enable_backward": False})
def node_unity_test():
n = wp.tid()
node_w = weight_fn(node_coords_fn(n), n)
wp.expect_near(node_w, 1.0, 1.0e-5)
wp.launch(node_unity_test, dim=NODE_COUNT, inputs=[])
# Sum of node quadrature weights should be one (order 0)
# Sum of weighted quadrature coords should be element center (order 1)
node_quadrature_weight_fn = shape.make_node_quadrature_weight()
@dynamic_kernel(suffix=shape.name, kernel_options={"enable_backward": False})
def node_quadrature_unity_test():
sum_node_qp = float(0.0)
sum_node_qp_coords = Coords(0.0)
for n in range(NODE_COUNT):
w = node_quadrature_weight_fn(n)
sum_node_qp += w
sum_node_qp_coords += w * node_coords_fn(n)
wp.expect_near(sum_node_qp, 1.0, 0.0001)
wp.expect_near(sum_node_qp_coords, CENTER_COORDS, 0.0001)
wp.launch(node_quadrature_unity_test, dim=1, inputs=[])
@dynamic_kernel(suffix=shape.name, kernel_options={"enable_backward": False})
def partition_of_unity_test():
rng_state = wp.rand_init(4321, wp.tid())
coords = coord_sampler(rng_state)
# sum of node weights anywhere should be 1.0
w_sum = float(0.0)
for n in range(NODE_COUNT):
w_sum += weight_fn(coords, n)
wp.expect_near(w_sum, 1.0, 0.0001)
n_samples = 100
wp.launch(partition_of_unity_test, dim=n_samples, inputs=[])
def test_shape_function_trace(test, shape: shape.ShapeFunction, CENTER_COORDS):
NODE_COUNT = shape.NODES_PER_ELEMENT
node_coords_fn = shape.make_node_coords_in_element()
# Sum of node quadrature weights should be one (order 0)
# Sum of weighted quadrature coords should be element center (order 1)
trace_node_quadrature_weight_fn = shape.make_trace_node_quadrature_weight()
@dynamic_kernel(suffix=shape.name, kernel_options={"enable_backward": False})
def trace_node_quadrature_unity_test():
sum_node_qp = float(0.0)
sum_node_qp_coords = Coords(0.0)
for n in range(NODE_COUNT):
coords = node_coords_fn(n)
if wp.abs(coords[0]) < 1.0e-6:
w = trace_node_quadrature_weight_fn(n)
sum_node_qp += w
sum_node_qp_coords += w * node_coords_fn(n)
wp.expect_near(sum_node_qp, 1.0, 0.0001)
wp.expect_near(sum_node_qp_coords, CENTER_COORDS, 0.0001)
wp.launch(trace_node_quadrature_unity_test, dim=1, inputs=[])
def test_shape_function_gradient(test, shape: shape.ShapeFunction, coord_sampler, coord_delta_sampler):
weight_fn = shape.make_element_inner_weight()
weight_gradient_fn = shape.make_element_inner_weight_gradient()
@dynamic_kernel(suffix=shape.name, kernel_options={"enable_backward": False})
def finite_difference_test():
i, n = wp.tid()
rng_state = wp.rand_init(1234, i)
coords = coord_sampler(rng_state)
epsilon = 0.003
param_delta, coords_delta = coord_delta_sampler(epsilon, rng_state)
w_p = weight_fn(coords + coords_delta, n)
w_m = weight_fn(coords - coords_delta, n)
gp = weight_gradient_fn(coords + coords_delta, n)
gm = weight_gradient_fn(coords - coords_delta, n)
# 2nd-order finite-difference test
# See Schroeder 2019, Practical course on computing derivatives in code
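# Taylor expansion: w(c + d) - w(c - d) == (grad w(c + d) + grad w(c - d)) . d + O(|d|^3),
# so delta_ref and delta_est must agree to second order in the perturbation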
delta_ref = w_p - w_m
delta_est = wp.dot(gp + gm, param_delta)
# wp.printf("%d %f %f \n", n, delta_ref, delta_est)
wp.expect_near(delta_ref, delta_est, 0.0001)
n_samples = 100
wp.launch(finite_difference_test, dim=(n_samples, shape.NODES_PER_ELEMENT), inputs=[])
def test_square_shape_functions(test, device):
SQUARE_CENTER_COORDS = wp.constant(Coords(0.5, 0.5, 0.0))
SQUARE_SIDE_CENTER_COORDS = wp.constant(Coords(0.0, 0.5, 0.0))
@wp.func
def square_coord_sampler(state: wp.uint32):
return Coords(wp.randf(state), wp.randf(state), 0.0)
@wp.func
def square_coord_delta_sampler(epsilon: float, state: wp.uint32):
param_delta = wp.normalize(wp.vec2(wp.randf(state), wp.randf(state))) * epsilon
return param_delta, Coords(param_delta[0], param_delta[1], 0.0)
Q_1 = shape.SquareBipolynomialShapeFunctions(degree=1, family=fem.Polynomial.LOBATTO_GAUSS_LEGENDRE)
Q_2 = shape.SquareBipolynomialShapeFunctions(degree=2, family=fem.Polynomial.LOBATTO_GAUSS_LEGENDRE)
Q_3 = shape.SquareBipolynomialShapeFunctions(degree=3, family=fem.Polynomial.LOBATTO_GAUSS_LEGENDRE)
test_shape_function_weight(test, Q_1, square_coord_sampler, SQUARE_CENTER_COORDS)
test_shape_function_weight(test, Q_2, square_coord_sampler, SQUARE_CENTER_COORDS)
test_shape_function_weight(test, Q_3, square_coord_sampler, SQUARE_CENTER_COORDS)
test_shape_function_trace(test, Q_1, SQUARE_SIDE_CENTER_COORDS)
test_shape_function_trace(test, Q_2, SQUARE_SIDE_CENTER_COORDS)
test_shape_function_trace(test, Q_3, SQUARE_SIDE_CENTER_COORDS)
test_shape_function_gradient(test, Q_1, square_coord_sampler, square_coord_delta_sampler)
test_shape_function_gradient(test, Q_2, square_coord_sampler, square_coord_delta_sampler)
test_shape_function_gradient(test, Q_3, square_coord_sampler, square_coord_delta_sampler)
Q_1 = shape.SquareBipolynomialShapeFunctions(degree=1, family=fem.Polynomial.GAUSS_LEGENDRE)
Q_2 = shape.SquareBipolynomialShapeFunctions(degree=2, family=fem.Polynomial.GAUSS_LEGENDRE)
Q_3 = shape.SquareBipolynomialShapeFunctions(degree=3, family=fem.Polynomial.GAUSS_LEGENDRE)
test_shape_function_weight(test, Q_1, square_coord_sampler, SQUARE_CENTER_COORDS)
test_shape_function_weight(test, Q_2, square_coord_sampler, SQUARE_CENTER_COORDS)
test_shape_function_weight(test, Q_3, square_coord_sampler, SQUARE_CENTER_COORDS)
test_shape_function_gradient(test, Q_1, square_coord_sampler, square_coord_delta_sampler)
test_shape_function_gradient(test, Q_2, square_coord_sampler, square_coord_delta_sampler)
test_shape_function_gradient(test, Q_3, square_coord_sampler, square_coord_delta_sampler)
S_2 = shape.SquareSerendipityShapeFunctions(degree=2, family=fem.Polynomial.LOBATTO_GAUSS_LEGENDRE)
S_3 = shape.SquareSerendipityShapeFunctions(degree=3, family=fem.Polynomial.LOBATTO_GAUSS_LEGENDRE)
test_shape_function_weight(test, S_2, square_coord_sampler, SQUARE_CENTER_COORDS)
test_shape_function_weight(test, S_3, square_coord_sampler, SQUARE_CENTER_COORDS)
test_shape_function_trace(test, S_2, SQUARE_SIDE_CENTER_COORDS)
test_shape_function_trace(test, S_3, SQUARE_SIDE_CENTER_COORDS)
test_shape_function_gradient(test, S_2, square_coord_sampler, square_coord_delta_sampler)
test_shape_function_gradient(test, S_3, square_coord_sampler, square_coord_delta_sampler)
P_c1 = shape.SquareNonConformingPolynomialShapeFunctions(degree=1)
P_c2 = shape.SquareNonConformingPolynomialShapeFunctions(degree=2)
P_c3 = shape.SquareNonConformingPolynomialShapeFunctions(degree=3)
test_shape_function_weight(test, P_c1, square_coord_sampler, SQUARE_CENTER_COORDS)
test_shape_function_weight(test, P_c2, square_coord_sampler, SQUARE_CENTER_COORDS)
test_shape_function_weight(test, P_c3, square_coord_sampler, SQUARE_CENTER_COORDS)
test_shape_function_gradient(test, P_c1, square_coord_sampler, square_coord_delta_sampler)
test_shape_function_gradient(test, P_c2, square_coord_sampler, square_coord_delta_sampler)
test_shape_function_gradient(test, P_c3, square_coord_sampler, square_coord_delta_sampler)
wp.synchronize()
def test_cube_shape_functions(test, device):
CUBE_CENTER_COORDS = wp.constant(Coords(0.5, 0.5, 0.5))
CUBE_SIDE_CENTER_COORDS = wp.constant(Coords(0.0, 0.5, 0.5))
@wp.func
def cube_coord_sampler(state: wp.uint32):
return Coords(wp.randf(state), wp.randf(state), wp.randf(state))
@wp.func
def cube_coord_delta_sampler(epsilon: float, state: wp.uint32):
param_delta = wp.normalize(wp.vec3(wp.randf(state), wp.randf(state), wp.randf(state))) * epsilon
return param_delta, param_delta
Q_1 = shape.CubeTripolynomialShapeFunctions(degree=1, family=fem.Polynomial.LOBATTO_GAUSS_LEGENDRE)
Q_2 = shape.CubeTripolynomialShapeFunctions(degree=2, family=fem.Polynomial.LOBATTO_GAUSS_LEGENDRE)
Q_3 = shape.CubeTripolynomialShapeFunctions(degree=3, family=fem.Polynomial.LOBATTO_GAUSS_LEGENDRE)
test_shape_function_weight(test, Q_1, cube_coord_sampler, CUBE_CENTER_COORDS)
test_shape_function_weight(test, Q_2, cube_coord_sampler, CUBE_CENTER_COORDS)
test_shape_function_weight(test, Q_3, cube_coord_sampler, CUBE_CENTER_COORDS)
test_shape_function_trace(test, Q_1, CUBE_SIDE_CENTER_COORDS)
test_shape_function_trace(test, Q_2, CUBE_SIDE_CENTER_COORDS)
test_shape_function_trace(test, Q_3, CUBE_SIDE_CENTER_COORDS)
test_shape_function_gradient(test, Q_1, cube_coord_sampler, cube_coord_delta_sampler)
test_shape_function_gradient(test, Q_2, cube_coord_sampler, cube_coord_delta_sampler)
test_shape_function_gradient(test, Q_3, cube_coord_sampler, cube_coord_delta_sampler)
Q_1 = shape.CubeTripolynomialShapeFunctions(degree=1, family=fem.Polynomial.GAUSS_LEGENDRE)
Q_2 = shape.CubeTripolynomialShapeFunctions(degree=2, family=fem.Polynomial.GAUSS_LEGENDRE)
Q_3 = shape.CubeTripolynomialShapeFunctions(degree=3, family=fem.Polynomial.GAUSS_LEGENDRE)
test_shape_function_weight(test, Q_1, cube_coord_sampler, CUBE_CENTER_COORDS)
test_shape_function_weight(test, Q_2, cube_coord_sampler, CUBE_CENTER_COORDS)
test_shape_function_weight(test, Q_3, cube_coord_sampler, CUBE_CENTER_COORDS)
test_shape_function_gradient(test, Q_1, cube_coord_sampler, cube_coord_delta_sampler)
test_shape_function_gradient(test, Q_2, cube_coord_sampler, cube_coord_delta_sampler)
test_shape_function_gradient(test, Q_3, cube_coord_sampler, cube_coord_delta_sampler)
S_2 = shape.CubeSerendipityShapeFunctions(degree=2, family=fem.Polynomial.LOBATTO_GAUSS_LEGENDRE)
S_3 = shape.CubeSerendipityShapeFunctions(degree=3, family=fem.Polynomial.LOBATTO_GAUSS_LEGENDRE)
test_shape_function_weight(test, S_2, cube_coord_sampler, CUBE_CENTER_COORDS)
test_shape_function_weight(test, S_3, cube_coord_sampler, CUBE_CENTER_COORDS)
test_shape_function_trace(test, S_2, CUBE_SIDE_CENTER_COORDS)
test_shape_function_trace(test, S_3, CUBE_SIDE_CENTER_COORDS)
test_shape_function_gradient(test, S_2, cube_coord_sampler, cube_coord_delta_sampler)
test_shape_function_gradient(test, S_3, cube_coord_sampler, cube_coord_delta_sampler)
P_c1 = shape.CubeNonConformingPolynomialShapeFunctions(degree=1)
P_c2 = shape.CubeNonConformingPolynomialShapeFunctions(degree=2)
P_c3 = shape.CubeNonConformingPolynomialShapeFunctions(degree=3)
test_shape_function_weight(test, P_c1, cube_coord_sampler, CUBE_CENTER_COORDS)
test_shape_function_weight(test, P_c2, cube_coord_sampler, CUBE_CENTER_COORDS)
test_shape_function_weight(test, P_c3, cube_coord_sampler, CUBE_CENTER_COORDS)
test_shape_function_gradient(test, P_c1, cube_coord_sampler, cube_coord_delta_sampler)
test_shape_function_gradient(test, P_c2, cube_coord_sampler, cube_coord_delta_sampler)
test_shape_function_gradient(test, P_c3, cube_coord_sampler, cube_coord_delta_sampler)
wp.synchronize()
def test_tri_shape_functions(test, device):
TRI_CENTER_COORDS = wp.constant(Coords(1 / 3.0, 1 / 3.0, 1 / 3.0))
TRI_SIDE_CENTER_COORDS = wp.constant(Coords(0.0, 0.5, 0.5))
@wp.func
def tri_coord_sampler(state: wp.uint32):
a = wp.randf(state)
b = wp.randf(state)
return Coords(1.0 - a - b, a, b)
@wp.func
def tri_coord_delta_sampler(epsilon: float, state: wp.uint32):
param_delta = wp.normalize(wp.vec2(wp.randf(state), wp.randf(state))) * epsilon
a = param_delta[0]
b = param_delta[1]
return param_delta, Coords(-a - b, a, b)
P_1 = shape.Triangle2DPolynomialShapeFunctions(degree=1)
P_2 = shape.Triangle2DPolynomialShapeFunctions(degree=2)
P_3 = shape.Triangle2DPolynomialShapeFunctions(degree=3)
test_shape_function_weight(test, P_1, tri_coord_sampler, TRI_CENTER_COORDS)
test_shape_function_weight(test, P_2, tri_coord_sampler, TRI_CENTER_COORDS)
test_shape_function_weight(test, P_3, tri_coord_sampler, TRI_CENTER_COORDS)
test_shape_function_trace(test, P_1, TRI_SIDE_CENTER_COORDS)
test_shape_function_trace(test, P_2, TRI_SIDE_CENTER_COORDS)
test_shape_function_trace(test, P_3, TRI_SIDE_CENTER_COORDS)
test_shape_function_gradient(test, P_1, tri_coord_sampler, tri_coord_delta_sampler)
test_shape_function_gradient(test, P_2, tri_coord_sampler, tri_coord_delta_sampler)
test_shape_function_gradient(test, P_3, tri_coord_sampler, tri_coord_delta_sampler)
P_1d = shape.Triangle2DNonConformingPolynomialShapeFunctions(degree=1)
P_2d = shape.Triangle2DNonConformingPolynomialShapeFunctions(degree=2)
P_3d = shape.Triangle2DNonConformingPolynomialShapeFunctions(degree=3)
test_shape_function_weight(test, P_1d, tri_coord_sampler, TRI_CENTER_COORDS)
test_shape_function_weight(test, P_2d, tri_coord_sampler, TRI_CENTER_COORDS)
test_shape_function_weight(test, P_3d, tri_coord_sampler, TRI_CENTER_COORDS)
test_shape_function_gradient(test, P_1d, tri_coord_sampler, tri_coord_delta_sampler)
test_shape_function_gradient(test, P_2d, tri_coord_sampler, tri_coord_delta_sampler)
test_shape_function_gradient(test, P_3d, tri_coord_sampler, tri_coord_delta_sampler)
wp.synchronize()
def test_tet_shape_functions(test, device):
TET_CENTER_COORDS = wp.constant(Coords(1 / 4.0, 1 / 4.0, 1 / 4.0))
TET_SIDE_CENTER_COORDS = wp.constant(Coords(0.0, 1.0 / 3.0, 1.0 / 3.0))
@wp.func
def tet_coord_sampler(state: wp.uint32):
return Coords(wp.randf(state), wp.randf(state), wp.randf(state))
@wp.func
def tet_coord_delta_sampler(epsilon: float, state: wp.uint32):
param_delta = wp.normalize(wp.vec3(wp.randf(state), wp.randf(state), wp.randf(state))) * epsilon
return param_delta, param_delta
P_1 = shape.TetrahedronPolynomialShapeFunctions(degree=1)
P_2 = shape.TetrahedronPolynomialShapeFunctions(degree=2)
P_3 = shape.TetrahedronPolynomialShapeFunctions(degree=3)
test_shape_function_weight(test, P_1, tet_coord_sampler, TET_CENTER_COORDS)
test_shape_function_weight(test, P_2, tet_coord_sampler, TET_CENTER_COORDS)
test_shape_function_weight(test, P_3, tet_coord_sampler, TET_CENTER_COORDS)
test_shape_function_trace(test, P_1, TET_SIDE_CENTER_COORDS)
test_shape_function_trace(test, P_2, TET_SIDE_CENTER_COORDS)
test_shape_function_trace(test, P_3, TET_SIDE_CENTER_COORDS)
test_shape_function_gradient(test, P_1, tet_coord_sampler, tet_coord_delta_sampler)
test_shape_function_gradient(test, P_2, tet_coord_sampler, tet_coord_delta_sampler)
test_shape_function_gradient(test, P_3, tet_coord_sampler, tet_coord_delta_sampler)
P_1d = shape.TetrahedronNonConformingPolynomialShapeFunctions(degree=1)
P_2d = shape.TetrahedronNonConformingPolynomialShapeFunctions(degree=2)
P_3d = shape.TetrahedronNonConformingPolynomialShapeFunctions(degree=3)
test_shape_function_weight(test, P_1d, tet_coord_sampler, TET_CENTER_COORDS)
test_shape_function_weight(test, P_2d, tet_coord_sampler, TET_CENTER_COORDS)
test_shape_function_weight(test, P_3d, tet_coord_sampler, TET_CENTER_COORDS)
test_shape_function_gradient(test, P_1d, tet_coord_sampler, tet_coord_delta_sampler)
test_shape_function_gradient(test, P_2d, tet_coord_sampler, tet_coord_delta_sampler)
test_shape_function_gradient(test, P_3d, tet_coord_sampler, tet_coord_delta_sampler)
wp.synchronize()
def test_point_basis(test, device):
geo = fem.Grid2D(res=wp.vec2i(2))
domain = fem.Cells(geo)
quadrature = fem.RegularQuadrature(domain, order=2, family=fem.Polynomial.GAUSS_LEGENDRE)
point_basis = fem.PointBasisSpace(quadrature)
point_space = fem.make_collocated_function_space(point_basis)
point_test = fem.make_test(point_space, domain=domain)
# Sample at particle positions
ones = fem.integrate(linear_form, fields={"u": point_test}, nodal=True)
test.assertAlmostEqual(np.sum(ones.numpy()), 1.0, places=5)
# Sampling outside of particle positions
other_quadrature = fem.RegularQuadrature(domain, order=2, family=fem.Polynomial.LOBATTO_GAUSS_LEGENDRE)
zeros = fem.integrate(linear_form, quadrature=other_quadrature, fields={"u": point_test})
test.assertAlmostEqual(np.sum(zeros.numpy()), 0.0, places=5)
@fem.integrand
def _bicubic(s: Sample, domain: Domain):
x = domain(s)
return wp.pow(x[0], 3.0) * wp.pow(x[1], 3.0)
@fem.integrand
def _piecewise_constant(s: Sample):
return float(s.element_index)
def test_particle_quadratures(test, device):
geo = fem.Grid2D(res=wp.vec2i(2))
domain = fem.Cells(geo)
points, weights = domain.reference_element().instantiate_quadrature(order=4, family=fem.Polynomial.GAUSS_LEGENDRE)
points_per_cell = len(points)
points = points * domain.element_count()
weights = weights * domain.element_count()
points = wp.array(points, shape=(domain.element_count(), points_per_cell), dtype=Coords, device=device)
weights = wp.array(weights, shape=(domain.element_count(), points_per_cell), dtype=float, device=device)
explicit_quadrature = fem.ExplicitQuadrature(domain, points, weights)
test.assertEqual(explicit_quadrature.points_per_element(), points_per_cell)
test.assertEqual(explicit_quadrature.total_point_count(), points_per_cell * geo.cell_count())
val = fem.integrate(_bicubic, quadrature=explicit_quadrature)
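# int_0^1 int_0^1 x^3 * y^3 dx dy = (1/4) * (1/4) = 1/16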
test.assertAlmostEqual(val, 1.0 / 16, places=5)
element_indices = wp.array([3, 3, 2], dtype=int, device=device)
element_coords = wp.array(
[
[0.25, 0.5, 0.0],
[0.5, 0.25, 0.0],
[0.5, 0.5, 0.0],
],
dtype=Coords,
device=device,
)
pic_quadrature = fem.PicQuadrature(domain, positions=(element_indices, element_coords))
test.assertIsNone(pic_quadrature.points_per_element())
test.assertEqual(pic_quadrature.total_point_count(), 3)
test.assertEqual(pic_quadrature.active_cell_count(), 2)
val = fem.integrate(_piecewise_constant, quadrature=pic_quadrature)
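# two particles land in cell 3 and one in cell 2 of the 2x2 grid, and each active cell
# has measure 1/4; assuming PicQuadrature splits each active cell's measure across its
# particles, the integral of the element index is 3 * 0.25 + 2 * 0.25 = 1.25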
test.assertAlmostEqual(val, 1.25, places=5)
devices = get_test_devices()
cuda_devices = get_selected_cuda_test_devices()
class TestFem(unittest.TestCase):
pass
add_function_test(TestFem, "test_regular_quadrature", test_regular_quadrature)
add_function_test(TestFem, "test_closest_point_queries", test_closest_point_queries)
add_function_test(TestFem, "test_grad_decomposition", test_grad_decomposition, devices=devices)
add_function_test(TestFem, "test_integrate_gradient", test_integrate_gradient, devices=devices)
add_function_test(TestFem, "test_interpolate_gradient", test_interpolate_gradient, devices=devices)
add_function_test(TestFem, "test_vector_divergence_theorem", test_vector_divergence_theorem, devices=devices)
add_function_test(TestFem, "test_tensor_divergence_theorem", test_tensor_divergence_theorem, devices=devices)
add_function_test(TestFem, "test_grid_2d", test_grid_2d, devices=devices)
add_function_test(TestFem, "test_triangle_mesh", test_triangle_mesh, devices=devices)
add_function_test(TestFem, "test_quad_mesh", test_quad_mesh, devices=devices)
add_function_test(TestFem, "test_grid_3d", test_grid_3d, devices=devices)
add_function_test(TestFem, "test_tet_mesh", test_tet_mesh, devices=devices)
add_function_test(TestFem, "test_hex_mesh", test_hex_mesh, devices=devices)
add_function_test(TestFem, "test_nanogrid", test_nanogrid, devices=cuda_devices)
add_function_test(TestFem, "test_deformed_geometry", test_deformed_geometry, devices=devices)
add_function_test(TestFem, "test_dof_mapper", test_dof_mapper)
add_function_test(TestFem, "test_point_basis", test_point_basis)
add_function_test(TestFem, "test_particle_quadratures", test_particle_quadratures)
class TestFemShapeFunctions(unittest.TestCase):
pass
add_function_test(TestFemShapeFunctions, "test_square_shape_functions", test_square_shape_functions)
add_function_test(TestFemShapeFunctions, "test_cube_shape_functions", test_cube_shape_functions)
add_function_test(TestFemShapeFunctions, "test_tri_shape_functions", test_tri_shape_functions)
add_function_test(TestFemShapeFunctions, "test_tet_shape_functions", test_tet_shape_functions)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 52,028 | Python | 39.053118 | 120 | 0.648439 |
NVIDIA/warp/warp/tests/test_sparse.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.sparse import (
bsr_axpy,
bsr_axpy_work_arrays,
bsr_copy,
bsr_diag,
bsr_get_diag,
bsr_identity,
bsr_mm,
bsr_mm_work_arrays,
bsr_mv,
bsr_scale,
bsr_set_from_triplets,
bsr_set_transpose,
bsr_transposed,
bsr_zeros,
)
from warp.tests.unittest_utils import *
def _get_block(mat, row, col, block_shape):
return mat[row * block_shape[0] : (row + 1) * block_shape[0], col * block_shape[1] : (col + 1) * block_shape[1]]
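# Accumulates (possibly duplicated) COO triplets into a dense reference matrix;
# values may be scalar entries or 2D blocks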
def _triplets_to_dense(shape, rows, cols, values):
mat = np.zeros(shape)
rows = rows.numpy()
cols = cols.numpy()
values = values.numpy()
block_shape = values.shape[1:] if values.ndim == 3 else (1, 1)
for row, col, val in zip(rows, cols, values):
mat_block = _get_block(mat, row, col, block_shape)
mat_block += val
return mat
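# Expands a BSR matrix to a dense array: `offsets` (length nrow + 1) delimits the block
# range of each block row, `columns` stores block column indices, `values` the blocks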
def _bsr_to_dense(bsr):
mat = np.zeros(bsr.shape)
offsets = bsr.offsets.numpy()
columns = bsr.columns.numpy()
values = bsr.values.numpy()
for row in range(bsr.nrow):
beg = offsets[row]
end = offsets[row + 1]
for block in range(beg, end):
mat_block = _get_block(mat, row, columns[block], bsr.block_shape)
mat_block += values[block]
return mat
def test_csr_from_triplets(test, device):
rng = np.random.default_rng(123)
shape = (8, 6)
n = 100
rows = wp.array(rng.integers(0, high=shape[0], size=n, dtype=int), dtype=int, device=device)
cols = wp.array(rng.integers(0, high=shape[1], size=n, dtype=int), dtype=int, device=device)
vals = wp.array(rng.random(size=n), dtype=float, device=device)
ref = _triplets_to_dense(shape, rows, cols, vals)
csr = bsr_zeros(shape[0], shape[1], float, device=device)
bsr_set_from_triplets(csr, rows, cols, vals)
test.assertEqual(csr.block_size, 1)
res = _bsr_to_dense(csr)
assert_np_equal(res, ref, 0.0001)
def test_bsr_from_triplets(test, device):
rng = np.random.default_rng(123)
block_shape = (3, 2)
nrow = 4
ncol = 9
shape = (block_shape[0] * nrow, block_shape[1] * ncol)
n = 50
rows = wp.array(rng.integers(0, high=nrow, size=n, dtype=int), dtype=int, device=device)
cols = wp.array(rng.integers(0, high=ncol, size=n, dtype=int), dtype=int, device=device)
vals = wp.array(rng.random(size=(n, block_shape[0], block_shape[1])), dtype=float, device=device)
ref = _triplets_to_dense(shape, rows, cols, vals)
bsr = bsr_zeros(nrow, ncol, wp.types.matrix(shape=block_shape, dtype=float), device=device)
bsr_set_from_triplets(bsr, rows, cols, vals)
test.assertEqual(bsr.block_size, block_shape[0] * block_shape[1])
res = _bsr_to_dense(bsr)
assert_np_equal(res, ref, 0.0001)
# test zero-length inputs
bsr_set_from_triplets(
bsr,
wp.array([], dtype=int, device=device),
wp.array([], dtype=int, device=device),
wp.array([], shape=(0, block_shape[0], block_shape[1]), dtype=float, device=device),
)
test.assertEqual(bsr.nnz, 0)
def test_bsr_get_set_diag(test, device):
rng = np.random.default_rng(123)
block_shape = (3, 3)
nrow = 4
ncol = 4
nnz = 6
rows = wp.array([0, 1, 2, 3, 2, 1], dtype=int, device=device)
cols = wp.array([1, 1, 1, 3, 2, 2], dtype=int, device=device)
vals_np = rng.random(size=(nnz, block_shape[0], block_shape[1]))
vals = wp.array(vals_np, dtype=float, device=device)
bsr = bsr_zeros(nrow, ncol, wp.types.matrix(shape=block_shape, dtype=float), device=device)
bsr_set_from_triplets(bsr, rows, cols, vals)
diag = bsr_get_diag(bsr)
diag_np = diag.numpy()
assert_np_equal(diag_np[0], np.zeros(block_shape))
assert_np_equal(diag_np[1], vals_np[1], tol=0.00001)
assert_np_equal(diag_np[2], vals_np[4], tol=0.00001)
assert_np_equal(diag_np[3], vals_np[3], tol=0.00001)
# Test set_diag/get_diag round-trips with various block types
# Array of blocks
diag_bsr = bsr_diag(diag)
bsr_get_diag(diag_bsr, out=diag)
assert_np_equal(diag_np, diag.numpy())
diag_scalar_np = rng.random(size=nrow)
diag_scalar = wp.array(diag_scalar_np, device=device)
diag_bsr = bsr_diag(diag_scalar)
diag = bsr_get_diag(diag_bsr)
assert_np_equal(diag_scalar_np, diag.numpy(), tol=0.000001)
# Uniform block diagonal
with test.assertRaisesRegex(ValueError, "BsrMatrix block type must be either warp matrix or scalar"):
# 1d block type -- invalid
diag_bsr = bsr_diag(diag=vals_np[0, 0], rows_of_blocks=nrow, cols_of_blocks=nrow + 1)
diag_bsr = bsr_diag(diag=vals_np[0], rows_of_blocks=nrow, cols_of_blocks=nrow + 1)
assert diag_bsr.values.shape[0] == nrow
assert_np_equal(diag_bsr.values.numpy(), np.broadcast_to(vals_np[0], shape=(nrow, *block_shape)), tol=0.000001)
diag_bsr = bsr_diag(diag=float(diag_scalar_np[0]), rows_of_blocks=nrow, cols_of_blocks=nrow + 1)
assert diag_bsr.values.shape[0] == nrow
assert_np_equal(diag_bsr.values.numpy(), np.full(nrow, diag_scalar_np[0]), tol=0.000001)
# Identity matrix
diag_bsr = bsr_identity(nrow, block_type=wp.mat44, device=device)
assert diag_bsr.values.shape[0] == nrow
assert_np_equal(diag_bsr.values.numpy(), np.broadcast_to(np.eye(4), shape=(nrow, 4, 4)), tol=0.000001)
diag_csr = bsr_identity(nrow, block_type=wp.float64, device=device)
assert np.all(diag_csr.values.numpy() == np.ones(nrow, dtype=float))
def make_test_bsr_transpose(block_shape, scalar_type):
def test_bsr_transpose(test, device):
rng = np.random.default_rng(123)
nrow = 4
ncol = 5
nnz = 6
rows = wp.array([0, 1, 2, 3, 2, 1], dtype=int, device=device)
cols = wp.array([1, 4, 1, 3, 0, 2], dtype=int, device=device)
vals_np = rng.random(size=(nnz, block_shape[0], block_shape[1]))
vals = wp.array(vals_np, dtype=scalar_type, device=device).reshape((nnz, block_shape[0], block_shape[1]))
bsr = bsr_zeros(nrow, ncol, wp.types.matrix(shape=block_shape, dtype=scalar_type), device=device)
bsr_set_from_triplets(bsr, rows, cols, vals)
ref = np.transpose(_bsr_to_dense(bsr))
bsr_transposed = bsr_zeros(
ncol, nrow, wp.types.matrix(shape=block_shape[::-1], dtype=scalar_type), device=device
)
bsr_set_transpose(dest=bsr_transposed, src=bsr)
res = _bsr_to_dense(bsr_transposed)
assert_np_equal(res, ref, 0.0001)
if block_shape[0] != block_shape[-1]:
# test incompatible block shape
with test.assertRaisesRegex(ValueError, "Destination block shape must be"):
bsr_set_transpose(dest=bsr, src=bsr)
return test_bsr_transpose
def make_test_bsr_axpy(block_shape, scalar_type):
def test_bsr_axpy(test, device):
rng = np.random.default_rng(123)
nrow = 2
ncol = 3
nnz = 6
alphas = [-1.0, 0.0, 1.0]
betas = [2.0, -1.0, 0.0]
x_rows = wp.array(rng.integers(0, high=nrow, size=nnz, dtype=int), dtype=int, device=device)
x_cols = wp.array(rng.integers(0, high=ncol, size=nnz, dtype=int), dtype=int, device=device)
x_vals = wp.array(rng.random(size=(nnz, block_shape[0], block_shape[1])), dtype=scalar_type, device=device)
x_vals = x_vals.reshape((nnz, block_shape[0], block_shape[1]))
x = bsr_zeros(nrow, ncol, wp.types.matrix(shape=block_shape, dtype=scalar_type), device=device)
bsr_set_from_triplets(x, x_rows, x_cols, x_vals)
y_rows = wp.array(rng.integers(0, high=nrow, size=nnz, dtype=int), dtype=int, device=device)
y_cols = wp.array(rng.integers(0, high=ncol, size=nnz, dtype=int), dtype=int, device=device)
y_vals = wp.array(rng.random(size=(nnz, block_shape[0], block_shape[1])), dtype=scalar_type, device=device)
y_vals = y_vals.reshape((nnz, block_shape[0], block_shape[1]))
y = bsr_zeros(nrow, ncol, wp.types.matrix(shape=block_shape, dtype=scalar_type), device=device)
bsr_set_from_triplets(y, y_rows, y_cols, y_vals)
work_arrays = bsr_axpy_work_arrays()
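# pre-allocated work arrays let the repeated bsr_axpy calls below reuse temporary
# buffers instead of reallocating them on every call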
for alpha, beta in zip(alphas, betas):
ref = alpha * _bsr_to_dense(x) + beta * _bsr_to_dense(y)
if beta == 0.0:
y = bsr_axpy(x, alpha=alpha, beta=beta, work_arrays=work_arrays)
else:
bsr_axpy(x, y, alpha, beta, work_arrays=work_arrays)
res = _bsr_to_dense(y)
assert_np_equal(res, ref, 0.0001)
# test aliasing
ref = 3.0 * _bsr_to_dense(y)
bsr_axpy(y, y, alpha=1.0, beta=2.0)
res = _bsr_to_dense(y)
assert_np_equal(res, ref, 0.0001)
# test incompatible shapes
y.ncol = y.ncol + 1
with test.assertRaisesRegex(ValueError, "Matrices must have the same number of rows and columns"):
bsr_axpy(x, y)
return test_bsr_axpy
def make_test_bsr_mm(block_shape, scalar_type):
def test_bsr_mm(test, device):
rng = np.random.default_rng(123)
x_nrow = 3
x_ncol = 2
x_block_shape = block_shape
y_nrow = 2
y_ncol = 3
y_block_shape = block_shape[::-1]
z_nrow = x_nrow
z_ncol = y_ncol
z_block_shape = (x_block_shape[0], y_block_shape[1])
nnz = 6
alphas = [-1.0, 0.0, 1.0]
betas = [2.0, -1.0, 0.0]
x_rows = wp.array(rng.integers(0, high=x_nrow, size=nnz, dtype=int), dtype=int, device=device)
x_cols = wp.array(rng.integers(0, high=x_ncol, size=nnz, dtype=int), dtype=int, device=device)
x_vals = wp.array(rng.random(size=(nnz, x_block_shape[0], x_block_shape[1])), dtype=scalar_type, device=device)
x_vals = x_vals.reshape((nnz, x_block_shape[0], x_block_shape[1]))
x = bsr_zeros(x_nrow, x_ncol, wp.types.matrix(shape=x_block_shape, dtype=scalar_type), device=device)
bsr_set_from_triplets(x, x_rows, x_cols, x_vals)
y_rows = wp.array(rng.integers(0, high=y_nrow, size=nnz, dtype=int), dtype=int, device=device)
y_cols = wp.array(rng.integers(0, high=y_ncol, size=nnz, dtype=int), dtype=int, device=device)
y_vals = wp.array(rng.random(size=(nnz, y_block_shape[0], y_block_shape[1])), dtype=scalar_type, device=device)
y_vals = y_vals.reshape((nnz, y_block_shape[0], y_block_shape[1]))
y = bsr_zeros(y_nrow, y_ncol, wp.types.matrix(shape=y_block_shape, dtype=scalar_type), device=device)
bsr_set_from_triplets(y, y_rows, y_cols, y_vals)
z_rows = wp.array(rng.integers(0, high=z_nrow, size=nnz, dtype=int), dtype=int, device=device)
z_cols = wp.array(rng.integers(0, high=z_ncol, size=nnz, dtype=int), dtype=int, device=device)
z_vals = wp.array(rng.random(size=(nnz, z_block_shape[0], z_block_shape[1])), dtype=scalar_type, device=device)
z_vals = z_vals.reshape((nnz, z_block_shape[0], z_block_shape[1]))
z = bsr_zeros(z_nrow, z_ncol, wp.types.matrix(shape=z_block_shape, dtype=scalar_type), device=device)
bsr_set_from_triplets(z, z_rows, z_cols, z_vals)
work_arrays = bsr_mm_work_arrays()
for alpha, beta in zip(alphas, betas):
ref = alpha * (_bsr_to_dense(x) @ _bsr_to_dense(y)) + beta * _bsr_to_dense(z)
bsr_mm(x, y, z, alpha, beta, work_arrays=work_arrays)
res = _bsr_to_dense(z)
assert_np_equal(res, ref, 0.0001)
# test aliasing of matrix arguments
# x = alpha * z * x + beta * x
alpha, beta = alphas[0], betas[0]
ref = alpha * (_bsr_to_dense(z) @ _bsr_to_dense(x)) + beta * _bsr_to_dense(x)
bsr_mm(z, x, x, alpha, beta)
res = _bsr_to_dense(x)
assert_np_equal(res, ref, 0.0001)
# z = alpha * z * z + beta * z
ref = alpha * (_bsr_to_dense(z) @ _bsr_to_dense(z)) + beta * _bsr_to_dense(z)
bsr_mm(z, z, z, alpha, beta)
res = _bsr_to_dense(z)
assert_np_equal(res, ref, 0.0001)
# test incompatible shapes
if block_shape[0] != block_shape[-1]:
with test.assertRaisesRegex(ValueError, "Incompatible block sizes"):
bsr_mm(z, y)
y.ncol = y.ncol * 2
with test.assertRaisesRegex(ValueError, "Incompatible number of rows/columns"):
bsr_mm(y, z)
return test_bsr_mm
def make_test_bsr_mv(block_shape, scalar_type):
def test_bsr_mv(test, device):
rng = np.random.default_rng(123)
nrow = 2
ncol = 3
nnz = 6
alphas = [-1.0, 0.0, 1.0]
betas = [2.0, -1.0, 0.0]
A_rows = wp.array(rng.integers(0, high=nrow, size=nnz, dtype=int), dtype=int, device=device)
A_cols = wp.array(rng.integers(0, high=ncol, size=nnz, dtype=int), dtype=int, device=device)
A_vals = wp.array(rng.random(size=(nnz, block_shape[0], block_shape[1])), dtype=scalar_type, device=device)
A_vals = A_vals.reshape((nnz, block_shape[0], block_shape[1]))
A = bsr_zeros(nrow, ncol, wp.types.matrix(shape=block_shape, dtype=scalar_type), device=device)
bsr_set_from_triplets(A, A_rows, A_cols, A_vals)
if block_shape[1] == 1:
x = wp.array(rng.random(size=ncol), dtype=scalar_type, device=device)
else:
x = wp.array(
rng.random(size=(ncol, block_shape[1])),
dtype=wp.vec(length=block_shape[1], dtype=scalar_type),
device=device,
)
if block_shape[0] == 1:
y = wp.array(rng.random(size=nrow), dtype=scalar_type, device=device)
else:
y = wp.array(
rng.random(size=(nrow, block_shape[0])),
dtype=wp.vec(length=block_shape[0], dtype=scalar_type),
device=device,
)
work_buffer = wp.empty_like(y)
for alpha, beta in zip(alphas, betas):
ref = alpha * _bsr_to_dense(A) @ x.numpy().flatten() + beta * y.numpy().flatten()
if beta == 0.0:
y = bsr_mv(A, x, alpha=alpha, beta=beta, work_buffer=work_buffer)
else:
bsr_mv(A, x, y, alpha, beta, work_buffer=work_buffer)
res = y.numpy().flatten()
assert_np_equal(res, ref, 0.0001)
# test aliasing
alpha, beta = alphas[0], betas[0]
AAt = bsr_mm(A, bsr_transposed(A))
ref = alpha * _bsr_to_dense(AAt) @ y.numpy().flatten() + beta * y.numpy().flatten()
bsr_mv(AAt, y, y, alpha, beta)
res = y.numpy().flatten()
assert_np_equal(res, ref, 0.0001)
A.ncol = A.ncol + 1
with test.assertRaisesRegex(ValueError, "Number of columns"):
bsr_mv(A, x, y)
A.ncol = A.ncol - 1
A.nrow = A.nrow - 1
with test.assertRaisesRegex(ValueError, "Number of rows"):
bsr_mv(A, x, y)
return test_bsr_mv
devices = get_test_devices()
class TestSparse(unittest.TestCase):
def test_bsr_copy_scale(self):
nrow = 6
bsize = 2
diag_bsr = bsr_diag(diag=np.eye(bsize, dtype=float) * 2.0, rows_of_blocks=nrow)
diag_copy = bsr_copy(diag_bsr, scalar_type=wp.float64)
self.assertTrue(wp.types.types_equal(diag_copy.values.dtype, wp.mat(shape=(bsize, bsize), dtype=wp.float64)))
bsr_scale(x=diag_copy, alpha=0.5)
res = _bsr_to_dense(diag_copy)
ref = np.eye(nrow * bsize)
assert_np_equal(res, ref, 0.0001)
bsr_scale(x=diag_copy, alpha=0.0)
self.assertEqual(diag_copy.nrow, nrow)
self.assertEqual(diag_copy.ncol, nrow)
self.assertEqual(diag_copy.nnz, 0)
add_function_test(TestSparse, "test_csr_from_triplets", test_csr_from_triplets, devices=devices)
add_function_test(TestSparse, "test_bsr_from_triplets", test_bsr_from_triplets, devices=devices)
add_function_test(TestSparse, "test_bsr_get_diag", test_bsr_get_set_diag, devices=devices)
add_function_test(TestSparse, "test_csr_transpose", make_test_bsr_transpose((1, 1), wp.float32), devices=devices)
add_function_test(TestSparse, "test_bsr_transpose_1_3", make_test_bsr_transpose((1, 3), wp.float32), devices=devices)
add_function_test(TestSparse, "test_bsr_transpose_3_3", make_test_bsr_transpose((3, 3), wp.float64), devices=devices)
add_function_test(TestSparse, "test_csr_axpy", make_test_bsr_axpy((1, 1), wp.float32), devices=devices)
add_function_test(TestSparse, "test_bsr_axpy_1_3", make_test_bsr_axpy((1, 3), wp.float32), devices=devices)
add_function_test(TestSparse, "test_bsr_axpy_3_3", make_test_bsr_axpy((3, 3), wp.float64), devices=devices)
add_function_test(TestSparse, "test_csr_mm", make_test_bsr_mm((1, 1), wp.float32), devices=devices)
add_function_test(TestSparse, "test_bsr_mm_1_3", make_test_bsr_mm((1, 3), wp.float32), devices=devices)
add_function_test(TestSparse, "test_bsr_mm_3_3", make_test_bsr_mm((3, 3), wp.float64), devices=devices)
add_function_test(TestSparse, "test_csr_mv", make_test_bsr_mv((1, 1), wp.float32), devices=devices)
add_function_test(TestSparse, "test_bsr_mv_1_3", make_test_bsr_mv((1, 3), wp.float32), devices=devices)
add_function_test(TestSparse, "test_bsr_mv_3_3", make_test_bsr_mv((3, 3), wp.float64), devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 17,822 | Python | 37.164882 | 119 | 0.615026 |
NVIDIA/warp/warp/tests/test_grad.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
from typing import Any
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def scalar_grad(x: wp.array(dtype=float), y: wp.array(dtype=float)):
y[0] = x[0] ** 2.0
def test_scalar_grad(test, device):
x = wp.array([3.0], dtype=float, device=device, requires_grad=True)
y = wp.zeros_like(x)
tape = wp.Tape()
with tape:
wp.launch(scalar_grad, dim=1, inputs=[x, y], device=device)
tape.backward(y)
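# y = x^2, so dy/dx = 2 * x = 6.0 at x = 3.0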
assert_np_equal(tape.gradients[x].numpy(), np.array(6.0))
@wp.kernel
def for_loop_grad(n: int, x: wp.array(dtype=float), s: wp.array(dtype=float)):
sum = float(0.0)
for i in range(n):
sum = sum + x[i] * 2.0
s[0] = sum
def test_for_loop_grad(test, device):
n = 32
val = np.ones(n, dtype=np.float32)
x = wp.array(val, device=device, requires_grad=True)
sum = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(for_loop_grad, dim=1, inputs=[n, x, sum], device=device)
# ensure forward pass outputs are correct
assert_np_equal(sum.numpy(), 2.0 * np.sum(x.numpy()))
tape.backward(loss=sum)
# ensure forward pass outputs persist
assert_np_equal(sum.numpy(), 2.0 * np.sum(x.numpy()))
# ensure gradients correct
assert_np_equal(tape.gradients[x].numpy(), 2.0 * val)
def test_for_loop_graph_grad(test, device):
wp.load_module(device=device)
n = 32
val = np.ones(n, dtype=np.float32)
x = wp.array(val, device=device, requires_grad=True)
sum = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
wp.capture_begin(device, force_module_load=False)
try:
tape = wp.Tape()
with tape:
wp.launch(for_loop_grad, dim=1, inputs=[n, x, sum], device=device)
tape.backward(loss=sum)
finally:
graph = wp.capture_end(device)
wp.capture_launch(graph)
wp.synchronize_device(device)
# ensure forward pass outputs persist
assert_np_equal(sum.numpy(), 2.0 * np.sum(x.numpy()))
# ensure gradients correct
assert_np_equal(x.grad.numpy(), 2.0 * val)
wp.capture_launch(graph)
wp.synchronize_device(device)
@wp.kernel
def for_loop_nested_if_grad(n: int, x: wp.array(dtype=float), s: wp.array(dtype=float)):
sum = float(0.0)
for i in range(n):
if i < 16:
if i < 8:
sum = sum + x[i] * 2.0
else:
sum = sum + x[i] * 4.0
else:
if i < 24:
sum = sum + x[i] * 6.0
else:
sum = sum + x[i] * 8.0
s[0] = sum
def test_for_loop_nested_if_grad(test, device):
n = 32
val = np.ones(n, dtype=np.float32)
# fmt: off
expected_val = [
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0,
6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0,
8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0,
]
expected_grad = [
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0,
6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0,
8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0,
]
# fmt: on
x = wp.array(val, device=device, requires_grad=True)
sum = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(for_loop_nested_if_grad, dim=1, inputs=[n, x, sum], device=device)
assert_np_equal(sum.numpy(), np.sum(expected_val))
tape.backward(loss=sum)
assert_np_equal(sum.numpy(), np.sum(expected_val))
assert_np_equal(tape.gradients[x].numpy(), np.array(expected_grad))
@wp.kernel
def for_loop_grad_nested(n: int, x: wp.array(dtype=float), s: wp.array(dtype=float)):
sum = float(0.0)
for i in range(n):
for j in range(n):
sum = sum + x[i * n + j] * float(i * n + j) + 1.0
s[0] = sum
def test_for_loop_nested_for_grad(test, device):
x = wp.zeros(9, dtype=float, device=device, requires_grad=True)
s = wp.zeros(1, dtype=float, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(for_loop_grad_nested, dim=1, inputs=[3, x, s], device=device)
tape.backward(s)
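# with x initialized to zero the sum reduces to the n*n added constants, so s == 9.0;
# each x[k] enters the sum with coefficient float(k), so ds/dx[k] == k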
assert_np_equal(s.numpy(), np.array([9.0]))
assert_np_equal(tape.gradients[x].numpy(), np.arange(0.0, 9.0, 1.0))
# differentiating through most while loops is not supported,
# since mutating the loop counter (e.g. i = i + 1) breaks the adjoint pass
# @wp.kernel
# def while_loop_grad(n: int,
# x: wp.array(dtype=float),
# c: wp.array(dtype=int),
# s: wp.array(dtype=float)):
# tid = wp.tid()
# i = int(0)
# while i < n:
# s[0] = s[0] + x[i]*2.0
# i = i + 1
# def test_while_loop_grad(test, device):
# n = 32
# x = wp.array(np.ones(n, dtype=np.float32), device=device, requires_grad=True)
# c = wp.zeros(1, dtype=int, device=device)
# sum = wp.zeros(1, dtype=wp.float32, device=device)
# tape = wp.Tape()
# with tape:
# wp.launch(while_loop_grad, dim=1, inputs=[n, x, c, sum], device=device)
# tape.backward(loss=sum)
# assert_np_equal(sum.numpy(), 2.0*np.sum(x.numpy()))
# assert_np_equal(tape.gradients[x].numpy(), 2.0*np.ones_like(x.numpy()))
@wp.kernel
def preserve_outputs(
n: int, x: wp.array(dtype=float), c: wp.array(dtype=float), s1: wp.array(dtype=float), s2: wp.array(dtype=float)
):
tid = wp.tid()
# plain store
c[tid] = x[tid] * 2.0
# atomic stores
wp.atomic_add(s1, 0, x[tid] * 3.0)
wp.atomic_sub(s2, 0, x[tid] * 2.0)
# tests that outputs from the forward pass are
# preserved by the backward pass, i.e.: stores
# are omitted during the forward replay
def test_preserve_outputs_grad(test, device):
n = 32
val = np.ones(n, dtype=np.float32)
x = wp.array(val, device=device, requires_grad=True)
c = wp.zeros_like(x)
s1 = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
s2 = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(preserve_outputs, dim=n, inputs=[n, x, c, s1, s2], device=device)
# ensure forward pass results are correct
assert_np_equal(x.numpy(), val)
assert_np_equal(c.numpy(), val * 2.0)
assert_np_equal(s1.numpy(), np.array(3.0 * n))
assert_np_equal(s2.numpy(), np.array(-2.0 * n))
# run backward on first loss
tape.backward(loss=s1)
# ensure inputs, copy and sum are unchanged by backwards pass
assert_np_equal(x.numpy(), val)
assert_np_equal(c.numpy(), val * 2.0)
assert_np_equal(s1.numpy(), np.array(3.0 * n))
assert_np_equal(s2.numpy(), np.array(-2.0 * n))
# ensure gradients are correct
assert_np_equal(tape.gradients[x].numpy(), 3.0 * val)
# run backward on second loss
tape.zero()
tape.backward(loss=s2)
assert_np_equal(x.numpy(), val)
assert_np_equal(c.numpy(), val * 2.0)
assert_np_equal(s1.numpy(), np.array(3.0 * n))
assert_np_equal(s2.numpy(), np.array(-2.0 * n))
# ensure gradients are correct
assert_np_equal(tape.gradients[x].numpy(), -2.0 * val)
def gradcheck(func, func_name, inputs, device, eps=1e-4, tol=1e-2):
"""
Checks that the gradient of the Warp kernel is correct by comparing it to the
numerical gradient computed using finite differences.
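Example (minimal sketch; `square_func` is a hypothetical kernel body, not part of
this module):

    def square_func(x: wp.array(dtype=float), out: wp.array(dtype=float)):
        out[0] = x[0] * x[0]

    x = wp.array([3.0], dtype=wp.float32, device=device)
    gradcheck(square_func, "square_func", [x], device)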
"""
kernel = wp.Kernel(func=func, key=func_name)
def f(xs):
# call the kernel without taping for finite differences
wp_xs = [wp.array(xs[i], ndim=1, dtype=inputs[i].dtype, device=device) for i in range(len(inputs))]
output = wp.zeros(1, dtype=wp.float32, device=device)
wp.launch(kernel, dim=1, inputs=wp_xs, outputs=[output], device=device)
return output.numpy()[0]
# compute numerical gradient
numerical_grad = []
np_xs = []
for i in range(len(inputs)):
np_xs.append(inputs[i].numpy().flatten().copy())
numerical_grad.append(np.zeros_like(np_xs[-1]))
inputs[i].requires_grad = True
for i in range(len(np_xs)):
for j in range(len(np_xs[i])):
np_xs[i][j] += eps
y1 = f(np_xs)
np_xs[i][j] -= 2 * eps
y2 = f(np_xs)
np_xs[i][j] += eps
numerical_grad[i][j] = (y1 - y2) / (2 * eps)
# compute analytical gradient
tape = wp.Tape()
output = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
with tape:
wp.launch(kernel, dim=1, inputs=inputs, outputs=[output], device=device)
tape.backward(loss=output)
# compare gradients
for i in range(len(inputs)):
grad = tape.gradients[inputs[i]]
assert_np_equal(grad.numpy(), numerical_grad[i], tol=tol)
tape.zero()
def test_vector_math_grad(test, device):
rng = np.random.default_rng(123)
# test unary operations
for dim, vec_type in [(2, wp.vec2), (3, wp.vec3), (4, wp.vec4), (4, wp.quat)]:
def check_length(vs: wp.array(dtype=vec_type), out: wp.array(dtype=float)):
out[0] = wp.length(vs[0])
def check_length_sq(vs: wp.array(dtype=vec_type), out: wp.array(dtype=float)):
out[0] = wp.length_sq(vs[0])
def check_normalize(vs: wp.array(dtype=vec_type), out: wp.array(dtype=float)):
out[0] = wp.length_sq(wp.normalize(vs[0])) # compress to scalar output
# run the tests with 5 different random inputs
for _ in range(5):
x = wp.array(rng.random(size=(1, dim), dtype=np.float32), dtype=vec_type, device=device)
gradcheck(check_length, f"check_length_{vec_type.__name__}", [x], device)
gradcheck(check_length_sq, f"check_length_sq_{vec_type.__name__}", [x], device)
gradcheck(check_normalize, f"check_normalize_{vec_type.__name__}", [x], device)
def test_matrix_math_grad(test, device):
rng = np.random.default_rng(123)
# test unary operations
for dim, mat_type in [(2, wp.mat22), (3, wp.mat33), (4, wp.mat44)]:
def check_determinant(vs: wp.array(dtype=mat_type), out: wp.array(dtype=float)):
out[0] = wp.determinant(vs[0])
def check_trace(vs: wp.array(dtype=mat_type), out: wp.array(dtype=float)):
out[0] = wp.trace(vs[0])
# run the tests with 5 different random inputs
for _ in range(5):
x = wp.array(rng.random(size=(1, dim, dim), dtype=np.float32), ndim=1, dtype=mat_type, device=device)
gradcheck(check_determinant, f"check_determinant_{mat_type.__name__}", [x], device)
gradcheck(check_trace, f"check_trace_{mat_type.__name__}", [x], device)
def test_3d_math_grad(test, device):
rng = np.random.default_rng(123)
# test binary operations
def check_cross(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
out[0] = wp.length(wp.cross(vs[0], vs[1]))
def check_dot(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
out[0] = wp.dot(vs[0], vs[1])
def check_mat33(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
a = vs[0]
b = vs[1]
c = wp.cross(a, b)
m = wp.mat33(a[0], b[0], c[0], a[1], b[1], c[1], a[2], b[2], c[2])
out[0] = wp.determinant(m)
def check_trace_diagonal(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
a = vs[0]
b = vs[1]
c = wp.cross(a, b)
m = wp.mat33(
1.0 / (a[0] + 10.0),
0.0,
0.0,
0.0,
1.0 / (b[1] + 10.0),
0.0,
0.0,
0.0,
1.0 / (c[2] + 10.0),
)
out[0] = wp.trace(m)
def check_rot_rpy(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
v = vs[0]
q = wp.quat_rpy(v[0], v[1], v[2])
out[0] = wp.length(wp.quat_rotate(q, vs[1]))
def check_rot_axis_angle(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
v = wp.normalize(vs[0])
q = wp.quat_from_axis_angle(v, 0.5)
out[0] = wp.length(wp.quat_rotate(q, vs[1]))
def check_rot_quat_inv(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
v = vs[0]
q = wp.normalize(wp.quat(v[0], v[1], v[2], 1.0))
out[0] = wp.length(wp.quat_rotate_inv(q, vs[1]))
# run the tests with 5 different random inputs
for _ in range(5):
x = wp.array(
rng.standard_normal(size=(2, 3), dtype=np.float32), dtype=wp.vec3, device=device, requires_grad=True
)
gradcheck(check_cross, "check_cross_3d", [x], device)
gradcheck(check_dot, "check_dot_3d", [x], device)
gradcheck(check_mat33, "check_mat33_3d", [x], device, eps=2e-2)
gradcheck(check_trace_diagonal, "check_trace_diagonal_3d", [x], device)
gradcheck(check_rot_rpy, "check_rot_rpy_3d", [x], device)
gradcheck(check_rot_axis_angle, "check_rot_axis_angle_3d", [x], device)
gradcheck(check_rot_quat_inv, "check_rot_quat_inv_3d", [x], device)
def test_multi_valued_function_grad(test, device):
rng = np.random.default_rng(123)
@wp.func
def multi_valued(x: float, y: float, z: float):
return wp.sin(x), wp.cos(y) * z, wp.sqrt(wp.abs(z)) / wp.abs(x)
# test multi-valued functions
def check_multi_valued(vs: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)):
tid = wp.tid()
v = vs[tid]
a, b, c = multi_valued(v[0], v[1], v[2])
out[tid] = a + b + c
# run the tests with 5 different random inputs
for _ in range(5):
x = wp.array(
rng.standard_normal(size=(2, 3), dtype=np.float32), dtype=wp.vec3, device=device, requires_grad=True
)
gradcheck(check_multi_valued, "check_multi_valued_3d", [x], device)
def test_mesh_grad(test, device):
pos = wp.array(
[
[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0],
],
dtype=wp.vec3,
device=device,
requires_grad=True,
)
indices = wp.array(
[0, 1, 2, 0, 2, 3, 0, 3, 1, 1, 3, 2],
dtype=wp.int32,
device=device,
)
mesh = wp.Mesh(points=pos, indices=indices)
@wp.func
def compute_triangle_area(mesh_id: wp.uint64, tri_id: int):
mesh = wp.mesh_get(mesh_id)
i, j, k = mesh.indices[tri_id * 3 + 0], mesh.indices[tri_id * 3 + 1], mesh.indices[tri_id * 3 + 2]
a = mesh.points[i]
b = mesh.points[j]
c = mesh.points[k]
return wp.length(wp.cross(b - a, c - a)) * 0.5
@wp.kernel
def compute_area(mesh_id: wp.uint64, out: wp.array(dtype=wp.float32)):
wp.atomic_add(out, 0, compute_triangle_area(mesh_id, wp.tid()))
num_tris = int(len(indices) / 3)
# compute analytical gradient
tape = wp.Tape()
output = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
with tape:
wp.launch(compute_area, dim=num_tris, inputs=[mesh.id], outputs=[output], device=device)
tape.backward(loss=output)
ad_grad = mesh.points.grad.numpy()
# compute finite differences
eps = 1e-3
pos_np = pos.numpy()
fd_grad = np.zeros_like(ad_grad)
for i in range(len(pos)):
for j in range(3):
pos_np[i, j] += eps
pos = wp.array(pos_np, dtype=wp.vec3, device=device)
mesh = wp.Mesh(points=pos, indices=indices)
output.zero_()
wp.launch(compute_area, dim=num_tris, inputs=[mesh.id], outputs=[output], device=device)
f1 = output.numpy()[0]
pos_np[i, j] -= 2 * eps
pos = wp.array(pos_np, dtype=wp.vec3, device=device)
mesh = wp.Mesh(points=pos, indices=indices)
output.zero_()
wp.launch(compute_area, dim=num_tris, inputs=[mesh.id], outputs=[output], device=device)
f2 = output.numpy()[0]
pos_np[i, j] += eps
fd_grad[i, j] = (f1 - f2) / (2 * eps)
assert np.allclose(ad_grad, fd_grad, atol=1e-3)
@wp.func
def name_clash(a: float, b: float) -> float:
return a + b
@wp.func_grad(name_clash)
def adj_name_clash(a: float, b: float, adj_ret: float):
    # names `adj_a` and `adj_b` must not clash with the arguments of the generated adjoint function
adj_a = 0.0
adj_b = 0.0
if a < 0.0:
adj_a = adj_ret
if b > 0.0:
adj_b = adj_ret
wp.adjoint[a] += adj_a
wp.adjoint[b] += adj_b
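# A minimal sketch of the same custom-gradient mechanism, kept separate from the
# name-clash test above: `wp.func_grad` registers a replacement adjoint for a
# `wp.func`, and `wp.adjoint[arg]` accumulates the incoming gradient manually.
# The function below is illustrative only and is not used by any test.
@wp.func
def scale_by_two(x: float) -> float:
    return 2.0 * x
@wp.func_grad(scale_by_two)
def adj_scale_by_two(x: float, adj_ret: float):
    # d(2x)/dx = 2, so propagate twice the incoming adjoint
    wp.adjoint[x] += 2.0 * adj_ret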
@wp.kernel
def name_clash_kernel(
input_a: wp.array(dtype=float),
input_b: wp.array(dtype=float),
output: wp.array(dtype=float),
):
tid = wp.tid()
output[tid] = name_clash(input_a[tid], input_b[tid])
def test_name_clash(test, device):
# tests that no name clashes occur when variable names such as `adj_a` are used in custom gradient code
with wp.ScopedDevice(device):
input_a = wp.array([1.0, -2.0, 3.0], dtype=wp.float32, requires_grad=True)
input_b = wp.array([4.0, 5.0, -6.0], dtype=wp.float32, requires_grad=True)
output = wp.zeros(3, dtype=wp.float32, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(name_clash_kernel, dim=len(input_a), inputs=[input_a, input_b], outputs=[output])
tape.backward(grads={output: wp.array(np.ones(len(input_a), dtype=np.float32))})
assert_np_equal(input_a.grad.numpy(), np.array([0.0, 1.0, 0.0]))
assert_np_equal(input_b.grad.numpy(), np.array([1.0, 1.0, 0.0]))
@wp.struct
class NestedStruct:
v: wp.vec2
@wp.struct
class ParentStruct:
a: float
n: NestedStruct
@wp.func
def noop(a: Any):
pass
@wp.func
def sum2(v: wp.vec2):
return v[0] + v[1]
@wp.kernel
def test_struct_attribute_gradient_kernel(src: wp.array(dtype=float), res: wp.array(dtype=float)):
tid = wp.tid()
p = ParentStruct(src[tid], NestedStruct(wp.vec2(2.0 * src[tid])))
# test that we are not losing gradients when accessing attributes
noop(p.a)
noop(p.n)
noop(p.n.v)
res[tid] = p.a + sum2(p.n.v)
def test_struct_attribute_gradient(test, device):
with wp.ScopedDevice(device):
src = wp.array([1], dtype=float, requires_grad=True)
res = wp.empty_like(src)
tape = wp.Tape()
with tape:
wp.launch(test_struct_attribute_gradient_kernel, dim=1, inputs=[src, res])
res.grad.fill_(1.0)
tape.backward()
test.assertEqual(src.grad.numpy()[0], 5.0)
@wp.kernel
def copy_kernel(a: wp.array(dtype=wp.float32), b: wp.array(dtype=wp.float32)):
tid = wp.tid()
ai = a[tid]
bi = ai
b[tid] = bi
def test_copy(test, device):
with wp.ScopedDevice(device):
a = wp.array([-1.0, 2.0, 3.0], dtype=wp.float32, requires_grad=True)
b = wp.array([0.0, 0.0, 0.0], dtype=wp.float32, requires_grad=True)
        wp.launch(copy_kernel, a.shape[0], inputs=[a, b])
b.grad = wp.array([1.0, 1.0, 1.0], dtype=wp.float32)
wp.launch(copy_kernel, a.shape[0], inputs=[a, b], adjoint=True, adj_inputs=[None, None])
assert_np_equal(a.grad.numpy(), np.array([1.0, 1.0, 1.0]))
@wp.kernel
def aliasing_kernel(a: wp.array(dtype=wp.float32), b: wp.array(dtype=wp.float32)):
tid = wp.tid()
x = a[tid]
y = x
if y > 0.0:
y = x * x
else:
y = x * x * x
b[tid] = y
def test_aliasing(test, device):
with wp.ScopedDevice(device):
a = wp.array([-1.0, 2.0, 3.0], dtype=wp.float32, requires_grad=True)
b = wp.array([0.0, 0.0, 0.0], dtype=wp.float32, requires_grad=True)
        wp.launch(aliasing_kernel, a.shape[0], inputs=[a, b])
b.grad = wp.array([1.0, 1.0, 1.0], dtype=wp.float32)
wp.launch(aliasing_kernel, a.shape[0], inputs=[a, b], adjoint=True, adj_inputs=[None, None])
assert_np_equal(a.grad.numpy(), np.array([3.0, 4.0, 6.0]))
@wp.kernel
def square_kernel(x: wp.array(dtype=float), y: wp.array(dtype=float)):
tid = wp.tid()
y[tid] = x[tid] ** 2.0
def test_gradient_internal(test, device):
with wp.ScopedDevice(device):
a = wp.array([1.0, 2.0, 3.0], dtype=float, requires_grad=True)
b = wp.array([0.0, 0.0, 0.0], dtype=float, requires_grad=True)
wp.launch(square_kernel, a.size, inputs=[a, b])
# use internal gradients (.grad), adj_inputs are None
b.grad = wp.array([1.0, 1.0, 1.0], dtype=float)
wp.launch(square_kernel, a.shape[0], inputs=[a, b], adjoint=True, adj_inputs=[None, None])
assert_np_equal(a.grad.numpy(), np.array([2.0, 4.0, 6.0]))
def test_gradient_external(test, device):
with wp.ScopedDevice(device):
a = wp.array([1.0, 2.0, 3.0], dtype=float, requires_grad=False)
b = wp.array([0.0, 0.0, 0.0], dtype=float, requires_grad=False)
wp.launch(square_kernel, a.size, inputs=[a, b])
# use external gradients passed in adj_inputs
a_grad = wp.array([0.0, 0.0, 0.0], dtype=float)
b_grad = wp.array([1.0, 1.0, 1.0], dtype=float)
wp.launch(square_kernel, a.shape[0], inputs=[a, b], adjoint=True, adj_inputs=[a_grad, b_grad])
assert_np_equal(a_grad.numpy(), np.array([2.0, 4.0, 6.0]))
def test_gradient_precedence(test, device):
with wp.ScopedDevice(device):
a = wp.array([1.0, 2.0, 3.0], dtype=float, requires_grad=True)
b = wp.array([0.0, 0.0, 0.0], dtype=float, requires_grad=True)
wp.launch(square_kernel, a.size, inputs=[a, b])
# if both internal and external gradients are present, the external one takes precedence,
# because it's explicitly passed by the user in adj_inputs
a_grad = wp.array([0.0, 0.0, 0.0], dtype=float)
b_grad = wp.array([1.0, 1.0, 1.0], dtype=float)
wp.launch(square_kernel, a.shape[0], inputs=[a, b], adjoint=True, adj_inputs=[a_grad, b_grad])
assert_np_equal(a_grad.numpy(), np.array([2.0, 4.0, 6.0])) # used
assert_np_equal(a.grad.numpy(), np.array([0.0, 0.0, 0.0])) # unused
devices = get_test_devices()
class TestGrad(unittest.TestCase):
pass
# add_function_test(TestGrad, "test_while_loop_grad", test_while_loop_grad, devices=devices)
add_function_test(TestGrad, "test_for_loop_nested_for_grad", test_for_loop_nested_for_grad, devices=devices)
add_function_test(TestGrad, "test_scalar_grad", test_scalar_grad, devices=devices)
add_function_test(TestGrad, "test_for_loop_grad", test_for_loop_grad, devices=devices)
add_function_test(
TestGrad, "test_for_loop_graph_grad", test_for_loop_graph_grad, devices=get_selected_cuda_test_devices()
)
add_function_test(TestGrad, "test_for_loop_nested_if_grad", test_for_loop_nested_if_grad, devices=devices)
add_function_test(TestGrad, "test_preserve_outputs_grad", test_preserve_outputs_grad, devices=devices)
add_function_test(TestGrad, "test_vector_math_grad", test_vector_math_grad, devices=devices)
add_function_test(TestGrad, "test_matrix_math_grad", test_matrix_math_grad, devices=devices)
add_function_test(TestGrad, "test_3d_math_grad", test_3d_math_grad, devices=devices)
add_function_test(TestGrad, "test_multi_valued_function_grad", test_multi_valued_function_grad, devices=devices)
add_function_test(TestGrad, "test_mesh_grad", test_mesh_grad, devices=devices)
add_function_test(TestGrad, "test_name_clash", test_name_clash, devices=devices)
add_function_test(TestGrad, "test_struct_attribute_gradient", test_struct_attribute_gradient, devices=devices)
add_function_test(TestGrad, "test_copy", test_copy, devices=devices)
add_function_test(TestGrad, "test_aliasing", test_aliasing, devices=devices)
add_function_test(TestGrad, "test_gradient_internal", test_gradient_internal, devices=devices)
add_function_test(TestGrad, "test_gradient_external", test_gradient_external, devices=devices)
add_function_test(TestGrad, "test_gradient_precedence", test_gradient_precedence, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=False)
| 24,431 | Python | 31.794631 | 116 | 0.599157 |
NVIDIA/warp/warp/tests/test_mesh_query_point.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def sample_mesh_query(
mesh: wp.uint64,
query_points: wp.array(dtype=wp.vec3),
query_faces: wp.array(dtype=int),
query_signs: wp.array(dtype=float),
query_dist: wp.array(dtype=float),
):
tid = wp.tid()
face_index = int(0)
face_u = float(0.0)
face_v = float(0.0)
sign = float(0.0)
max_dist = 10012.0
p = query_points[tid]
wp.mesh_query_point(mesh, p, max_dist, sign, face_index, face_u, face_v)
cp = wp.mesh_eval_position(mesh, face_index, face_u, face_v)
query_signs[tid] = sign
query_faces[tid] = face_index
query_dist[tid] = wp.length(cp - p)
query = wp.mesh_query_point(mesh, p, max_dist)
wp.expect_eq(query.sign, sign)
wp.expect_eq(query.face, face_index)
wp.expect_eq(query.u, face_u)
wp.expect_eq(query.v, face_v)
@wp.kernel
def sample_mesh_query_no_sign(
mesh: wp.uint64,
query_points: wp.array(dtype=wp.vec3),
query_faces: wp.array(dtype=int),
query_dist: wp.array(dtype=float),
):
tid = wp.tid()
face_index = int(0)
face_u = float(0.0)
face_v = float(0.0)
max_dist = 10012.0
p = query_points[tid]
wp.mesh_query_point_no_sign(mesh, p, max_dist, face_index, face_u, face_v)
cp = wp.mesh_eval_position(mesh, face_index, face_u, face_v)
query_faces[tid] = face_index
query_dist[tid] = wp.length(cp - p)
query = wp.mesh_query_point_no_sign(mesh, p, max_dist)
wp.expect_eq(query.face, face_index)
wp.expect_eq(query.u, face_u)
wp.expect_eq(query.v, face_v)
@wp.kernel
def sample_mesh_query_sign_normal(
mesh: wp.uint64,
query_points: wp.array(dtype=wp.vec3),
query_faces: wp.array(dtype=int),
query_signs: wp.array(dtype=float),
query_dist: wp.array(dtype=float),
):
tid = wp.tid()
face_index = int(0)
face_u = float(0.0)
face_v = float(0.0)
sign = float(0.0)
max_dist = 10012.0
p = query_points[tid]
wp.mesh_query_point_sign_normal(mesh, p, max_dist, sign, face_index, face_u, face_v)
cp = wp.mesh_eval_position(mesh, face_index, face_u, face_v)
query_signs[tid] = sign
query_faces[tid] = face_index
query_dist[tid] = wp.length(cp - p)
query = wp.mesh_query_point_sign_normal(mesh, p, max_dist)
wp.expect_eq(query.sign, sign)
wp.expect_eq(query.face, face_index)
wp.expect_eq(query.u, face_u)
wp.expect_eq(query.v, face_v)
@wp.kernel
def sample_mesh_query_sign_winding_number(
mesh: wp.uint64,
query_points: wp.array(dtype=wp.vec3),
query_faces: wp.array(dtype=int),
query_signs: wp.array(dtype=float),
query_dist: wp.array(dtype=float),
):
tid = wp.tid()
face_index = int(0)
face_u = float(0.0)
face_v = float(0.0)
sign = float(0.0)
max_dist = 10012.0
p = query_points[tid]
wp.mesh_query_point_sign_winding_number(mesh, p, max_dist, sign, face_index, face_u, face_v)
cp = wp.mesh_eval_position(mesh, face_index, face_u, face_v)
query_signs[tid] = sign
query_faces[tid] = face_index
query_dist[tid] = wp.length(cp - p)
query = wp.mesh_query_point_sign_winding_number(mesh, p, max_dist)
wp.expect_eq(query.sign, sign)
wp.expect_eq(query.face, face_index)
wp.expect_eq(query.u, face_u)
wp.expect_eq(query.v, face_v)
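# closest point on triangle (a, b, c) to point p, using the standard Voronoi-region
# case analysis (as in Ericson, "Real-Time Collision Detection"); returns barycentric
# coordinates (u, v) such that the closest point is u * a + v * b + (1.0 - u - v) * c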
@wp.func
def triangle_closest_point(a: wp.vec3, b: wp.vec3, c: wp.vec3, p: wp.vec3):
ab = b - a
ac = c - a
ap = p - a
d1 = wp.dot(ab, ap)
d2 = wp.dot(ac, ap)
if d1 <= 0.0 and d2 <= 0.0:
return wp.vec2(1.0, 0.0)
bp = p - b
d3 = wp.dot(ab, bp)
d4 = wp.dot(ac, bp)
if d3 >= 0.0 and d4 <= d3:
return wp.vec2(0.0, 1.0)
vc = d1 * d4 - d3 * d2
v = d1 / (d1 - d3)
if vc <= 0.0 and d1 >= 0.0 and d3 <= 0.0:
return wp.vec2(1.0 - v, v)
cp = p - c
d5 = wp.dot(ab, cp)
d6 = wp.dot(ac, cp)
if d6 >= 0.0 and d5 <= d6:
return wp.vec2(0.0, 0.0)
vb = d5 * d2 - d1 * d6
w = d2 / (d2 - d6)
if vb <= 0.0 and d2 >= 0.0 and d6 <= 0.0:
return wp.vec2(1.0 - w, 0.0)
va = d3 * d6 - d5 * d4
w = (d4 - d3) / ((d4 - d3) + (d5 - d6))
if va <= 0.0 and (d4 - d3) >= 0.0 and (d5 - d6) >= 0.0:
return wp.vec2(0.0, 1.0 - w)
denom = 1.0 / (va + vb + vc)
v = vb * denom
w = vc * denom
u = 1.0 - v - w
return wp.vec2(u, v)
@wp.func
def solid_angle(v0: wp.vec3, v1: wp.vec3, v2: wp.vec3, p: wp.vec3):
a = v0 - p
b = v1 - p
c = v2 - p
a_len = wp.length(a)
b_len = wp.length(b)
c_len = wp.length(c)
det = wp.dot(a, wp.cross(b, c))
den = a_len * b_len * c_len + wp.dot(a, b) * c_len + wp.dot(b, c) * a_len + wp.dot(c, a) * b_len
return 2.0 * wp.atan2(det, den)
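# NumPy reference for the same signed solid angle (the atan2 form of Van Oosterom &
# Strackee used above); a small self-contained cross-check that is not exercised by
# the tests and is included only for clarity:
def _solid_angle_np(v0, v1, v2, p):
    a, b, c = v0 - p, v1 - p, v2 - p
    a_len, b_len, c_len = np.linalg.norm(a), np.linalg.norm(b), np.linalg.norm(c)
    det = np.dot(a, np.cross(b, c))
    den = a_len * b_len * c_len + np.dot(a, b) * c_len + np.dot(b, c) * a_len + np.dot(c, a) * b_len
    return 2.0 * math.atan2(det, den)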
@wp.kernel
def sample_mesh_brute(
tri_points: wp.array(dtype=wp.vec3),
tri_indices: wp.array(dtype=int),
tri_count: int,
query_points: wp.array(dtype=wp.vec3),
query_faces: wp.array(dtype=int),
query_signs: wp.array(dtype=float),
query_dist: wp.array(dtype=float),
):
tid = wp.tid()
min_face = int(0)
min_dist = float(1.0e6)
sum_solid_angle = float(0.0)
p = query_points[tid]
for i in range(0, tri_count):
a = tri_points[tri_indices[i * 3 + 0]]
b = tri_points[tri_indices[i * 3 + 1]]
c = tri_points[tri_indices[i * 3 + 2]]
sum_solid_angle += solid_angle(a, b, c, p)
bary = triangle_closest_point(a, b, c, p)
u = bary[0]
v = bary[1]
cp = u * a + v * b + (1.0 - u - v) * c
cp_dist = wp.length(cp - p)
if cp_dist < min_dist:
min_dist = cp_dist
min_face = i
    # for an inside point, the sum of the solid angles should be 4*pi;
    # for an outside point, it should be 0 (the host-side check below treats
    # a summed solid angle greater than 2*pi as inside)
query_faces[tid] = min_face
query_signs[tid] = sum_solid_angle
query_dist[tid] = min_dist
# constructs a grid of evenly spaced particles
def particle_grid(dim_x, dim_y, dim_z, lower, radius, jitter):
rng = np.random.default_rng(123)
points = np.meshgrid(np.linspace(0, dim_x, dim_x), np.linspace(0, dim_y, dim_y), np.linspace(0, dim_z, dim_z))
points_t = np.array((points[0], points[1], points[2])).T * radius * 2.0 + np.array(lower)
points_t = points_t + rng.random(points_t.shape) * radius * jitter
return points_t.reshape((-1, 3))
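# (the result is a (dim_x * dim_y * dim_z, 3) array of points offset from `lower`,
#  spaced roughly 2 * radius apart, with optional per-coordinate jitter of up to
#  radius * jitter)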
# triangulate a list of polygon face indices
def triangulate(face_counts, face_indices):
num_tris = np.sum(np.subtract(face_counts, 2))
num_tri_vtx = num_tris * 3
tri_indices = np.zeros(num_tri_vtx, dtype=int)
ctr = 0
wedgeIdx = 0
for nb in face_counts:
for i in range(nb - 2):
tri_indices[ctr] = face_indices[wedgeIdx]
tri_indices[ctr + 1] = face_indices[wedgeIdx + i + 1]
tri_indices[ctr + 2] = face_indices[wedgeIdx + i + 2]
ctr += 3
wedgeIdx += nb
return tri_indices
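# Example (illustrative only, not called by the tests): fan-triangulating a single
# quad with vertex indices [0, 1, 2, 3] yields triangles (0, 1, 2) and (0, 2, 3):
def _triangulate_quad_example():
    tris = triangulate([4], [0, 1, 2, 3])
    assert np.array_equal(tris, np.array([0, 1, 2, 0, 2, 3]))
    return tris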
@unittest.skipUnless(USD_AVAILABLE, "Requires usd-core")
def test_mesh_query_point(test, device):
from pxr import Usd, UsdGeom
mesh = Usd.Stage.Open(os.path.abspath(os.path.join(os.path.dirname(__file__), "assets/spiky.usd")))
mesh_geom = UsdGeom.Mesh(mesh.GetPrimAtPath("/Cube/Cube"))
mesh_counts = mesh_geom.GetFaceVertexCountsAttr().Get()
mesh_indices = mesh_geom.GetFaceVertexIndicesAttr().Get()
tri_indices = triangulate(mesh_counts, mesh_indices)
mesh_points = wp.array(np.array(mesh_geom.GetPointsAttr().Get()), dtype=wp.vec3, device=device)
mesh_indices = wp.array(np.array(tri_indices), dtype=int, device=device)
# create mesh
mesh = wp.Mesh(points=mesh_points, velocities=None, indices=mesh_indices, support_winding_number=True)
p = particle_grid(32, 32, 32, np.array([-1.1, -1.1, -1.1]), 0.05, 0.0)
query_count = len(p)
query_points = wp.array(p, dtype=wp.vec3, device=device)
signs_query = wp.zeros(query_count, dtype=float, device=device)
faces_query = wp.zeros(query_count, dtype=int, device=device)
dist_query = wp.zeros(query_count, dtype=float, device=device)
faces_query_no_sign = wp.zeros(query_count, dtype=int, device=device)
dist_query_no_sign = wp.zeros(query_count, dtype=float, device=device)
signs_query_normal = wp.zeros(query_count, dtype=float, device=device)
faces_query_normal = wp.zeros(query_count, dtype=int, device=device)
dist_query_normal = wp.zeros(query_count, dtype=float, device=device)
signs_query_winding_number = wp.zeros(query_count, dtype=float, device=device)
faces_query_winding_number = wp.zeros(query_count, dtype=int, device=device)
dist_query_winding_number = wp.zeros(query_count, dtype=float, device=device)
signs_brute = wp.zeros(query_count, dtype=float, device=device)
faces_brute = wp.zeros(query_count, dtype=int, device=device)
dist_brute = wp.zeros(query_count, dtype=float, device=device)
wp.launch(
kernel=sample_mesh_query,
dim=query_count,
inputs=[mesh.id, query_points, faces_query, signs_query, dist_query],
device=device,
)
wp.launch(
kernel=sample_mesh_query_no_sign,
dim=query_count,
inputs=[mesh.id, query_points, faces_query_no_sign, dist_query_no_sign],
device=device,
)
wp.launch(
kernel=sample_mesh_query_sign_normal,
dim=query_count,
inputs=[mesh.id, query_points, faces_query_normal, signs_query_normal, dist_query_normal],
device=device,
)
wp.launch(
kernel=sample_mesh_query_sign_winding_number,
dim=query_count,
inputs=[
mesh.id,
query_points,
faces_query_winding_number,
signs_query_winding_number,
dist_query_winding_number,
],
device=device,
)
wp.launch(
kernel=sample_mesh_brute,
dim=query_count,
inputs=[
mesh_points,
mesh_indices,
int(len(mesh_indices) / 3),
query_points,
faces_brute,
signs_brute,
dist_brute,
],
device=device,
)
signs_query = signs_query.numpy()
faces_query = faces_query.numpy()
dist_query = dist_query.numpy()
faces_query_no_sign = faces_query_no_sign.numpy()
dist_query_no_sign = dist_query_no_sign.numpy()
signs_query_normal = signs_query_normal.numpy()
faces_query_normal = faces_query_normal.numpy()
dist_query_normal = dist_query_normal.numpy()
signs_query_winding_number = signs_query_winding_number.numpy()
faces_query_winding_number = faces_query_winding_number.numpy()
dist_query_winding_number = dist_query_winding_number.numpy()
signs_brute = signs_brute.numpy()
faces_brute = faces_brute.numpy()
dist_brute = dist_brute.numpy()
query_points = query_points.numpy()
inside_query = [[0.0, 0.0, 0.0]]
inside_query_normal = [[0.0, 0.0, 0.0]]
inside_query_winding_number = [[0.0, 0.0, 0.0]]
inside_brute = [[0.0, 0.0, 0.0]]
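    # each list is seeded with a dummy origin entry so the arrays below are never
    # empty; the matching dummy rows cancel out in the sign_error comparison further down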
for i in range(query_count):
if signs_query[i] < 0.0:
inside_query.append(query_points[i].tolist())
if signs_query_normal[i] < 0.0:
inside_query_normal.append(query_points[i].tolist())
if signs_query_winding_number[i] < 0.0:
inside_query_winding_number.append(query_points[i].tolist())
if signs_brute[i] > math.pi * 2.0:
inside_brute.append(query_points[i].tolist())
inside_query = np.array(inside_query)
inside_query_normal = np.array(inside_query_normal)
inside_query_winding_number = np.array(inside_query_winding_number)
inside_brute = np.array(inside_brute)
# import warp.render
# stage = warp.render.UsdRenderer("tests/outputs/test_mesh_query_point.usd")
# radius = 0.1
# stage.begin_frame(0.0)
# stage.render_mesh(points=mesh_points.numpy(), indices=mesh_indices.numpy(), name="mesh")
# stage.render_points(points=inside_query, radius=radius, name="query")
# stage.render_points(points=inside_brute, radius=radius, name="brute")
# stage.render_points(points=query_points, radius=radius, name="all")
# stage.end_frame()
# stage.save()
test.assertTrue(len(inside_query) == len(inside_brute))
test.assertTrue(len(inside_query_normal) == len(inside_brute))
test.assertTrue(len(inside_query_winding_number) == len(inside_brute))
tolerance = 1.5e-4
dist_error = np.max(np.abs(dist_query - dist_brute))
sign_error = np.max(np.abs(inside_query - inside_brute))
test.assertTrue(dist_error < tolerance, f"mesh_query_point dist_error is {dist_error} which is >= {tolerance}")
test.assertTrue(sign_error < tolerance, f"mesh_query_point sign_error is {sign_error} which is >= {tolerance}")
dist_error = np.max(np.abs(dist_query_no_sign - dist_brute))
test.assertTrue(
dist_error < tolerance, f"mesh_query_point_no_sign dist_error is {dist_error} which is >= {tolerance}"
)
dist_error = np.max(np.abs(dist_query_normal - dist_brute))
sign_error = np.max(np.abs(inside_query_normal - inside_brute))
test.assertTrue(
dist_error < tolerance, f"mesh_query_point_sign_normal dist_error is {dist_error} which is >= {tolerance}"
)
test.assertTrue(
sign_error < tolerance, f"mesh_query_point_sign_normal sign_error is {sign_error} which is >= {tolerance}"
)
dist_error = np.max(np.abs(dist_query_winding_number - dist_brute))
sign_error = np.max(np.abs(inside_query_winding_number - inside_brute))
test.assertTrue(
dist_error < tolerance,
f"mesh_query_point_sign_winding_number dist_error is {dist_error} which is >= {tolerance}",
)
test.assertTrue(
sign_error < tolerance,
f"mesh_query_point_sign_winding_number sign_error is {sign_error} which is >= {tolerance}",
)
@wp.kernel
def mesh_query_point_loss(
mesh: wp.uint64,
query_points: wp.array(dtype=wp.vec3),
projected_points: wp.array(dtype=wp.vec3),
loss: wp.array(dtype=float),
):
tid = wp.tid()
face_index = int(0)
face_u = float(0.0)
face_v = float(0.0)
sign = float(0.0)
max_dist = 10012.0
p = query_points[tid]
wp.mesh_query_point(mesh, p, max_dist, sign, face_index, face_u, face_v)
q = wp.mesh_eval_position(mesh, face_index, face_u, face_v)
projected_points[tid] = q
dist = wp.length(wp.sub(p, q))
loss[tid] = dist
@unittest.skipUnless(USD_AVAILABLE, "Requires usd-core")
def test_adj_mesh_query_point(test, device):
from pxr import Usd, UsdGeom
mesh = Usd.Stage.Open(os.path.abspath(os.path.join(os.path.dirname(__file__), "assets/torus.usda")))
mesh_geom = UsdGeom.Mesh(mesh.GetPrimAtPath("/World/Torus"))
mesh_counts = mesh_geom.GetFaceVertexCountsAttr().Get()
mesh_indices = mesh_geom.GetFaceVertexIndicesAttr().Get()
tri_indices = triangulate(mesh_counts, mesh_indices)
mesh_points = wp.array(np.array(mesh_geom.GetPointsAttr().Get()), dtype=wp.vec3, device=device)
mesh_indices = wp.array(np.array(tri_indices), dtype=int, device=device)
# test tri
# print("Testing Single Triangle")
# mesh_points = wp.array(np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0], [0.0, 2.0, 0.0]]), dtype=wp.vec3, device=device)
# mesh_indices = wp.array(np.array([0,1,2]), dtype=int, device=device)
# create mesh
mesh = wp.Mesh(points=mesh_points, velocities=None, indices=mesh_indices, support_winding_number=True)
# p = particle_grid(32, 32, 32, np.array([-5.0, -5.0, -5.0]), 0.1, 0.1)*100.0
p = wp.vec3(50.0, 50.0, 50.0)
tape = wp.Tape()
# analytic gradients
with tape:
query_points = wp.array(p, dtype=wp.vec3, device=device, requires_grad=True)
projected_points = wp.zeros(n=1, dtype=wp.vec3, device=device)
loss = wp.zeros(n=1, dtype=float, device=device, requires_grad=True)
wp.launch(
kernel=mesh_query_point_loss, dim=1, inputs=[mesh.id, query_points, projected_points, loss], device=device
)
tape.backward(loss=loss)
analytic = tape.gradients[query_points].numpy().flatten()
# numeric gradients
eps = 1.0e-3
loss_values = []
numeric = np.zeros(3)
offset_query_points = [
wp.vec3(p[0] - eps, p[1], p[2]),
wp.vec3(p[0] + eps, p[1], p[2]),
wp.vec3(p[0], p[1] - eps, p[2]),
wp.vec3(p[0], p[1] + eps, p[2]),
wp.vec3(p[0], p[1], p[2] - eps),
wp.vec3(p[0], p[1], p[2] + eps),
]
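    # offsets are ordered (-eps, +eps) per axis to match the central difference
    # (l_1 - l_0) / (2.0 * eps) computed below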
for i in range(6):
q = offset_query_points[i]
query_points = wp.array(q, dtype=wp.vec3, device=device)
projected_points = wp.zeros(n=1, dtype=wp.vec3, device=device)
loss = wp.zeros(n=1, dtype=float, device=device)
wp.launch(
kernel=mesh_query_point_loss, dim=1, inputs=[mesh.id, query_points, projected_points, loss], device=device
)
loss_values.append(loss.numpy()[0])
for i in range(3):
l_0 = loss_values[i * 2]
l_1 = loss_values[i * 2 + 1]
gradient = (l_1 - l_0) / (2.0 * eps)
numeric[i] = gradient
error = ((analytic - numeric) * (analytic - numeric)).sum(axis=0)
tolerance = 1.0e-3
test.assertTrue(error < tolerance, f"error is {error} which is >= {tolerance}")
@wp.kernel
def sample_furthest_points(mesh: wp.uint64, query_points: wp.array(dtype=wp.vec3), query_result: wp.array(dtype=float)):
tid = wp.tid()
p = query_points[tid]
face = int(0)
bary_u = float(0.0)
bary_v = float(0.0)
if wp.mesh_query_furthest_point_no_sign(mesh, p, 0.0, face, bary_u, bary_v):
closest = wp.mesh_eval_position(mesh, face, bary_u, bary_v)
query_result[tid] = wp.length_sq(p - closest)
query = wp.mesh_query_furthest_point_no_sign(mesh, p, 0.0)
wp.expect_eq(query.face, face)
wp.expect_eq(query.u, bary_u)
wp.expect_eq(query.v, bary_v)
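# brute-force reference: the point on a triangle mesh furthest from a query point is
# always attained at a mesh vertex (squared distance is convex over each triangle),
# so scanning the vertices is sufficient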
@wp.kernel
def sample_furthest_points_brute(
mesh_points: wp.array(dtype=wp.vec3), query_points: wp.array(dtype=wp.vec3), query_result: wp.array(dtype=float)
):
tid = wp.tid()
p = query_points[tid]
max_dist_sq = float(0.0)
for i in range(mesh_points.shape[0]):
dist_sq = wp.length_sq(p - mesh_points[i])
if dist_sq > max_dist_sq:
max_dist_sq = dist_sq
query_result[tid] = max_dist_sq
@unittest.skipUnless(USD_AVAILABLE, "Requires usd-core")
def test_mesh_query_furthest_point(test, device):
from pxr import Usd, UsdGeom
mesh = Usd.Stage.Open(os.path.abspath(os.path.join(os.path.dirname(__file__), "assets/spiky.usd")))
mesh_geom = UsdGeom.Mesh(mesh.GetPrimAtPath("/Cube/Cube"))
mesh_counts = mesh_geom.GetFaceVertexCountsAttr().Get()
mesh_indices = mesh_geom.GetFaceVertexIndicesAttr().Get()
tri_indices = triangulate(mesh_counts, mesh_indices)
mesh_points = wp.array(np.array(mesh_geom.GetPointsAttr().Get()), dtype=wp.vec3, device=device)
mesh_indices = wp.array(np.array(tri_indices), dtype=int, device=device)
# create mesh
mesh = wp.Mesh(points=mesh_points, indices=mesh_indices)
p = particle_grid(32, 32, 32, np.array([-1.1, -1.1, -1.1]), 0.05, 0.0)
query_count = len(p)
query_points = wp.array(p, dtype=wp.vec3, device=device)
dist_query = wp.zeros(query_count, dtype=float, device=device)
dist_brute = wp.zeros(query_count, dtype=float, device=device)
wp.launch(sample_furthest_points, dim=query_count, inputs=[mesh.id, query_points, dist_query], device=device)
wp.launch(
sample_furthest_points_brute, dim=query_count, inputs=[mesh_points, query_points, dist_brute], device=device
)
assert_np_equal(dist_query.numpy(), dist_brute.numpy(), tol=1.0e-3)
devices = get_test_devices()
class TestMeshQueryPoint(unittest.TestCase):
def test_mesh_query_point_codegen_adjoints_with_select(self):
def kernel_fn(
mesh: wp.uint64,
):
v = wp.vec3(0.0, 0.0, 0.0)
d = 1e-6
if True:
query_1 = wp.mesh_query_point(mesh, v, d)
query_2 = wp.mesh_query_point_no_sign(mesh, v, d)
query_3 = wp.mesh_query_furthest_point_no_sign(mesh, v, d)
query_4 = wp.mesh_query_point_sign_normal(mesh, v, d)
query_5 = wp.mesh_query_point_sign_winding_number(mesh, v, d)
else:
query_1 = wp.mesh_query_point(mesh, v, d)
query_2 = wp.mesh_query_point_no_sign(mesh, v, d)
query_3 = wp.mesh_query_furthest_point_no_sign(mesh, v, d)
query_4 = wp.mesh_query_point_sign_normal(mesh, v, d)
query_5 = wp.mesh_query_point_sign_winding_number(mesh, v, d)
wp.Kernel(func=kernel_fn)
add_function_test(TestMeshQueryPoint, "test_mesh_query_point", test_mesh_query_point, devices=devices)
add_function_test(TestMeshQueryPoint, "test_mesh_query_furthest_point", test_mesh_query_furthest_point, devices=devices)
add_function_test(TestMeshQueryPoint, "test_adj_mesh_query_point", test_adj_mesh_query_point, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 21,751 | Python | 30.479016 | 121 | 0.620247 |
NVIDIA/warp/warp/tests/test_atomic.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
# construct kernel + test function for atomic ops on each vec/matrix type
def make_atomic_test(type):
def test_atomic_kernel(
out_add: wp.array(dtype=type),
out_min: wp.array(dtype=type),
out_max: wp.array(dtype=type),
val: wp.array(dtype=type),
):
tid = wp.tid()
wp.atomic_add(out_add, 0, val[tid])
wp.atomic_min(out_min, 0, val[tid])
wp.atomic_max(out_max, 0, val[tid])
    # register a kernel from a plain Python function (without the @wp.kernel decorator)
# this lets us register the same function definition
# against multiple symbols, with different arg types
kernel = wp.Kernel(func=test_atomic_kernel, key=f"test_atomic_{type.__name__}_kernel")
def test_atomic(test, device):
n = 1024
rng = np.random.default_rng(42)
if type == wp.int32:
base = (rng.random(size=1, dtype=np.float32) * 100.0).astype(np.int32)
val = (rng.random(size=n, dtype=np.float32) * 100.0).astype(np.int32)
elif type == wp.float32:
base = rng.random(size=1, dtype=np.float32)
val = rng.random(size=n, dtype=np.float32)
else:
base = rng.random(size=(1, *type._shape_), dtype=float)
val = rng.random(size=(n, *type._shape_), dtype=float)
add_array = wp.array(base, dtype=type, device=device, requires_grad=True)
min_array = wp.array(base, dtype=type, device=device, requires_grad=True)
max_array = wp.array(base, dtype=type, device=device, requires_grad=True)
add_array.zero_()
min_array.fill_(10000)
max_array.fill_(-10000)
val_array = wp.array(val, dtype=type, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(kernel, n, inputs=[add_array, min_array, max_array, val_array], device=device)
assert_np_equal(add_array.numpy(), np.sum(val, axis=0), tol=1.0e-2)
assert_np_equal(min_array.numpy(), np.min(val, axis=0), tol=1.0e-2)
assert_np_equal(max_array.numpy(), np.max(val, axis=0), tol=1.0e-2)
if type != wp.int32:
add_array.grad.fill_(1)
tape.backward()
assert_np_equal(val_array.grad.numpy(), np.ones_like(val))
tape.zero()
min_array.grad.fill_(1)
tape.backward()
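            # the adjoint of atomic_min routes the incoming gradient to the element that
            # attained the minimum, so the expected gradient is a one-hot mask at the
            # argmin along the batch axis (and likewise for atomic_max below)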
min_grad_array = np.zeros_like(val)
argmin = val.argmin(axis=0)
if val.ndim == 1:
min_grad_array[argmin] = 1
elif val.ndim == 2:
for i in range(val.shape[1]):
min_grad_array[argmin[i], i] = 1
elif val.ndim == 3:
for i in range(val.shape[1]):
for j in range(val.shape[2]):
min_grad_array[argmin[i, j], i, j] = 1
assert_np_equal(val_array.grad.numpy(), min_grad_array)
tape.zero()
max_array.grad.fill_(1)
tape.backward()
max_grad_array = np.zeros_like(val)
argmax = val.argmax(axis=0)
if val.ndim == 1:
max_grad_array[argmax] = 1
elif val.ndim == 2:
for i in range(val.shape[1]):
max_grad_array[argmax[i], i] = 1
elif val.ndim == 3:
for i in range(val.shape[1]):
for j in range(val.shape[2]):
max_grad_array[argmax[i, j], i, j] = 1
assert_np_equal(val_array.grad.numpy(), max_grad_array)
return test_atomic
# generate test functions for atomic types
test_atomic_int = make_atomic_test(wp.int32)
test_atomic_float = make_atomic_test(wp.float32)
test_atomic_vec2 = make_atomic_test(wp.vec2)
test_atomic_vec3 = make_atomic_test(wp.vec3)
test_atomic_vec4 = make_atomic_test(wp.vec4)
test_atomic_mat22 = make_atomic_test(wp.mat22)
test_atomic_mat33 = make_atomic_test(wp.mat33)
test_atomic_mat44 = make_atomic_test(wp.mat44)
devices = get_test_devices()
class TestAtomic(unittest.TestCase):
pass
add_function_test(TestAtomic, "test_atomic_int", test_atomic_int, devices=devices)
add_function_test(TestAtomic, "test_atomic_float", test_atomic_float, devices=devices)
add_function_test(TestAtomic, "test_atomic_vec2", test_atomic_vec2, devices=devices)
add_function_test(TestAtomic, "test_atomic_vec3", test_atomic_vec3, devices=devices)
add_function_test(TestAtomic, "test_atomic_vec4", test_atomic_vec4, devices=devices)
add_function_test(TestAtomic, "test_atomic_mat22", test_atomic_mat22, devices=devices)
add_function_test(TestAtomic, "test_atomic_mat33", test_atomic_mat33, devices=devices)
add_function_test(TestAtomic, "test_atomic_mat44", test_atomic_mat44, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 5,349 | Python | 37.214285 | 100 | 0.619742 |
NVIDIA/warp/warp/tests/test_matmul.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
from typing import Any
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
wp.init() # For wp.context.runtime.core.is_cutlass_enabled()
class gemm_test_bed_runner:
def __init__(self, dtype, device):
self.dtype = dtype
self.device = device
def alloc(self, m, n, k, batch_count):
rng = np.random.default_rng(42)
low = -4.5
high = 3.5
if batch_count == 1:
A = wp.array2d(
np.ceil(rng.uniform(low=low, high=high, size=(m, k))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
B = wp.array2d(
np.ceil(rng.uniform(low=low, high=high, size=(k, n))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
C = wp.array2d(
np.ceil(rng.uniform(low=low, high=high, size=(m, n))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
D = wp.array2d(np.zeros((m, n)), dtype=self.dtype, device=self.device, requires_grad=True)
else:
A = wp.array3d(
np.ceil(rng.uniform(low=low, high=high, size=(batch_count, m, k))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
B = wp.array3d(
np.ceil(rng.uniform(low=low, high=high, size=(batch_count, k, n))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
C = wp.array3d(
np.ceil(rng.uniform(low=low, high=high, size=(batch_count, m, n))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
D = wp.array3d(np.zeros((batch_count, m, n)), dtype=self.dtype, device=self.device, requires_grad=True)
return A, B, C, D
def run_and_verify(self, m, n, k, batch_count, alpha, beta):
A, B, C, D = self.alloc(m, n, k, batch_count)
ones = wp.zeros_like(D)
ones.fill_(1.0)
if batch_count == 1:
tape = wp.Tape()
with tape:
wp.matmul(A, B, C, D, alpha, beta, False)
tape.backward(grads={D: ones})
D_np = alpha * (A.numpy() @ B.numpy()) + beta * C.numpy()
assert_np_equal(D.numpy(), D_np)
adj_A_np = alpha * np.matmul(ones.numpy(), B.numpy().transpose())
adj_B_np = alpha * (A.numpy().transpose() @ ones.numpy())
adj_C_np = beta * ones.numpy()
else:
tape = wp.Tape()
with tape:
wp.batched_matmul(A, B, C, D, alpha, beta, False)
tape.backward(grads={D: ones})
D_np = alpha * np.matmul(A.numpy(), B.numpy()) + beta * C.numpy()
assert_np_equal(D.numpy(), D_np)
adj_A_np = alpha * np.matmul(ones.numpy(), B.numpy().transpose((0, 2, 1)))
adj_B_np = alpha * np.matmul(A.numpy().transpose((0, 2, 1)), ones.numpy())
adj_C_np = beta * ones.numpy()
assert_np_equal(A.grad.numpy(), adj_A_np)
assert_np_equal(B.grad.numpy(), adj_B_np)
assert_np_equal(C.grad.numpy(), adj_C_np)
def run(self):
Ms = [64, 128, 512]
Ns = [64, 128, 512]
Ks = [64, 128, 512]
batch_counts = [1, 4]
betas = [0.0, 1.0]
alpha = 1.0
for batch_count in batch_counts:
for m in Ms:
for n in Ns:
for k in Ks:
for beta in betas:
self.run_and_verify(m, n, k, batch_count, alpha, beta)
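# The reference gradients used by both runners follow from D = alpha * A @ B + beta * C:
# with an upstream gradient G = dL/dD (all ones here),
#   dL/dA = alpha * G @ B^T,  dL/dB = alpha * A^T @ G,  dL/dC = beta * G.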
class gemm_test_bed_runner_transpose:
def __init__(self, dtype, device):
self.dtype = dtype
self.device = device
def alloc(self, m, n, k, batch_count):
rng = np.random.default_rng(42)
low = -4.5
high = 3.5
if batch_count == 1:
A = wp.array2d(
np.ceil(rng.uniform(low=low, high=high, size=(m, k))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
B = wp.array2d(
np.ceil(rng.uniform(low=low, high=high, size=(k, n))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
C = wp.array2d(
np.ceil(rng.uniform(low=low, high=high, size=(m, n))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
D = wp.array2d(np.zeros((m, n)), dtype=self.dtype, device=self.device, requires_grad=True)
AT = wp.array2d(A.numpy().transpose([1, 0]), dtype=self.dtype, device=self.device, requires_grad=True)
BT = wp.array2d(B.numpy().transpose([1, 0]), dtype=self.dtype, device=self.device, requires_grad=True)
else:
A = wp.array3d(
np.ceil(rng.uniform(low=low, high=high, size=(batch_count, m, k))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
B = wp.array3d(
np.ceil(rng.uniform(low=low, high=high, size=(batch_count, k, n))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
C = wp.array3d(
np.ceil(rng.uniform(low=low, high=high, size=(batch_count, m, n))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
D = wp.array3d(np.zeros((batch_count, m, n)), dtype=self.dtype, device=self.device, requires_grad=True)
AT = wp.array3d(A.numpy().transpose([0, 2, 1]), dtype=self.dtype, device=self.device, requires_grad=True)
BT = wp.array3d(B.numpy().transpose([0, 2, 1]), dtype=self.dtype, device=self.device, requires_grad=True)
return A, B, C, D, AT, BT
def run_and_verify(self, m, n, k, batch_count, alpha, beta):
A, B, C1, D1, AT1, BT1 = self.alloc(m, n, k, batch_count)
C2 = wp.clone(C1)
C3 = wp.clone(C1)
D2 = wp.clone(D1)
D3 = wp.clone(D1)
AT2 = wp.clone(AT1)
BT2 = wp.clone(BT1)
ones1 = wp.zeros_like(D1)
ones1.fill_(1.0)
ones2 = wp.zeros_like(D2)
ones2.fill_(1.0)
ones3 = wp.zeros_like(D3)
ones3.fill_(1.0)
if batch_count == 1:
ATT1 = AT1.transpose([1, 0])
BTT1 = BT1.transpose([1, 0])
ATT2 = AT2.transpose([1, 0])
BTT2 = BT2.transpose([1, 0])
tape = wp.Tape()
with tape:
wp.matmul(A, BTT1, C1, D1, alpha, beta, False)
wp.matmul(ATT1, B, C2, D2, alpha, beta, False)
wp.matmul(ATT2, BTT2, C3, D3, alpha, beta, False)
tape.backward(grads={D1: ones1, D2: ones2, D3: ones3})
D_np = alpha * (A.numpy() @ B.numpy()) + beta * C1.numpy()
assert_np_equal(D1.numpy(), D_np)
assert_np_equal(D2.numpy(), D_np)
assert_np_equal(D3.numpy(), D_np)
adj_A_np = alpha * (ones1.numpy() @ B.numpy().transpose())
adj_B_np = alpha * (A.numpy().transpose() @ ones1.numpy())
adj_C_np = beta * ones1.numpy()
else:
ATT1 = AT1.transpose([0, 2, 1])
BTT1 = BT1.transpose([0, 2, 1])
ATT2 = AT2.transpose([0, 2, 1])
BTT2 = BT2.transpose([0, 2, 1])
tape = wp.Tape()
with tape:
wp.batched_matmul(A, BTT1, C1, D1, alpha, beta, False)
wp.batched_matmul(ATT1, B, C2, D2, alpha, beta, False)
wp.batched_matmul(ATT2, BTT2, C3, D3, alpha, beta, False)
tape.backward(grads={D1: ones1, D2: ones2, D3: ones3})
D_np = alpha * np.matmul(A.numpy(), B.numpy()) + beta * C1.numpy()
assert_np_equal(D1.numpy(), D_np)
assert_np_equal(D2.numpy(), D_np)
assert_np_equal(D3.numpy(), D_np)
adj_A_np = alpha * np.matmul(ones1.numpy(), B.numpy().transpose((0, 2, 1)))
adj_B_np = alpha * np.matmul(A.numpy().transpose((0, 2, 1)), ones1.numpy())
adj_C_np = beta * ones1.numpy()
assert_np_equal(A.grad.numpy(), adj_A_np)
assert_np_equal(ATT1.grad.numpy(), adj_A_np)
assert_np_equal(ATT2.grad.numpy(), adj_A_np)
assert_np_equal(B.grad.numpy(), adj_B_np)
assert_np_equal(BTT1.grad.numpy(), adj_B_np)
assert_np_equal(BTT2.grad.numpy(), adj_B_np)
assert_np_equal(C1.grad.numpy(), adj_C_np)
assert_np_equal(C2.grad.numpy(), adj_C_np)
assert_np_equal(C3.grad.numpy(), adj_C_np)
def run(self):
m = 16
n = 32
k = 64
batch_counts = [1, 4]
beta = 1.0
alpha = 1.0
for batch_count in batch_counts:
self.run_and_verify(m, n, k, batch_count, alpha, beta)
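# The transpose runner feeds wp.matmul / wp.batched_matmul transposed (non-contiguous)
# array views and checks that both the forward results and the accumulated gradients
# match the references computed from the contiguous arrays.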
# NOTE: F16 tests are slow due to the performance of the reference numpy F16 matmuls performed on CPU.
def test_f16(test, device):
gemm_test_bed_runner(wp.float16, device).run()
gemm_test_bed_runner_transpose(wp.float16, device).run()
@unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
def test_f32(test, device):
gemm_test_bed_runner(wp.float32, device).run()
gemm_test_bed_runner_transpose(wp.float32, device).run()
@unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
def test_f64(test, device):
gemm_test_bed_runner(wp.float64, device).run()
gemm_test_bed_runner_transpose(wp.float64, device).run()
@wp.kernel
def matrix_sum_kernel(arr: wp.array2d(dtype=float), loss: wp.array(dtype=float)):
i, j = wp.tid()
wp.atomic_add(loss, 0, arr[i, j])
@unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
def test_tape(test, device):
rng = np.random.default_rng(42)
low = -4.5
high = 3.5
m = 64
n = 128
k = 256
A = wp.array2d(
np.ceil(rng.uniform(low=low, high=high, size=(m, k))), dtype=float, device=device, requires_grad=True
)
B = wp.array2d(
np.ceil(rng.uniform(low=low, high=high, size=(k, n))), dtype=float, device=device, requires_grad=True
)
C = wp.array2d(
np.ceil(rng.uniform(low=low, high=high, size=(m, n))), dtype=float, device=device, requires_grad=True
)
D = wp.array2d(np.zeros((m, n)), dtype=float, device=device, requires_grad=True)
loss = wp.zeros(1, dtype=float, device=device, requires_grad=True)
# test tape
tape = wp.Tape()
with tape:
wp.matmul(A, B, C, D)
wp.launch(matrix_sum_kernel, dim=(m, n), inputs=[D, loss], device=device)
tape.backward(loss=loss)
A_grad = A.grad.numpy()
tape.reset()
# test adjoint
D.grad = wp.ones((m, n), dtype=float, device=device)
wp.adj_matmul(A, B, C, A.grad, B.grad, C.grad, D.grad)
assert_np_equal(A_grad, A.grad.numpy())
# test zero
tape.zero()
assert_array_equal(A.grad, wp.zeros_like(A))
@unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
def test_operator(test, device):
rng = np.random.default_rng(42)
low = -4.5
high = 3.5
m = 64
n = 128
k = 256
A = wp.array2d(
np.ceil(rng.uniform(low=low, high=high, size=(m, k))), dtype=float, device=device, requires_grad=True
)
B = wp.array2d(
np.ceil(rng.uniform(low=low, high=high, size=(k, n))), dtype=float, device=device, requires_grad=True
)
loss = wp.zeros(1, dtype=float, device=device, requires_grad=True)
# test tape
tape = wp.Tape()
with tape:
D = A @ B
wp.launch(matrix_sum_kernel, dim=(m, n), inputs=[D, loss], device=device)
tape.backward(loss=loss)
# test adjoint
D.grad = wp.ones((m, n), dtype=float, device=device)
B_transpose = wp.array2d(B.transpose().numpy(), dtype=float, device=device)
adj_A = D.grad @ B_transpose
assert_array_equal(adj_A, A.grad)
# test zero
tape.zero()
assert_array_equal(A.grad, wp.zeros_like(A))
@unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
def test_large_batch_count(test, device):
rng = np.random.default_rng(42)
low = -4.5
high = 3.5
m = 2
n = 3
k = 4
batch_count = 65535 * 2 + int(65535 / 2)
A = wp.array3d(
np.ceil(rng.uniform(low=low, high=high, size=(batch_count, m, k))),
dtype=float,
device=device,
requires_grad=True,
)
B = wp.array3d(
np.ceil(rng.uniform(low=low, high=high, size=(batch_count, k, n))),
dtype=float,
device=device,
requires_grad=True,
)
C = wp.array3d(
np.ceil(rng.uniform(low=low, high=high, size=(batch_count, m, n))),
dtype=float,
device=device,
requires_grad=True,
)
D = wp.array3d(np.zeros((batch_count, m, n)), dtype=float, device=device, requires_grad=True)
ones = wp.zeros_like(D)
ones.fill_(1.0)
alpha = 1.0
beta = 1.0
tape = wp.Tape()
with tape:
wp.batched_matmul(A, B, C, D, alpha=alpha, beta=beta, allow_tf32x3_arith=False)
tape.backward(grads={D: ones})
D_np = alpha * np.matmul(A.numpy(), B.numpy()) + beta * C.numpy()
assert_np_equal(D.numpy(), D_np)
adj_A_np = alpha * np.matmul(ones.numpy(), B.numpy().transpose((0, 2, 1)))
adj_B_np = alpha * np.matmul(A.numpy().transpose((0, 2, 1)), ones.numpy())
adj_C_np = beta * ones.numpy()
assert_np_equal(A.grad.numpy(), adj_A_np)
assert_np_equal(B.grad.numpy(), adj_B_np)
assert_np_equal(C.grad.numpy(), adj_C_np)
@unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
def test_adjoint_accumulation(test, device):
a_np = np.ones(shape=(2, 3))
b_np = np.ones(shape=(3, 2))
c_np = np.zeros(shape=(2, 2))
d_np = np.zeros(shape=(2, 2))
a_wp = wp.from_numpy(a_np, dtype=float, requires_grad=True, device=device)
b_wp = wp.from_numpy(b_np, dtype=float, requires_grad=True, device=device)
c_wp = wp.from_numpy(c_np, dtype=float, requires_grad=True, device=device)
d1_wp = wp.from_numpy(d_np, dtype=float, requires_grad=True, device=device)
d2_wp = wp.from_numpy(d_np, dtype=float, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.matmul(a_wp, b_wp, c_wp, d1_wp, alpha=1.0, beta=1.0)
wp.matmul(a_wp, b_wp, d1_wp, d2_wp, alpha=1.0, beta=1.0)
d_grad = wp.zeros_like(d2_wp, device=device)
d_grad.fill_(1.0)
grads = {d2_wp: d_grad}
tape.backward(grads=grads)
assert_np_equal(a_wp.grad.numpy(), 4.0 * np.ones(shape=(2, 3)))
assert_np_equal(b_wp.grad.numpy(), 4.0 * np.ones(shape=(3, 2)))
assert_np_equal(c_wp.grad.numpy(), np.ones(shape=(2, 2)))
@unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
def test_cuda_graph_capture(test, device):
@wp.kernel
def mat_sum(mat: wp.array2d(dtype=Any), loss: wp.array(dtype=Any)):
i, j = wp.tid()
e = mat[i, j]
wp.atomic_add(loss, 0, e)
for T in [wp.float16, wp.float32, wp.float64]:
wp.overload(mat_sum, [wp.array2d(dtype=T), wp.array(dtype=T)])
wp.load_module(device=device)
wp.load_module(module="warp.utils", device=device)
for dtype in [wp.float16, wp.float32, wp.float64]:
m = 8
n = 8
k = 8
A = wp.ones((m, n), dtype=dtype, device=device, requires_grad=True)
B = wp.ones((n, k), dtype=dtype, device=device, requires_grad=True)
C = wp.zeros((m, k), dtype=dtype, device=device, requires_grad=True)
D = wp.zeros((m, k), dtype=dtype, device=device, requires_grad=True)
loss = wp.zeros(1, dtype=dtype, device=device, requires_grad=True)
wp.capture_begin(device, force_module_load=False)
try:
tape = wp.Tape()
with tape:
wp.matmul(A, B, C, D)
wp.launch(mat_sum, dim=(m, k), inputs=[D, loss], device=device)
tape.backward(loss=loss)
finally:
graph = wp.capture_end(device)
wp.capture_launch(graph)
assert_np_equal(A.grad.numpy(), 8.0 * np.ones((m, n), dtype=wp.types.warp_type_to_np_dtype[dtype]))
devices = get_test_devices()
cuda_devices = get_selected_cuda_test_devices()
class TestMatmul(unittest.TestCase):
pass
# add_function_test(TestMatmul, "test_f16", test_f16, devices=devices)
add_function_test(TestMatmul, "test_f32", test_f32, devices=devices)
add_function_test(TestMatmul, "test_f64", test_f64, devices=devices)
add_function_test(TestMatmul, "test_tape", test_tape, devices=devices)
add_function_test(TestMatmul, "test_operator", test_operator, devices=devices)
add_function_test(TestMatmul, "test_large_batch_count", test_large_batch_count, devices=devices)
add_function_test(TestMatmul, "test_adjoint_accumulation", test_adjoint_accumulation, devices=devices)
add_function_test(TestMatmul, "test_cuda_graph_capture", test_cuda_graph_capture, devices=cuda_devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=False)
| 18,159 | Python | 35.392786 | 117 | 0.565615 |
NVIDIA/warp/warp/tests/test_dense.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def eval_dense_gemm(
m: int,
n: int,
p: int,
t1: int,
t2: int,
A: wp.array(dtype=float),
B: wp.array(dtype=float),
C: wp.array(dtype=float),
):
wp.dense_gemm(m, n, p, t1, t2, A, B, C)
@wp.kernel
def eval_dense_cholesky(n: int, A: wp.array(dtype=float), regularization: float, L: wp.array(dtype=float)):
wp.dense_chol(n, A, regularization, L)
@wp.kernel
def eval_dense_subs(n: int, L: wp.array(dtype=float), b: wp.array(dtype=float), x: wp.array(dtype=float)):
wp.dense_subs(n, L, b, x)
# helper that propagates gradients back to A, treating L as a constant / temporary variable
# allows us to reuse the Cholesky decomposition from the forward pass
@wp.kernel
def eval_dense_solve(
n: int, A: wp.array(dtype=float), L: wp.array(dtype=float), b: wp.array(dtype=float), x: wp.array(dtype=float)
):
wp.dense_solve(n, A, L, b, x)
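# A minimal, illustrative sketch of how the kernels above could be chained to solve
# A x = b for a small SPD system. It is not executed by this test (the dense routines
# are deprecated / WIP), and the flattened row-major layout and single-thread launch
# are assumptions for illustration only:
def _dense_solve_example(device=None):
    n = 3
    A = wp.array([4.0, 1.0, 0.0, 1.0, 3.0, 1.0, 0.0, 1.0, 2.0], dtype=float, device=device)
    L = wp.zeros(n * n, dtype=float, device=device)
    b = wp.array([1.0, 2.0, 3.0], dtype=float, device=device)
    x = wp.zeros(n, dtype=float, device=device)
    # factor A = L * L^T (no regularization), then solve via forward/back substitution
    wp.launch(eval_dense_cholesky, dim=1, inputs=[n, A, 0.0, L], device=device)
    wp.launch(eval_dense_solve, dim=1, inputs=[n, A, L, b, x], device=device)
    return x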
def test_dense_compilation(test, device):
# just testing compilation of the dense matrix routines
# most are deprecated / WIP
wp.load_module(device=device)
devices = get_test_devices()
class TestDense(unittest.TestCase):
pass
add_function_test(TestDense, "test_dense_compilation", test_dense_compilation, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 1,835 | Python | 26.818181 | 114 | 0.707357 |
NVIDIA/warp/warp/tests/test_ctypes.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def add_vec2(dest: wp.array(dtype=wp.vec2), c: wp.vec2):
tid = wp.tid()
dest[tid] = c
@wp.kernel
def transform_vec2(dest_right: wp.array(dtype=wp.vec2), dest_left: wp.array(dtype=wp.vec2), m: wp.mat22, v: wp.vec2):
tid = wp.tid()
dest_right[tid] = wp.mul(m, v)
dest_left[tid] = wp.mul(v, m)
@wp.kernel
def add_vec3(dest: wp.array(dtype=wp.vec3), c: wp.vec3):
tid = wp.tid()
dest[tid] = c
@wp.kernel
def transform_vec3(dest_right: wp.array(dtype=wp.vec3), dest_left: wp.array(dtype=wp.vec3), m: wp.mat33, v: wp.vec3):
tid = wp.tid()
dest_right[tid] = wp.mul(m, v)
dest_left[tid] = wp.mul(v, m)
@wp.kernel
def transform_multiply(xforms: wp.array(dtype=wp.transform), a: wp.transform):
tid = wp.tid()
xforms[tid] = wp.transform_multiply(xforms[tid], a)
def test_vec2_arg(test, device, n):
dest = wp.zeros(n=n, dtype=wp.vec2, device=device)
c = np.array((1.0, 2.0))
wp.launch(add_vec2, dim=n, inputs=[dest, c], device=device)
# ensure type can round-trip from Python->GPU->Python
assert_np_equal(dest.numpy(), np.tile(c, (n, 1)))
def test_vec2_transform(test, device, n):
dest_right = wp.zeros(n=n, dtype=wp.vec2, device=device)
dest_left = wp.zeros(n=n, dtype=wp.vec2, device=device)
c = np.array((1.0, 2.0))
m = np.array(((3.0, -1.0), (2.5, 4.0)))
wp.launch(transform_vec2, dim=n, inputs=[dest_right, dest_left, m, c], device=device)
assert_np_equal(dest_right.numpy(), np.tile(m @ c, (n, 1)))
assert_np_equal(dest_left.numpy(), np.tile(c @ m, (n, 1)))
def test_vec3_arg(test, device, n):
dest = wp.zeros(n=n, dtype=wp.vec3, device=device)
c = np.array((1.0, 2.0, 3.0))
wp.launch(add_vec3, dim=n, inputs=[dest, c], device=device)
assert_np_equal(dest.numpy(), np.tile(c, (n, 1)))
def test_vec3_transform(test, device, n):
dest_right = wp.zeros(n=n, dtype=wp.vec3, device=device)
dest_left = wp.zeros(n=n, dtype=wp.vec3, device=device)
c = np.array((1.0, 2.0, 3.0))
m = np.array(((1.0, 2.0, 3.0), (4.0, 5.0, 6.0), (7.0, 8.0, 9.0)))
wp.launch(transform_vec3, dim=n, inputs=[dest_right, dest_left, m, c], device=device)
assert_np_equal(dest_right.numpy(), np.tile(m @ c, (n, 1)))
assert_np_equal(dest_left.numpy(), np.tile(c @ m, (n, 1)))
def test_transform_multiply(test, device, n):
a = wp.transform((0.0, 1.0, 0.0), wp.quat_identity())
x = []
for _i in range(10):
x.append(wp.transform_identity())
xforms = wp.array(x, dtype=wp.transform, device=device)
wp.launch(transform_multiply, dim=n, inputs=[xforms, a], device=device)
transformf = wp.types.transformation(dtype=wp.float32)
@wp.kernel
def test_transformation_constructor():
a = wp.transformation(wp.vec3(0.0), wp.quat_identity())
b = transformf(wp.vec3(0.0), wp.quat_identity())
c = wp.transform_identity(dtype=wp.float64)
spatial_vector = wp.types.vector(length=6, dtype=wp.float32)
@wp.kernel
def test_spatial_vector_constructor():
a = wp.spatial_vector(wp.vec3(0.0), wp.vec3(0.0))
# construct kernel + test harness for given matrix / vector types
def make_matrix_test(dim, matrix, vector):
def test_matrix_kernel(
a: wp.array(dtype=matrix),
b: wp.array(dtype=matrix),
c: wp.array(dtype=matrix),
x: wp.array(dtype=vector),
result_m: wp.array(dtype=matrix),
result_i: wp.array(dtype=matrix),
result_d: wp.array(dtype=float),
result_x: wp.array(dtype=vector),
):
tid = wp.tid()
m = a[tid] * b[tid] + c[tid] * 2.0
result_m[tid] = m
result_x[tid] = m * x[tid]
result_d[tid] = wp.determinant(m)
invm = wp.inverse(m)
result_i[tid] = m * invm
    # register a kernel from a plain Python function (without the @wp.kernel decorator)
# this lets us register the same function definition
# against multiple symbols, with different arg types
kernel = wp.Kernel(func=test_matrix_kernel, key=f"test_mat{dim}{dim}_kernel")
def test_matrix(test, device):
rng = np.random.default_rng(42)
n = 1024
a = rng.random(size=(n, dim, dim), dtype=float)
b = rng.random(size=(n, dim, dim), dtype=float)
c = rng.random(size=(n, dim, dim), dtype=float)
x = rng.random(size=(n, dim, 1), dtype=float)
a_array = wp.array(a, dtype=matrix, device=device)
b_array = wp.array(b, dtype=matrix, device=device)
c_array = wp.array(c, dtype=matrix, device=device)
x_array = wp.array(x, dtype=vector, device=device)
result_m_array = wp.zeros_like(a_array)
result_i_array = wp.zeros_like(a_array)
result_x_array = wp.zeros_like(x_array)
result_d_array = wp.zeros(n, dtype=float, device=device)
wp.launch(
kernel,
n,
inputs=[a_array, b_array, c_array, x_array, result_m_array, result_i_array, result_d_array, result_x_array],
device=device,
)
# numpy reference result
result_m = np.matmul(a, b) + c * 2.0
result_x = np.matmul(result_m, x)
result_i = np.array([np.eye(dim)] * n)
result_d = np.linalg.det(result_m)
assert_np_equal(result_m_array.numpy(), result_m, tol=1.0e-5)
assert_np_equal(result_i_array.numpy(), result_i, tol=1.0e-3)
assert_np_equal(result_d_array.numpy(), result_d, tol=1.0e-3)
assert_np_equal(result_x_array.numpy(), result_x, tol=1.0e-5)
return test_matrix
# generate test functions for matrix types
test_mat22 = make_matrix_test(2, wp.mat22, wp.vec2)
test_mat33 = make_matrix_test(3, wp.mat33, wp.vec3)
test_mat44 = make_matrix_test(4, wp.mat44, wp.vec4)
def test_scalar_array(test, device):
scalar_list = (0.0, 1.0, 2.0)
scalar_array = wp.array(scalar_list, device=device)
assert_np_equal(np.array(scalar_list), scalar_array.numpy())
def test_vector_array(test, device):
vector_list = [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0), (2.0, 2.0, 2.0)]
vector_array = wp.array(vector_list, dtype=wp.vec3, device=device)
assert_np_equal(np.array(vector_list), vector_array.numpy())
@wp.kernel
def test_vector_arg_types(v2: wp.vec2, v3: wp.vec3, v4: wp.vec4, m22: wp.mat22, m33: wp.mat33, m44: wp.mat44):
wp.expect_eq(v2, wp.vec2(1.0, 2.0))
wp.expect_eq(v3, wp.vec3(1.0, 2.0, 3.0))
wp.expect_eq(v4, wp.vec4(1.0, 2.0, 3.0, 4.0))
wp.expect_eq(m22, wp.mat22(1.0, 2.0, 3.0, 4.0))
wp.expect_eq(m33, wp.mat33(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0))
wp.expect_eq(m44, wp.mat44(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0))
@wp.kernel
def test_scalar_arg_types(
i8: wp.int8,
u8: wp.uint8,
i16: wp.int16,
u16: wp.uint16,
i32: wp.int32,
u32: wp.uint32,
i64: wp.int64,
u64: wp.uint64,
f32: wp.float32,
f64: wp.float64,
):
wp.expect_eq(int(i8), -64)
wp.expect_eq(int(u8), 255)
wp.expect_eq(int(i16), -64)
wp.expect_eq(int(u16), 255)
wp.expect_eq(int(i32), -64)
wp.expect_eq(int(u32), 255)
wp.expect_eq(int(i64), -64)
wp.expect_eq(int(u64), 255)
wp.expect_eq(int(f32), 3)
wp.expect_eq(int(f64), 3)
wp.expect_eq(float(f32), 3.14159)
wp.expect_eq(float(f64), 3.14159)
@wp.kernel
def test_scalar_array_types_load(
i8: wp.array(dtype=wp.int8),
u8: wp.array(dtype=wp.uint8),
i16: wp.array(dtype=wp.int16),
u16: wp.array(dtype=wp.uint16),
i32: wp.array(dtype=wp.int32),
u32: wp.array(dtype=wp.uint32),
i64: wp.array(dtype=wp.int64),
u64: wp.array(dtype=wp.uint64),
f32: wp.array(dtype=wp.float32),
f64: wp.array(dtype=wp.float64),
):
tid = wp.tid()
wp.expect_eq(int(i8[tid]), tid)
wp.expect_eq(int(u8[tid]), tid)
wp.expect_eq(int(i16[tid]), tid)
wp.expect_eq(int(u16[tid]), tid)
wp.expect_eq(int(i32[tid]), tid)
wp.expect_eq(int(u32[tid]), tid)
wp.expect_eq(int(i64[tid]), tid)
wp.expect_eq(int(u64[tid]), tid)
wp.expect_eq(float(f32[tid]), float(tid))
wp.expect_eq(float(f64[tid]), float(tid))
@wp.kernel
def test_scalar_array_types_store(
i8: wp.array(dtype=wp.int8),
u8: wp.array(dtype=wp.uint8),
i16: wp.array(dtype=wp.int16),
u16: wp.array(dtype=wp.uint16),
i32: wp.array(dtype=wp.int32),
u32: wp.array(dtype=wp.uint32),
i64: wp.array(dtype=wp.int64),
u64: wp.array(dtype=wp.uint64),
f32: wp.array(dtype=wp.float32),
f64: wp.array(dtype=wp.float64),
):
tid = wp.tid()
i8[tid] = wp.int8(tid)
u8[tid] = wp.uint8(tid)
i16[tid] = wp.int16(tid)
u16[tid] = wp.uint16(tid)
i32[tid] = wp.int32(tid)
u32[tid] = wp.uint32(tid)
i64[tid] = wp.int64(tid)
u64[tid] = wp.uint64(tid)
f32[tid] = wp.float32(tid)
f64[tid] = wp.float64(tid)
# check round-trip
wp.expect_eq(int(i8[tid]), tid)
wp.expect_eq(int(u8[tid]), tid)
wp.expect_eq(int(i16[tid]), tid)
wp.expect_eq(int(u16[tid]), tid)
wp.expect_eq(int(i32[tid]), tid)
wp.expect_eq(int(u32[tid]), tid)
wp.expect_eq(int(i64[tid]), tid)
wp.expect_eq(int(u64[tid]), tid)
wp.expect_eq(float(f32[tid]), float(tid))
wp.expect_eq(float(f64[tid]), float(tid))
@wp.kernel
def test_type_conversions():
# below tests auto-generated by the following snippet:
# scalar_types_all = [*wp.types.scalar_types, int, float]
# for t in scalar_types_all:
# for u in scalar_types_all:
# def prefix(t):
# if t == int or t == float:
# return t.__name__
# else:
# return "wp." + t.__name__
# print(f"wp.expect_eq({prefix(t)}(2.0), {prefix(t)}({prefix(u)}(2.0)))")
wp.expect_eq(wp.int8(2.0), wp.int8(wp.int8(2.0)))
wp.expect_eq(wp.int8(2.0), wp.int8(wp.uint8(2.0)))
wp.expect_eq(wp.int8(2.0), wp.int8(wp.int16(2.0)))
wp.expect_eq(wp.int8(2.0), wp.int8(wp.uint16(2.0)))
wp.expect_eq(wp.int8(2.0), wp.int8(wp.int32(2.0)))
wp.expect_eq(wp.int8(2.0), wp.int8(wp.uint32(2.0)))
wp.expect_eq(wp.int8(2.0), wp.int8(wp.int64(2.0)))
wp.expect_eq(wp.int8(2.0), wp.int8(wp.uint64(2.0)))
wp.expect_eq(wp.int8(2.0), wp.int8(wp.float16(2.0)))
wp.expect_eq(wp.int8(2.0), wp.int8(wp.float32(2.0)))
wp.expect_eq(wp.int8(2.0), wp.int8(wp.float64(2.0)))
wp.expect_eq(wp.int8(2.0), wp.int8(int(2.0)))
wp.expect_eq(wp.int8(2.0), wp.int8(float(2.0)))
wp.expect_eq(wp.uint8(2.0), wp.uint8(wp.int8(2.0)))
wp.expect_eq(wp.uint8(2.0), wp.uint8(wp.uint8(2.0)))
wp.expect_eq(wp.uint8(2.0), wp.uint8(wp.int16(2.0)))
wp.expect_eq(wp.uint8(2.0), wp.uint8(wp.uint16(2.0)))
wp.expect_eq(wp.uint8(2.0), wp.uint8(wp.int32(2.0)))
wp.expect_eq(wp.uint8(2.0), wp.uint8(wp.uint32(2.0)))
wp.expect_eq(wp.uint8(2.0), wp.uint8(wp.int64(2.0)))
wp.expect_eq(wp.uint8(2.0), wp.uint8(wp.uint64(2.0)))
wp.expect_eq(wp.uint8(2.0), wp.uint8(wp.float16(2.0)))
wp.expect_eq(wp.uint8(2.0), wp.uint8(wp.float32(2.0)))
wp.expect_eq(wp.uint8(2.0), wp.uint8(wp.float64(2.0)))
wp.expect_eq(wp.uint8(2.0), wp.uint8(int(2.0)))
wp.expect_eq(wp.uint8(2.0), wp.uint8(float(2.0)))
wp.expect_eq(wp.int16(2.0), wp.int16(wp.int8(2.0)))
wp.expect_eq(wp.int16(2.0), wp.int16(wp.uint8(2.0)))
wp.expect_eq(wp.int16(2.0), wp.int16(wp.int16(2.0)))
wp.expect_eq(wp.int16(2.0), wp.int16(wp.uint16(2.0)))
wp.expect_eq(wp.int16(2.0), wp.int16(wp.int32(2.0)))
wp.expect_eq(wp.int16(2.0), wp.int16(wp.uint32(2.0)))
wp.expect_eq(wp.int16(2.0), wp.int16(wp.int64(2.0)))
wp.expect_eq(wp.int16(2.0), wp.int16(wp.uint64(2.0)))
wp.expect_eq(wp.int16(2.0), wp.int16(wp.float16(2.0)))
wp.expect_eq(wp.int16(2.0), wp.int16(wp.float32(2.0)))
wp.expect_eq(wp.int16(2.0), wp.int16(wp.float64(2.0)))
wp.expect_eq(wp.int16(2.0), wp.int16(int(2.0)))
wp.expect_eq(wp.int16(2.0), wp.int16(float(2.0)))
wp.expect_eq(wp.uint16(2.0), wp.uint16(wp.int8(2.0)))
wp.expect_eq(wp.uint16(2.0), wp.uint16(wp.uint8(2.0)))
wp.expect_eq(wp.uint16(2.0), wp.uint16(wp.int16(2.0)))
wp.expect_eq(wp.uint16(2.0), wp.uint16(wp.uint16(2.0)))
wp.expect_eq(wp.uint16(2.0), wp.uint16(wp.int32(2.0)))
wp.expect_eq(wp.uint16(2.0), wp.uint16(wp.uint32(2.0)))
wp.expect_eq(wp.uint16(2.0), wp.uint16(wp.int64(2.0)))
wp.expect_eq(wp.uint16(2.0), wp.uint16(wp.uint64(2.0)))
wp.expect_eq(wp.uint16(2.0), wp.uint16(wp.float16(2.0)))
wp.expect_eq(wp.uint16(2.0), wp.uint16(wp.float32(2.0)))
wp.expect_eq(wp.uint16(2.0), wp.uint16(wp.float64(2.0)))
wp.expect_eq(wp.uint16(2.0), wp.uint16(int(2.0)))
wp.expect_eq(wp.uint16(2.0), wp.uint16(float(2.0)))
wp.expect_eq(wp.int32(2.0), wp.int32(wp.int8(2.0)))
wp.expect_eq(wp.int32(2.0), wp.int32(wp.uint8(2.0)))
wp.expect_eq(wp.int32(2.0), wp.int32(wp.int16(2.0)))
wp.expect_eq(wp.int32(2.0), wp.int32(wp.uint16(2.0)))
wp.expect_eq(wp.int32(2.0), wp.int32(wp.int32(2.0)))
wp.expect_eq(wp.int32(2.0), wp.int32(wp.uint32(2.0)))
wp.expect_eq(wp.int32(2.0), wp.int32(wp.int64(2.0)))
wp.expect_eq(wp.int32(2.0), wp.int32(wp.uint64(2.0)))
wp.expect_eq(wp.int32(2.0), wp.int32(wp.float16(2.0)))
wp.expect_eq(wp.int32(2.0), wp.int32(wp.float32(2.0)))
wp.expect_eq(wp.int32(2.0), wp.int32(wp.float64(2.0)))
wp.expect_eq(wp.int32(2.0), wp.int32(int(2.0)))
wp.expect_eq(wp.int32(2.0), wp.int32(float(2.0)))
wp.expect_eq(wp.uint32(2.0), wp.uint32(wp.int8(2.0)))
wp.expect_eq(wp.uint32(2.0), wp.uint32(wp.uint8(2.0)))
wp.expect_eq(wp.uint32(2.0), wp.uint32(wp.int16(2.0)))
wp.expect_eq(wp.uint32(2.0), wp.uint32(wp.uint16(2.0)))
wp.expect_eq(wp.uint32(2.0), wp.uint32(wp.int32(2.0)))
wp.expect_eq(wp.uint32(2.0), wp.uint32(wp.uint32(2.0)))
wp.expect_eq(wp.uint32(2.0), wp.uint32(wp.int64(2.0)))
wp.expect_eq(wp.uint32(2.0), wp.uint32(wp.uint64(2.0)))
wp.expect_eq(wp.uint32(2.0), wp.uint32(wp.float16(2.0)))
wp.expect_eq(wp.uint32(2.0), wp.uint32(wp.float32(2.0)))
wp.expect_eq(wp.uint32(2.0), wp.uint32(wp.float64(2.0)))
wp.expect_eq(wp.uint32(2.0), wp.uint32(int(2.0)))
wp.expect_eq(wp.uint32(2.0), wp.uint32(float(2.0)))
wp.expect_eq(wp.int64(2.0), wp.int64(wp.int8(2.0)))
wp.expect_eq(wp.int64(2.0), wp.int64(wp.uint8(2.0)))
wp.expect_eq(wp.int64(2.0), wp.int64(wp.int16(2.0)))
wp.expect_eq(wp.int64(2.0), wp.int64(wp.uint16(2.0)))
wp.expect_eq(wp.int64(2.0), wp.int64(wp.int32(2.0)))
wp.expect_eq(wp.int64(2.0), wp.int64(wp.uint32(2.0)))
wp.expect_eq(wp.int64(2.0), wp.int64(wp.int64(2.0)))
wp.expect_eq(wp.int64(2.0), wp.int64(wp.uint64(2.0)))
wp.expect_eq(wp.int64(2.0), wp.int64(wp.float16(2.0)))
wp.expect_eq(wp.int64(2.0), wp.int64(wp.float32(2.0)))
wp.expect_eq(wp.int64(2.0), wp.int64(wp.float64(2.0)))
wp.expect_eq(wp.int64(2.0), wp.int64(int(2.0)))
wp.expect_eq(wp.int64(2.0), wp.int64(float(2.0)))
wp.expect_eq(wp.uint64(2.0), wp.uint64(wp.int8(2.0)))
wp.expect_eq(wp.uint64(2.0), wp.uint64(wp.uint8(2.0)))
wp.expect_eq(wp.uint64(2.0), wp.uint64(wp.int16(2.0)))
wp.expect_eq(wp.uint64(2.0), wp.uint64(wp.uint16(2.0)))
wp.expect_eq(wp.uint64(2.0), wp.uint64(wp.int32(2.0)))
wp.expect_eq(wp.uint64(2.0), wp.uint64(wp.uint32(2.0)))
wp.expect_eq(wp.uint64(2.0), wp.uint64(wp.int64(2.0)))
wp.expect_eq(wp.uint64(2.0), wp.uint64(wp.uint64(2.0)))
wp.expect_eq(wp.uint64(2.0), wp.uint64(wp.float16(2.0)))
wp.expect_eq(wp.uint64(2.0), wp.uint64(wp.float32(2.0)))
wp.expect_eq(wp.uint64(2.0), wp.uint64(wp.float64(2.0)))
wp.expect_eq(wp.uint64(2.0), wp.uint64(int(2.0)))
wp.expect_eq(wp.uint64(2.0), wp.uint64(float(2.0)))
wp.expect_eq(wp.float16(2.0), wp.float16(wp.int8(2.0)))
wp.expect_eq(wp.float16(2.0), wp.float16(wp.uint8(2.0)))
wp.expect_eq(wp.float16(2.0), wp.float16(wp.int16(2.0)))
wp.expect_eq(wp.float16(2.0), wp.float16(wp.uint16(2.0)))
wp.expect_eq(wp.float16(2.0), wp.float16(wp.int32(2.0)))
wp.expect_eq(wp.float16(2.0), wp.float16(wp.uint32(2.0)))
wp.expect_eq(wp.float16(2.0), wp.float16(wp.int64(2.0)))
wp.expect_eq(wp.float16(2.0), wp.float16(wp.uint64(2.0)))
wp.expect_eq(wp.float16(2.0), wp.float16(wp.float16(2.0)))
wp.expect_eq(wp.float16(2.0), wp.float16(wp.float32(2.0)))
wp.expect_eq(wp.float16(2.0), wp.float16(wp.float64(2.0)))
wp.expect_eq(wp.float16(2.0), wp.float16(int(2.0)))
wp.expect_eq(wp.float16(2.0), wp.float16(float(2.0)))
wp.expect_eq(wp.float32(2.0), wp.float32(wp.int8(2.0)))
wp.expect_eq(wp.float32(2.0), wp.float32(wp.uint8(2.0)))
wp.expect_eq(wp.float32(2.0), wp.float32(wp.int16(2.0)))
wp.expect_eq(wp.float32(2.0), wp.float32(wp.uint16(2.0)))
wp.expect_eq(wp.float32(2.0), wp.float32(wp.int32(2.0)))
wp.expect_eq(wp.float32(2.0), wp.float32(wp.uint32(2.0)))
wp.expect_eq(wp.float32(2.0), wp.float32(wp.int64(2.0)))
wp.expect_eq(wp.float32(2.0), wp.float32(wp.uint64(2.0)))
wp.expect_eq(wp.float32(2.0), wp.float32(wp.float16(2.0)))
wp.expect_eq(wp.float32(2.0), wp.float32(wp.float32(2.0)))
wp.expect_eq(wp.float32(2.0), wp.float32(wp.float64(2.0)))
wp.expect_eq(wp.float32(2.0), wp.float32(int(2.0)))
wp.expect_eq(wp.float32(2.0), wp.float32(float(2.0)))
wp.expect_eq(wp.float64(2.0), wp.float64(wp.int8(2.0)))
wp.expect_eq(wp.float64(2.0), wp.float64(wp.uint8(2.0)))
wp.expect_eq(wp.float64(2.0), wp.float64(wp.int16(2.0)))
wp.expect_eq(wp.float64(2.0), wp.float64(wp.uint16(2.0)))
wp.expect_eq(wp.float64(2.0), wp.float64(wp.int32(2.0)))
wp.expect_eq(wp.float64(2.0), wp.float64(wp.uint32(2.0)))
wp.expect_eq(wp.float64(2.0), wp.float64(wp.int64(2.0)))
wp.expect_eq(wp.float64(2.0), wp.float64(wp.uint64(2.0)))
wp.expect_eq(wp.float64(2.0), wp.float64(wp.float16(2.0)))
wp.expect_eq(wp.float64(2.0), wp.float64(wp.float32(2.0)))
wp.expect_eq(wp.float64(2.0), wp.float64(wp.float64(2.0)))
wp.expect_eq(wp.float64(2.0), wp.float64(int(2.0)))
wp.expect_eq(wp.float64(2.0), wp.float64(float(2.0)))
wp.expect_eq(int(2.0), int(wp.int8(2.0)))
wp.expect_eq(int(2.0), int(wp.uint8(2.0)))
wp.expect_eq(int(2.0), int(wp.int16(2.0)))
wp.expect_eq(int(2.0), int(wp.uint16(2.0)))
wp.expect_eq(int(2.0), int(wp.int32(2.0)))
wp.expect_eq(int(2.0), int(wp.uint32(2.0)))
wp.expect_eq(int(2.0), int(wp.int64(2.0)))
wp.expect_eq(int(2.0), int(wp.uint64(2.0)))
wp.expect_eq(int(2.0), int(wp.float16(2.0)))
wp.expect_eq(int(2.0), int(wp.float32(2.0)))
wp.expect_eq(int(2.0), int(wp.float64(2.0)))
wp.expect_eq(int(2.0), int(int(2.0)))
wp.expect_eq(int(2.0), int(float(2.0)))
wp.expect_eq(float(2.0), float(wp.int8(2.0)))
wp.expect_eq(float(2.0), float(wp.uint8(2.0)))
wp.expect_eq(float(2.0), float(wp.int16(2.0)))
wp.expect_eq(float(2.0), float(wp.uint16(2.0)))
wp.expect_eq(float(2.0), float(wp.int32(2.0)))
wp.expect_eq(float(2.0), float(wp.uint32(2.0)))
wp.expect_eq(float(2.0), float(wp.int64(2.0)))
wp.expect_eq(float(2.0), float(wp.uint64(2.0)))
wp.expect_eq(float(2.0), float(wp.float16(2.0)))
wp.expect_eq(float(2.0), float(wp.float32(2.0)))
wp.expect_eq(float(2.0), float(wp.float64(2.0)))
wp.expect_eq(float(2.0), float(int(2.0)))
wp.expect_eq(float(2.0), float(float(2.0)))
def test_scalar_array_types(test, device, load, store):
dim = 64
i8 = wp.array(np.linspace(0, dim, dim, endpoint=False, dtype=np.int8), device=device)
u8 = wp.array(np.linspace(0, dim, dim, endpoint=False, dtype=np.uint8), device=device)
i16 = wp.array(np.linspace(0, dim, dim, endpoint=False, dtype=np.int16), device=device)
u16 = wp.array(np.linspace(0, dim, dim, endpoint=False, dtype=np.uint16), device=device)
i32 = wp.array(np.linspace(0, dim, dim, endpoint=False, dtype=np.int32), device=device)
u32 = wp.array(np.linspace(0, dim, dim, endpoint=False, dtype=np.uint32), device=device)
i64 = wp.array(np.linspace(0, dim, dim, endpoint=False, dtype=np.int64), device=device)
u64 = wp.array(np.linspace(0, dim, dim, endpoint=False, dtype=np.uint64), device=device)
f32 = wp.array(np.linspace(0, dim, dim, endpoint=False, dtype=np.float32), device=device)
f64 = wp.array(np.linspace(0, dim, dim, endpoint=False, dtype=np.float64), device=device)
if load:
wp.launch(
test_scalar_array_types_load,
dim=dim,
inputs=[i8, u8, i16, u16, i32, u32, i64, u64, f32, f64],
device=device,
)
if store:
wp.launch(
test_scalar_array_types_store,
dim=dim,
inputs=[i8, u8, i16, u16, i32, u32, i64, u64, f32, f64],
device=device,
)
@wp.kernel
def test_transform_matrix():
r = wp.quat_from_axis_angle(wp.vec3(1.0, 0.0, 0.0), 0.5)
t = wp.vec3(0.25, 0.5, -0.75)
s = wp.vec3(2.0, 0.5, 0.75)
m = wp.mat44(t, r, s)
p = wp.vec3(1.0, 2.0, 3.0)
r_0 = wp.quat_rotate(r, wp.cw_mul(s, p)) + t
r_1 = wp.transform_point(m, p)
r_2 = wp.transform_vector(m, p)
wp.expect_near(r_0, r_1, 1.0e-4)
wp.expect_near(r_2, r_0 - t, 1.0e-4)
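# wp.mat44(t, r, s) above builds a translate-rotate-scale transform; transform_point()
# applies the full TRS while transform_vector() drops the translation, which is why
# the kernel checks r_2 against r_0 - t.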
devices = get_test_devices()
class TestCTypes(unittest.TestCase):
pass
inputs = [
wp.vec2(1.0, 2.0),
wp.vec3(1.0, 2.0, 3.0),
wp.vec4(1.0, 2.0, 3.0, 4.0),
wp.mat22(1.0, 2.0, 3.0, 4.0),
wp.mat33(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0),
wp.mat44(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0),
]
add_function_test(TestCTypes, "test_mat22", test_mat22, devices=devices)
add_function_test(TestCTypes, "test_mat33", test_mat33, devices=devices)
add_function_test(TestCTypes, "test_mat44", test_mat44, devices=devices)
add_kernel_test(
TestCTypes, name="test_transformation_constructor", kernel=test_transformation_constructor, dim=1, devices=devices
)
add_kernel_test(
TestCTypes, name="test_spatial_vector_constructor", kernel=test_spatial_vector_constructor, dim=1, devices=devices
)
add_kernel_test(
TestCTypes,
name="test_scalar_arg_types",
kernel=test_scalar_arg_types,
dim=1,
inputs=[-64, 255, -64, 255, -64, 255, -64, 255, 3.14159, 3.14159],
devices=devices,
)
add_kernel_test(
TestCTypes,
name="test_scalar_arg_types_explicit",
kernel=test_scalar_arg_types,
dim=1,
inputs=[
wp.int8(-64),
wp.uint8(255),
wp.int16(-64),
wp.uint16(255),
wp.int32(-64),
wp.uint32(255),
wp.int64(-64),
wp.uint64(255),
wp.float32(3.14159),
wp.float64(3.14159),
],
devices=devices,
)
add_kernel_test(
TestCTypes, name="test_vector_arg_types", kernel=test_vector_arg_types, dim=1, inputs=inputs, devices=devices
)
add_kernel_test(TestCTypes, name="test_type_convesrions", kernel=test_type_conversions, dim=1, devices=devices)
add_function_test(
TestCTypes, "test_scalar_array_load", test_scalar_array_types, devices=devices, load=True, store=False
)
add_function_test(
TestCTypes, "test_scalar_array_store", test_scalar_array_types, devices=devices, load=False, store=True
)
add_function_test(TestCTypes, "test_vec2_arg", test_vec2_arg, devices=devices, n=8)
add_function_test(TestCTypes, "test_vec2_transform", test_vec2_transform, devices=devices, n=8)
add_function_test(TestCTypes, "test_vec3_arg", test_vec3_arg, devices=devices, n=8)
add_function_test(TestCTypes, "test_vec3_transform", test_vec3_transform, devices=devices, n=8)
add_function_test(TestCTypes, "test_transform_multiply", test_transform_multiply, devices=devices, n=8)
add_kernel_test(TestCTypes, name="test_transform_matrix", kernel=test_transform_matrix, dim=1, devices=devices)
add_function_test(TestCTypes, "test_scalar_array", test_scalar_array, devices=devices)
add_function_test(TestCTypes, "test_vector_array", test_vector_array, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 24,546 | Python | 37.901743 | 120 | 0.622342 |
NVIDIA/warp/warp/tests/test_dlpack.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import ctypes
import os
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
N = 1024 * 1024
def _jax_version():
try:
import jax
return jax.__version_info__
except (ImportError, AttributeError):
return (0, 0, 0)
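# _jax_version() is only used to gate the newer __dlpack__()-based Jax tests below
# (via unittest.skipUnless); (0, 0, 0) means Jax is unavailable or too old.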
@wp.kernel
def inc(a: wp.array(dtype=float)):
tid = wp.tid()
a[tid] = a[tid] + 1.0
def test_dlpack_warp_to_warp(test, device):
a1 = wp.array(data=np.arange(N, dtype=np.float32), device=device)
a2 = wp.from_dlpack(wp.to_dlpack(a1))
test.assertEqual(a1.ptr, a2.ptr)
test.assertEqual(a1.device, a2.device)
test.assertEqual(a1.dtype, a2.dtype)
test.assertEqual(a1.shape, a2.shape)
test.assertEqual(a1.strides, a2.strides)
assert_np_equal(a1.numpy(), a2.numpy())
wp.launch(inc, dim=a2.size, inputs=[a2], device=device)
assert_np_equal(a1.numpy(), a2.numpy())
def test_dlpack_dtypes_and_shapes(test, device):
# automatically determine scalar dtype
def wrap_scalar_tensor_implicit(dtype):
a1 = wp.zeros(N, dtype=dtype, device=device)
a2 = wp.from_dlpack(wp.to_dlpack(a1))
test.assertEqual(a1.ptr, a2.ptr)
test.assertEqual(a1.device, a2.device)
test.assertEqual(a1.dtype, a2.dtype)
test.assertEqual(a1.shape, a2.shape)
test.assertEqual(a1.strides, a2.strides)
# explicitly specify scalar dtype
def wrap_scalar_tensor_explicit(dtype, target_dtype):
a1 = wp.zeros(N, dtype=dtype, device=device)
a2 = wp.from_dlpack(wp.to_dlpack(a1), dtype=target_dtype)
test.assertEqual(a1.ptr, a2.ptr)
test.assertEqual(a1.device, a2.device)
test.assertEqual(a1.dtype, dtype)
test.assertEqual(a2.dtype, target_dtype)
test.assertEqual(a1.shape, a2.shape)
test.assertEqual(a1.strides, a2.strides)
# convert vector arrays to scalar arrays
def wrap_vector_to_scalar_tensor(vec_dtype):
scalar_type = vec_dtype._wp_scalar_type_
scalar_size = ctypes.sizeof(vec_dtype._type_)
a1 = wp.zeros(N, dtype=vec_dtype, device=device)
a2 = wp.from_dlpack(wp.to_dlpack(a1), dtype=scalar_type)
test.assertEqual(a1.ptr, a2.ptr)
test.assertEqual(a1.device, a2.device)
test.assertEqual(a2.ndim, a1.ndim + 1)
test.assertEqual(a1.dtype, vec_dtype)
test.assertEqual(a2.dtype, scalar_type)
test.assertEqual(a2.shape, (*a1.shape, vec_dtype._length_))
test.assertEqual(a2.strides, (*a1.strides, scalar_size))
# convert scalar arrays to vector arrays
def wrap_scalar_to_vector_tensor(vec_dtype):
scalar_type = vec_dtype._wp_scalar_type_
scalar_size = ctypes.sizeof(vec_dtype._type_)
a1 = wp.zeros((N, vec_dtype._length_), dtype=scalar_type, device=device)
a2 = wp.from_dlpack(wp.to_dlpack(a1), dtype=vec_dtype)
test.assertEqual(a1.ptr, a2.ptr)
test.assertEqual(a1.device, a2.device)
test.assertEqual(a2.ndim, a1.ndim - 1)
test.assertEqual(a1.dtype, scalar_type)
test.assertEqual(a2.dtype, vec_dtype)
test.assertEqual(a1.shape, (*a2.shape, vec_dtype._length_))
test.assertEqual(a1.strides, (*a2.strides, scalar_size))
# convert matrix arrays to scalar arrays
def wrap_matrix_to_scalar_tensor(mat_dtype):
scalar_type = mat_dtype._wp_scalar_type_
scalar_size = ctypes.sizeof(mat_dtype._type_)
a1 = wp.zeros(N, dtype=mat_dtype, device=device)
a2 = wp.from_dlpack(wp.to_dlpack(a1), dtype=scalar_type)
test.assertEqual(a1.ptr, a2.ptr)
test.assertEqual(a1.device, a2.device)
test.assertEqual(a2.ndim, a1.ndim + 2)
test.assertEqual(a1.dtype, mat_dtype)
test.assertEqual(a2.dtype, scalar_type)
test.assertEqual(a2.shape, (*a1.shape, *mat_dtype._shape_))
test.assertEqual(a2.strides, (*a1.strides, scalar_size * mat_dtype._shape_[1], scalar_size))
# convert scalar arrays to matrix arrays
def wrap_scalar_to_matrix_tensor(mat_dtype):
scalar_type = mat_dtype._wp_scalar_type_
scalar_size = ctypes.sizeof(mat_dtype._type_)
a1 = wp.zeros((N, *mat_dtype._shape_), dtype=scalar_type, device=device)
a2 = wp.from_dlpack(wp.to_dlpack(a1), dtype=mat_dtype)
test.assertEqual(a1.ptr, a2.ptr)
test.assertEqual(a1.device, a2.device)
test.assertEqual(a2.ndim, a1.ndim - 2)
test.assertEqual(a1.dtype, scalar_type)
test.assertEqual(a2.dtype, mat_dtype)
test.assertEqual(a1.shape, (*a2.shape, *mat_dtype._shape_))
test.assertEqual(a1.strides, (*a2.strides, scalar_size * mat_dtype._shape_[1], scalar_size))
for t in wp.types.scalar_types:
wrap_scalar_tensor_implicit(t)
for t in wp.types.scalar_types:
wrap_scalar_tensor_explicit(t, t)
# test signed/unsigned conversions
wrap_scalar_tensor_explicit(wp.int8, wp.uint8)
wrap_scalar_tensor_explicit(wp.uint8, wp.int8)
wrap_scalar_tensor_explicit(wp.int16, wp.uint16)
wrap_scalar_tensor_explicit(wp.uint16, wp.int16)
wrap_scalar_tensor_explicit(wp.int32, wp.uint32)
wrap_scalar_tensor_explicit(wp.uint32, wp.int32)
wrap_scalar_tensor_explicit(wp.int64, wp.uint64)
wrap_scalar_tensor_explicit(wp.uint64, wp.int64)
vec_types = []
for t in wp.types.scalar_types:
for vec_len in [2, 3, 4, 5]:
vec_types.append(wp.types.vector(vec_len, t))
vec_types.append(wp.quath)
vec_types.append(wp.quatf)
vec_types.append(wp.quatd)
vec_types.append(wp.transformh)
vec_types.append(wp.transformf)
vec_types.append(wp.transformd)
vec_types.append(wp.spatial_vectorh)
vec_types.append(wp.spatial_vectorf)
vec_types.append(wp.spatial_vectord)
for vec_type in vec_types:
wrap_vector_to_scalar_tensor(vec_type)
wrap_scalar_to_vector_tensor(vec_type)
mat_shapes = [(2, 2), (3, 3), (4, 4), (5, 5), (2, 3), (3, 2), (3, 4), (4, 3)]
mat_types = []
for t in wp.types.scalar_types:
for mat_shape in mat_shapes:
mat_types.append(wp.types.matrix(mat_shape, t))
mat_types.append(wp.spatial_matrixh)
mat_types.append(wp.spatial_matrixf)
mat_types.append(wp.spatial_matrixd)
for mat_type in mat_types:
wrap_matrix_to_scalar_tensor(mat_type)
wrap_scalar_to_matrix_tensor(mat_type)
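# Summary of the reinterpretations exercised above: vector/matrix component dimensions
# become trailing axes of the scalar view, e.g. a wp.vec3 array of shape (N,) maps to
# a scalar tensor of shape (N, 3), and a wp.mat33 array of shape (N,) maps to (N, 3, 3).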
def test_dlpack_warp_to_torch(test, device):
import torch.utils.dlpack
a = wp.array(data=np.arange(N, dtype=np.float32), device=device)
t = torch.utils.dlpack.from_dlpack(wp.to_dlpack(a))
item_size = wp.types.type_size_in_bytes(a.dtype)
test.assertEqual(a.ptr, t.data_ptr())
test.assertEqual(a.device, wp.device_from_torch(t.device))
test.assertEqual(a.dtype, wp.dtype_from_torch(t.dtype))
test.assertEqual(a.shape, tuple(t.shape))
test.assertEqual(a.strides, tuple(s * item_size for s in t.stride()))
assert_np_equal(a.numpy(), t.cpu().numpy())
wp.launch(inc, dim=a.size, inputs=[a], device=device)
assert_np_equal(a.numpy(), t.cpu().numpy())
t += 1
assert_np_equal(a.numpy(), t.cpu().numpy())
def test_dlpack_warp_to_torch_v2(test, device):
# same as original test, but uses newer __dlpack__() method
import torch.utils.dlpack
a = wp.array(data=np.arange(N, dtype=np.float32), device=device)
# pass the array directly
t = torch.utils.dlpack.from_dlpack(a)
item_size = wp.types.type_size_in_bytes(a.dtype)
test.assertEqual(a.ptr, t.data_ptr())
test.assertEqual(a.device, wp.device_from_torch(t.device))
test.assertEqual(a.dtype, wp.dtype_from_torch(t.dtype))
test.assertEqual(a.shape, tuple(t.shape))
test.assertEqual(a.strides, tuple(s * item_size for s in t.stride()))
assert_np_equal(a.numpy(), t.cpu().numpy())
wp.launch(inc, dim=a.size, inputs=[a], device=device)
assert_np_equal(a.numpy(), t.cpu().numpy())
t += 1
assert_np_equal(a.numpy(), t.cpu().numpy())
def test_dlpack_torch_to_warp(test, device):
import torch
import torch.utils.dlpack
t = torch.arange(N, dtype=torch.float32, device=wp.device_to_torch(device))
a = wp.from_dlpack(torch.utils.dlpack.to_dlpack(t))
item_size = wp.types.type_size_in_bytes(a.dtype)
test.assertEqual(a.ptr, t.data_ptr())
test.assertEqual(a.device, wp.device_from_torch(t.device))
test.assertEqual(a.dtype, wp.dtype_from_torch(t.dtype))
test.assertEqual(a.shape, tuple(t.shape))
test.assertEqual(a.strides, tuple(s * item_size for s in t.stride()))
assert_np_equal(a.numpy(), t.cpu().numpy())
wp.launch(inc, dim=a.size, inputs=[a], device=device)
assert_np_equal(a.numpy(), t.cpu().numpy())
t += 1
assert_np_equal(a.numpy(), t.cpu().numpy())
def test_dlpack_torch_to_warp_v2(test, device):
# same as original test, but uses newer __dlpack__() method
import torch
t = torch.arange(N, dtype=torch.float32, device=wp.device_to_torch(device))
# pass tensor directly
a = wp.from_dlpack(t)
item_size = wp.types.type_size_in_bytes(a.dtype)
test.assertEqual(a.ptr, t.data_ptr())
test.assertEqual(a.device, wp.device_from_torch(t.device))
test.assertEqual(a.dtype, wp.dtype_from_torch(t.dtype))
test.assertEqual(a.shape, tuple(t.shape))
test.assertEqual(a.strides, tuple(s * item_size for s in t.stride()))
assert_np_equal(a.numpy(), t.cpu().numpy())
wp.launch(inc, dim=a.size, inputs=[a], device=device)
assert_np_equal(a.numpy(), t.cpu().numpy())
t += 1
assert_np_equal(a.numpy(), t.cpu().numpy())
def test_dlpack_warp_to_jax(test, device):
import jax
import jax.dlpack
a = wp.array(data=np.arange(N, dtype=np.float32), device=device)
# use generic dlpack conversion
j1 = jax.dlpack.from_dlpack(wp.to_dlpack(a))
# use jax wrapper
j2 = wp.to_jax(a)
test.assertEqual(a.ptr, j1.unsafe_buffer_pointer())
test.assertEqual(a.ptr, j2.unsafe_buffer_pointer())
test.assertEqual(a.device, wp.device_from_jax(list(j1.devices())[0]))
test.assertEqual(a.device, wp.device_from_jax(list(j2.devices())[0]))
test.assertEqual(a.shape, j1.shape)
test.assertEqual(a.shape, j2.shape)
assert_np_equal(a.numpy(), np.asarray(j1))
assert_np_equal(a.numpy(), np.asarray(j2))
wp.launch(inc, dim=a.size, inputs=[a], device=device)
wp.synchronize_device(device)
# HACK? Run a no-op operation so that Jax flags the arrays as dirty
# and gets the latest values, which were modified by Warp.
j1 += 0
j2 += 0
assert_np_equal(a.numpy(), np.asarray(j1))
assert_np_equal(a.numpy(), np.asarray(j2))
@unittest.skipUnless(_jax_version() >= (0, 4, 15), "Jax version too old")
def test_dlpack_warp_to_jax_v2(test, device):
# same as original test, but uses newer __dlpack__() method
import jax
import jax.dlpack
a = wp.array(data=np.arange(N, dtype=np.float32), device=device)
# pass warp array directly
j1 = jax.dlpack.from_dlpack(a)
# use jax wrapper
j2 = wp.to_jax(a)
test.assertEqual(a.ptr, j1.unsafe_buffer_pointer())
test.assertEqual(a.ptr, j2.unsafe_buffer_pointer())
test.assertEqual(a.device, wp.device_from_jax(list(j1.devices())[0]))
test.assertEqual(a.device, wp.device_from_jax(list(j2.devices())[0]))
test.assertEqual(a.shape, j1.shape)
test.assertEqual(a.shape, j2.shape)
assert_np_equal(a.numpy(), np.asarray(j1))
assert_np_equal(a.numpy(), np.asarray(j2))
wp.launch(inc, dim=a.size, inputs=[a], device=device)
wp.synchronize_device(device)
# HACK? Run a no-op operation so that Jax flags the arrays as dirty
# and gets the latest values, which were modified by Warp.
j1 += 0
j2 += 0
assert_np_equal(a.numpy(), np.asarray(j1))
assert_np_equal(a.numpy(), np.asarray(j2))
def test_dlpack_jax_to_warp(test, device):
import jax
import jax.dlpack
with jax.default_device(wp.device_to_jax(device)):
j = jax.numpy.arange(N, dtype=jax.numpy.float32)
# use generic dlpack conversion
a1 = wp.from_dlpack(jax.dlpack.to_dlpack(j))
# use jax wrapper
a2 = wp.from_jax(j)
test.assertEqual(a1.ptr, j.unsafe_buffer_pointer())
test.assertEqual(a2.ptr, j.unsafe_buffer_pointer())
test.assertEqual(a1.device, wp.device_from_jax(list(j.devices())[0]))
test.assertEqual(a2.device, wp.device_from_jax(list(j.devices())[0]))
test.assertEqual(a1.shape, j.shape)
test.assertEqual(a2.shape, j.shape)
assert_np_equal(a1.numpy(), np.asarray(j))
assert_np_equal(a2.numpy(), np.asarray(j))
wp.launch(inc, dim=a1.size, inputs=[a1], device=device)
wp.synchronize_device(device)
# HACK? Run a no-op operation so that Jax flags the array as dirty
# and gets the latest values, which were modified by Warp.
j += 0
assert_np_equal(a1.numpy(), np.asarray(j))
assert_np_equal(a2.numpy(), np.asarray(j))
@unittest.skipUnless(_jax_version() >= (0, 4, 15), "Jax version too old")
def test_dlpack_jax_to_warp_v2(test, device):
# same as original test, but uses newer __dlpack__() method
import jax
with jax.default_device(wp.device_to_jax(device)):
j = jax.numpy.arange(N, dtype=jax.numpy.float32)
# pass jax array directly
a1 = wp.from_dlpack(j)
# use jax wrapper
a2 = wp.from_jax(j)
test.assertEqual(a1.ptr, j.unsafe_buffer_pointer())
test.assertEqual(a2.ptr, j.unsafe_buffer_pointer())
test.assertEqual(a1.device, wp.device_from_jax(list(j.devices())[0]))
test.assertEqual(a2.device, wp.device_from_jax(list(j.devices())[0]))
test.assertEqual(a1.shape, j.shape)
test.assertEqual(a2.shape, j.shape)
assert_np_equal(a1.numpy(), np.asarray(j))
assert_np_equal(a2.numpy(), np.asarray(j))
wp.launch(inc, dim=a1.size, inputs=[a1], device=device)
wp.synchronize_device(device)
# HACK? Run a no-op operation so that Jax flags the array as dirty
# and gets the latest values, which were modified by Warp.
j += 0
assert_np_equal(a1.numpy(), np.asarray(j))
assert_np_equal(a2.numpy(), np.asarray(j))
class TestDLPack(unittest.TestCase):
pass
devices = get_test_devices()
add_function_test(TestDLPack, "test_dlpack_warp_to_warp", test_dlpack_warp_to_warp, devices=devices)
add_function_test(TestDLPack, "test_dlpack_dtypes_and_shapes", test_dlpack_dtypes_and_shapes, devices=devices)
# torch interop via dlpack
try:
import torch
import torch.utils.dlpack
# check which Warp devices work with Torch
# CUDA devices may fail if Torch was not compiled with CUDA support
test_devices = get_test_devices()
torch_compatible_devices = []
for d in test_devices:
try:
t = torch.arange(10, device=wp.device_to_torch(d))
t += 1
torch_compatible_devices.append(d)
except Exception as e:
print(f"Skipping Torch DLPack tests on device '{d}' due to exception: {e}")
if torch_compatible_devices:
add_function_test(
TestDLPack, "test_dlpack_warp_to_torch", test_dlpack_warp_to_torch, devices=torch_compatible_devices
)
add_function_test(
TestDLPack, "test_dlpack_warp_to_torch_v2", test_dlpack_warp_to_torch_v2, devices=torch_compatible_devices
)
add_function_test(
TestDLPack, "test_dlpack_torch_to_warp", test_dlpack_torch_to_warp, devices=torch_compatible_devices
)
add_function_test(
TestDLPack, "test_dlpack_torch_to_warp_v2", test_dlpack_torch_to_warp_v2, devices=torch_compatible_devices
)
except Exception as e:
print(f"Skipping Torch DLPack tests due to exception: {e}")
# jax interop via dlpack
try:
# prevent Jax from gobbling up GPU memory
os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "false"
os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.dlpack
# check which Warp devices work with Jax
# CUDA devices may fail if Jax cannot find a CUDA Toolkit
test_devices = get_test_devices()
jax_compatible_devices = []
for d in test_devices:
try:
with jax.default_device(wp.device_to_jax(d)):
j = jax.numpy.arange(10, dtype=jax.numpy.float32)
j += 1
jax_compatible_devices.append(d)
except Exception as e:
print(f"Skipping Jax DLPack tests on device '{d}' due to exception: {e}")
if jax_compatible_devices:
add_function_test(
TestDLPack, "test_dlpack_warp_to_jax", test_dlpack_warp_to_jax, devices=jax_compatible_devices
)
add_function_test(
TestDLPack, "test_dlpack_warp_to_jax_v2", test_dlpack_warp_to_jax_v2, devices=jax_compatible_devices
)
add_function_test(
TestDLPack, "test_dlpack_jax_to_warp", test_dlpack_jax_to_warp, devices=jax_compatible_devices
)
add_function_test(
TestDLPack, "test_dlpack_jax_to_warp_v2", test_dlpack_jax_to_warp_v2, devices=jax_compatible_devices
)
except Exception as e:
print(f"Skipping Jax DLPack tests due to exception: {e}")
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 17,903 | Python | 32.84499 | 118 | 0.651678 |
NVIDIA/warp/warp/tests/__main__.py | from warp.thirdparty.unittest_parallel import main
if __name__ == "__main__":
main()
| 90 | Python | 17.199997 | 50 | 0.644444 |
NVIDIA/warp/warp/tests/test_vec_lite.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def test_vector_constructor_value_func():
a = wp.vec2()
b = wp.vector(a, dtype=wp.float16)
c = wp.vector(a)
d = wp.vector(a, length=2)
# Test vector constructors using explicit type (float16)
# note that these tests are specifically not using generics / closure
# args to create kernels dynamically (like the rest of this file)
# as those use different code paths to resolve arg types which
# has led to regressions.
@wp.kernel
def test_constructors_explicit_precision():
# construction for custom vector types
ones = wp.vector(wp.float16(1.0), length=2)
zeros = wp.vector(length=2, dtype=wp.float16)
custom = wp.vector(wp.float16(0.0), wp.float16(1.0))
for i in range(2):
wp.expect_eq(ones[i], wp.float16(1.0))
wp.expect_eq(zeros[i], wp.float16(0.0))
wp.expect_eq(custom[i], wp.float16(i))
# Same as above but with a default (float/int) type
# which tests some different code paths that
# need to ensure types are correctly canonicalized
# during codegen
@wp.kernel
def test_constructors_default_precision():
# construction for custom vector types
ones = wp.vector(1.0, length=2)
zeros = wp.vector(length=2, dtype=float)
custom = wp.vector(0.0, 1.0)
for i in range(2):
wp.expect_eq(ones[i], 1.0)
wp.expect_eq(zeros[i], 0.0)
wp.expect_eq(custom[i], float(i))
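# In the default-precision kernel above, plain float literals are expected to
# canonicalize to float32 (Warp's default scalar type), so wp.vector(1.0, length=2)
# should behave like wp.vec2(1.0, 1.0).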
devices = get_test_devices()
class TestVecLite(unittest.TestCase):
pass
add_kernel_test(TestVecLite, test_vector_constructor_value_func, dim=1, devices=devices)
add_kernel_test(TestVecLite, test_constructors_explicit_precision, dim=1, devices=devices)
add_kernel_test(TestVecLite, test_constructors_default_precision, dim=1, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=True)
| 2,343 | Python | 31.555555 | 90 | 0.718737 |
NVIDIA/warp/warp/tests/aux_test_square.py | import warp as wp
@wp.func
def multiply(x: float):
return x * x
@wp.kernel
def kern(expect: float):
wp.expect_eq(multiply(4.0), expect)
def run(expect, device):
wp.launch(kern, dim=1, inputs=[expect], device=device)
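# Illustrative usage from a test module (sketch): run(16.0, device) passes because
# multiply(4.0) evaluates to 4.0 * 4.0 == 16.0 inside the kernel.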
| 234 | Python | 13.687499 | 58 | 0.662393 |
NVIDIA/warp/warp/tests/aux_test_reference.py | # This file is used to test reloading module references.
import warp as wp
import warp.tests.aux_test_reference_reference as refref
@wp.func
def magic():
return 2.0 * refref.more_magic()
| 194 | Python | 18.499998 | 56 | 0.742268 |
NVIDIA/warp/warp/tests/test_verify_fp.py | # Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import warp as wp
from warp.tests.unittest_utils import *
def setUpModule():
wp.config.verify_fp = True # Enable checking floating-point values to be finite
def tearDownModule():
wp.config.verify_fp = False
@wp.struct
class TestStruct:
field: wp.float32
@wp.kernel
def finite_kernel(foos: wp.array(dtype=TestStruct)):
i = wp.tid()
foos[i].field += wp.float32(1.0)
def test_finite(test, device):
foos = wp.zeros((10,), dtype=TestStruct, device=device)
wp.launch(
kernel=finite_kernel,
dim=(10,),
inputs=[foos],
device=device,
)
wp.synchronize()
expected = TestStruct()
expected.field = 1.0
for f in foos.list():
if f.field != expected.field:
raise AssertionError(f"Unexpected result, got: {f} expected: {expected}")
@wp.kernel
def nan_kernel(foos: wp.array(dtype=TestStruct)):
i = wp.tid()
foos[i].field /= wp.float32(0.0) # Division by zero produces Not-a-Number (NaN)
def test_nan(test, device):
foos = wp.zeros((10,), dtype=TestStruct, device=device)
capture = StdOutCapture()
capture.begin()
wp.launch(
kernel=nan_kernel,
dim=(10,),
inputs=[foos],
device=device,
)
wp.synchronize()
output = capture.end()
# Check that the output contains warnings about "nan" being produced.
# Older Windows C runtimes have a bug where stdout sometimes does not get properly flushed.
if output != "" or sys.platform != "win32":
test.assertRegex(output, r"nan")
devices = get_test_devices()
class TestVerifyFP(unittest.TestCase):
pass
add_function_test(TestVerifyFP, "test_finite", test_finite, devices=devices)
add_function_test(TestVerifyFP, "test_nan", test_nan, devices=devices, check_output=False)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 2,343 | Python | 24.204301 | 95 | 0.679044 |
NVIDIA/warp/warp/tests/aux_test_grad_customs.py | # Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""This file is used to test importing a user-defined function with a custom gradient"""
import warp as wp
@wp.func
def aux_custom_fn(x: float, y: float):
return x * 3.0 + y / 3.0, y**2.5
@wp.func_grad(aux_custom_fn)
def aux_custom_fn_grad(x: float, y: float, adj_ret0: float, adj_ret1: float):
wp.adjoint[x] += x * adj_ret0 * 42.0 + y * adj_ret1 * 10.0
wp.adjoint[y] += y * adj_ret1 * 3.0
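# During tape.backward(), the adjoint registered via @wp.func_grad is used in place of
# the automatically generated one, with wp.adjoint[...] accumulating the incoming
# gradients for each argument as above.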
| 831 | Python | 36.81818 | 88 | 0.724428 |
NVIDIA/warp/warp/tests/run_coverage_serial.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Serial code-coverage runner
This script is used to generate code-coverage reports by running Warp tests.
It runs in serial, so it can take over an hour to finish. To generate a coverage
report in parallel, use the warp/thirdparty/unittest_parallel.py script
instead with the --coverage option, e.g. python -m warp.tests --coverage
"""
import coverage
cover = coverage.Coverage(config_file=True, messages=True)
cover.start()
with cover.collect():
import unittest_serial # noqa: E402
unittest_serial.run_specified()
cover.save()
cover.report()
cover.html_report(title="Warp Testing Code Coverage Report")
| 1,045 | Python | 31.687499 | 76 | 0.781818 |
NVIDIA/warp/warp/tests/test_sim_kinematics.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import os
import unittest
import warp as wp
import warp.sim
from warp.tests.unittest_utils import *
def test_fk_ik(test, device):
builder = wp.sim.ModelBuilder()
num_envs = 1
for i in range(num_envs):
wp.sim.parse_mjcf(
os.path.join(os.path.dirname(__file__), "../examples/assets/nv_ant.xml"),
builder,
stiffness=0.0,
damping=1.0,
armature=0.1,
contact_ke=1.0e4,
contact_kd=1.0e2,
contact_kf=1.0e2,
contact_mu=0.75,
limit_ke=1.0e3,
limit_kd=1.0e1,
up_axis="y",
)
coord_count = 15
dof_count = 14
coord_start = i * coord_count
dof_start = i * dof_count
# base
builder.joint_q[coord_start : coord_start + 3] = [i * 2.0, 0.70, 0.0]
builder.joint_q[coord_start + 3 : coord_start + 7] = wp.quat_from_axis_angle(
wp.vec3(1.0, 0.0, 0.0), -math.pi * 0.5
)
# joints
builder.joint_q[coord_start + 7 : coord_start + coord_count] = [0.0, 1.0, 0.0, -1.0, 0.0, -1.0, 0.0, 1.0]
builder.joint_qd[dof_start + 6 : dof_start + dof_count] = [1.0, 1.0, 1.0, -1.0, 1.0, -1.0, 1.0, 1.0]
# finalize model
model = builder.finalize(device=device)
model.ground = True
model.joint_attach_ke *= 16.0
model.joint_attach_kd *= 4.0
state = model.state()
# save a copy of joint values
q_fk = model.joint_q.numpy()
qd_fk = model.joint_qd.numpy()
wp.sim.eval_fk(model, model.joint_q, model.joint_qd, None, state)
q_ik = wp.zeros_like(model.joint_q, device=device)
qd_ik = wp.zeros_like(model.joint_qd, device=device)
wp.sim.eval_ik(model, state, q_ik, qd_ik)
assert_np_equal(q_fk, q_ik.numpy(), tol=1e-6)
assert_np_equal(qd_fk, qd_ik.numpy(), tol=1e-6)
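    # eval_fk() computes body transforms from the joint coordinates and eval_ik()
    # maps them back, so the asserts above verify that the round trip reproduces
    # the original joint_q / joint_qd values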
devices = get_test_devices()
class TestSimKinematics(unittest.TestCase):
pass
add_function_test(TestSimKinematics, "test_fk_ik", test_fk_ik, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=True)
| 2,578 | Python | 27.655555 | 113 | 0.610163 |
NVIDIA/warp/warp/tests/test_vec_scalar_ops.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
np_signed_int_types = [
np.int8,
np.int16,
np.int32,
np.int64,
np.byte,
]
np_unsigned_int_types = [
np.uint8,
np.uint16,
np.uint32,
np.uint64,
np.ubyte,
]
np_int_types = np_signed_int_types + np_unsigned_int_types
np_float_types = [np.float16, np.float32, np.float64]
np_scalar_types = np_int_types + np_float_types
def randvals(rng, shape, dtype):
if dtype in np_float_types:
return rng.standard_normal(size=shape).astype(dtype)
elif dtype in [np.int8, np.uint8, np.byte, np.ubyte]:
return rng.integers(1, high=3, size=shape, dtype=dtype)
return rng.integers(1, high=5, size=shape, dtype=dtype)
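# randvals() keeps integer samples small (1..2 for 8-bit types, 1..4 otherwise),
# presumably so that products computed in the kernels below stay within range.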
kernel_cache = {}
def getkernel(func, suffix=""):
key = func.__name__ + "_" + suffix
if key not in kernel_cache:
kernel_cache[key] = wp.Kernel(func=func, key=key)
return kernel_cache[key]
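# getkernel() caches one wp.Kernel per (function name, dtype suffix) pair so the same
# Python body can be compiled separately for every scalar type exercised below.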
def get_select_kernel(dtype):
def output_select_kernel_fn(
input: wp.array(dtype=dtype),
index: int,
out: wp.array(dtype=dtype),
):
out[0] = input[index]
return getkernel(output_select_kernel_fn, suffix=dtype.__name__)
def get_select_kernel2(dtype):
def output_select_kernel2_fn(
input: wp.array(dtype=dtype, ndim=2),
index0: int,
index1: int,
out: wp.array(dtype=dtype),
):
out[0] = input[index0, index1]
return getkernel(output_select_kernel2_fn, suffix=dtype.__name__)
def test_arrays(test, device, dtype):
rng = np.random.default_rng(123)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
v2_np = randvals(rng, (10, 2), dtype)
v3_np = randvals(rng, (10, 3), dtype)
v4_np = randvals(rng, (10, 4), dtype)
v5_np = randvals(rng, (10, 5), dtype)
v2 = wp.array(v2_np, dtype=vec2, requires_grad=True, device=device)
v3 = wp.array(v3_np, dtype=vec3, requires_grad=True, device=device)
v4 = wp.array(v4_np, dtype=vec4, requires_grad=True, device=device)
v5 = wp.array(v5_np, dtype=vec5, requires_grad=True, device=device)
assert_np_equal(v2.numpy(), v2_np, tol=1.0e-6)
assert_np_equal(v3.numpy(), v3_np, tol=1.0e-6)
assert_np_equal(v4.numpy(), v4_np, tol=1.0e-6)
assert_np_equal(v5.numpy(), v5_np, tol=1.0e-6)
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
v2 = wp.array(v2_np, dtype=vec2, requires_grad=True, device=device)
v3 = wp.array(v3_np, dtype=vec3, requires_grad=True, device=device)
v4 = wp.array(v4_np, dtype=vec4, requires_grad=True, device=device)
assert_np_equal(v2.numpy(), v2_np, tol=1.0e-6)
assert_np_equal(v3.numpy(), v3_np, tol=1.0e-6)
assert_np_equal(v4.numpy(), v4_np, tol=1.0e-6)
def test_components(test, device, dtype):
# test accessing vector components from Python - this is especially important
# for float16, which requires special handling internally
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec3 = wp.types.vector(length=3, dtype=wptype)
v = vec3(1, 2, 3)
# test __getitem__ for individual components
test.assertEqual(v[0], 1)
test.assertEqual(v[1], 2)
test.assertEqual(v[2], 3)
# test __getitem__ for slices
s = v[:]
test.assertEqual(s[0], 1)
test.assertEqual(s[1], 2)
test.assertEqual(s[2], 3)
s = v[1:]
test.assertEqual(s[0], 2)
test.assertEqual(s[1], 3)
s = v[:2]
test.assertEqual(s[0], 1)
test.assertEqual(s[1], 2)
s = v[::2]
test.assertEqual(s[0], 1)
test.assertEqual(s[1], 3)
# test __setitem__ for individual components
v[0] = 4
v[1] = 5
v[2] = 6
test.assertEqual(v[0], 4)
test.assertEqual(v[1], 5)
test.assertEqual(v[2], 6)
# test __setitem__ for slices
v[:] = [7, 8, 9]
test.assertEqual(v[0], 7)
test.assertEqual(v[1], 8)
test.assertEqual(v[2], 9)
v[1:] = [10, 11]
test.assertEqual(v[0], 7)
test.assertEqual(v[1], 10)
test.assertEqual(v[2], 11)
v[:2] = [12, 13]
test.assertEqual(v[0], 12)
test.assertEqual(v[1], 13)
test.assertEqual(v[2], 11)
v[::2] = [14, 15]
test.assertEqual(v[0], 14)
test.assertEqual(v[1], 13)
test.assertEqual(v[2], 15)
def test_py_arithmetic_ops(test, device, dtype):
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
def make_vec(*args):
if wptype in wp.types.int_types:
# Cast to the correct integer type to simulate wrapping.
return tuple(wptype._type_(x).value for x in args)
return args
vec_cls = wp.vec(3, wptype)
v = vec_cls(1, -2, 3)
test.assertSequenceEqual(+v, make_vec(1, -2, 3))
test.assertSequenceEqual(-v, make_vec(-1, 2, -3))
test.assertSequenceEqual(v + vec_cls(5, 5, 5), make_vec(6, 3, 8))
test.assertSequenceEqual(v - vec_cls(5, 5, 5), make_vec(-4, -7, -2))
v = vec_cls(2, 4, 6)
test.assertSequenceEqual(v * wptype(2), make_vec(4, 8, 12))
test.assertSequenceEqual(wptype(2) * v, make_vec(4, 8, 12))
test.assertSequenceEqual(v / wptype(2), make_vec(1, 2, 3))
test.assertSequenceEqual(wptype(24) / v, make_vec(12, 6, 4))
def test_constructors(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
def check_scalar_constructor(
input: wp.array(dtype=wptype),
v2: wp.array(dtype=vec2),
v3: wp.array(dtype=vec3),
v4: wp.array(dtype=vec4),
v5: wp.array(dtype=vec5),
v20: wp.array(dtype=wptype),
v21: wp.array(dtype=wptype),
v30: wp.array(dtype=wptype),
v31: wp.array(dtype=wptype),
v32: wp.array(dtype=wptype),
v40: wp.array(dtype=wptype),
v41: wp.array(dtype=wptype),
v42: wp.array(dtype=wptype),
v43: wp.array(dtype=wptype),
v50: wp.array(dtype=wptype),
v51: wp.array(dtype=wptype),
v52: wp.array(dtype=wptype),
v53: wp.array(dtype=wptype),
v54: wp.array(dtype=wptype),
):
v2result = vec2(input[0])
v3result = vec3(input[0])
v4result = vec4(input[0])
v5result = vec5(input[0])
v2[0] = v2result
v3[0] = v3result
v4[0] = v4result
v5[0] = v5result
# multiply outputs by 2 so we've got something to backpropagate
v20[0] = wptype(2) * v2result[0]
v21[0] = wptype(2) * v2result[1]
v30[0] = wptype(2) * v3result[0]
v31[0] = wptype(2) * v3result[1]
v32[0] = wptype(2) * v3result[2]
v40[0] = wptype(2) * v4result[0]
v41[0] = wptype(2) * v4result[1]
v42[0] = wptype(2) * v4result[2]
v43[0] = wptype(2) * v4result[3]
v50[0] = wptype(2) * v5result[0]
v51[0] = wptype(2) * v5result[1]
v52[0] = wptype(2) * v5result[2]
v53[0] = wptype(2) * v5result[3]
v54[0] = wptype(2) * v5result[4]
def check_vector_constructors(
input: wp.array(dtype=wptype),
v2: wp.array(dtype=vec2),
v3: wp.array(dtype=vec3),
v4: wp.array(dtype=vec4),
v5: wp.array(dtype=vec5),
v20: wp.array(dtype=wptype),
v21: wp.array(dtype=wptype),
v30: wp.array(dtype=wptype),
v31: wp.array(dtype=wptype),
v32: wp.array(dtype=wptype),
v40: wp.array(dtype=wptype),
v41: wp.array(dtype=wptype),
v42: wp.array(dtype=wptype),
v43: wp.array(dtype=wptype),
v50: wp.array(dtype=wptype),
v51: wp.array(dtype=wptype),
v52: wp.array(dtype=wptype),
v53: wp.array(dtype=wptype),
v54: wp.array(dtype=wptype),
):
v2result = vec2(input[0], input[1])
v3result = vec3(input[2], input[3], input[4])
v4result = vec4(input[5], input[6], input[7], input[8])
v5result = vec5(input[9], input[10], input[11], input[12], input[13])
v2[0] = v2result
v3[0] = v3result
v4[0] = v4result
v5[0] = v5result
# multiply the output by 2 so we've got something to backpropagate:
v20[0] = wptype(2) * v2result[0]
v21[0] = wptype(2) * v2result[1]
v30[0] = wptype(2) * v3result[0]
v31[0] = wptype(2) * v3result[1]
v32[0] = wptype(2) * v3result[2]
v40[0] = wptype(2) * v4result[0]
v41[0] = wptype(2) * v4result[1]
v42[0] = wptype(2) * v4result[2]
v43[0] = wptype(2) * v4result[3]
v50[0] = wptype(2) * v5result[0]
v51[0] = wptype(2) * v5result[1]
v52[0] = wptype(2) * v5result[2]
v53[0] = wptype(2) * v5result[3]
v54[0] = wptype(2) * v5result[4]
vec_kernel = getkernel(check_vector_constructors, suffix=dtype.__name__)
kernel = getkernel(check_scalar_constructor, suffix=dtype.__name__)
if register_kernels:
return
input = wp.array(randvals(rng, [1], dtype), requires_grad=True, device=device)
v2 = wp.zeros(1, dtype=vec2, device=device)
v3 = wp.zeros(1, dtype=vec3, device=device)
v4 = wp.zeros(1, dtype=vec4, device=device)
v5 = wp.zeros(1, dtype=vec5, device=device)
v20 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v21 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v30 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v31 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v32 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v40 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v41 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v42 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v43 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v50 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v51 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v52 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v53 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v54 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[input],
outputs=[v2, v3, v4, v5, v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54],
device=device,
)
if dtype in np_float_types:
for l in [v20, v21]:
tape.backward(loss=l)
test.assertEqual(tape.gradients[input].numpy()[0], 2.0)
tape.zero()
for l in [v30, v31, v32]:
tape.backward(loss=l)
test.assertEqual(tape.gradients[input].numpy()[0], 2.0)
tape.zero()
for l in [v40, v41, v42, v43]:
tape.backward(loss=l)
test.assertEqual(tape.gradients[input].numpy()[0], 2.0)
tape.zero()
for l in [v50, v51, v52, v53, v54]:
tape.backward(loss=l)
test.assertEqual(tape.gradients[input].numpy()[0], 2.0)
tape.zero()
val = input.numpy()[0]
assert_np_equal(v2.numpy()[0], np.array([val, val]), tol=1.0e-6)
assert_np_equal(v3.numpy()[0], np.array([val, val, val]), tol=1.0e-6)
assert_np_equal(v4.numpy()[0], np.array([val, val, val, val]), tol=1.0e-6)
assert_np_equal(v5.numpy()[0], np.array([val, val, val, val, val]), tol=1.0e-6)
assert_np_equal(v20.numpy()[0], 2 * val, tol=1.0e-6)
assert_np_equal(v21.numpy()[0], 2 * val, tol=1.0e-6)
assert_np_equal(v30.numpy()[0], 2 * val, tol=1.0e-6)
assert_np_equal(v31.numpy()[0], 2 * val, tol=1.0e-6)
assert_np_equal(v32.numpy()[0], 2 * val, tol=1.0e-6)
assert_np_equal(v40.numpy()[0], 2 * val, tol=1.0e-6)
assert_np_equal(v41.numpy()[0], 2 * val, tol=1.0e-6)
assert_np_equal(v42.numpy()[0], 2 * val, tol=1.0e-6)
assert_np_equal(v43.numpy()[0], 2 * val, tol=1.0e-6)
assert_np_equal(v50.numpy()[0], 2 * val, tol=1.0e-6)
assert_np_equal(v51.numpy()[0], 2 * val, tol=1.0e-6)
assert_np_equal(v52.numpy()[0], 2 * val, tol=1.0e-6)
assert_np_equal(v53.numpy()[0], 2 * val, tol=1.0e-6)
assert_np_equal(v54.numpy()[0], 2 * val, tol=1.0e-6)
input = wp.array(randvals(rng, [14], dtype), requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
vec_kernel,
dim=1,
inputs=[input],
outputs=[v2, v3, v4, v5, v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54],
device=device,
)
if dtype in np_float_types:
for i, l in enumerate([v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54]):
tape.backward(loss=l)
grad = tape.gradients[input].numpy()
expected_grad = np.zeros_like(grad)
expected_grad[i] = 2
assert_np_equal(grad, expected_grad, tol=tol)
tape.zero()
assert_np_equal(v2.numpy()[0, 0], input.numpy()[0], tol=tol)
assert_np_equal(v2.numpy()[0, 1], input.numpy()[1], tol=tol)
assert_np_equal(v3.numpy()[0, 0], input.numpy()[2], tol=tol)
assert_np_equal(v3.numpy()[0, 1], input.numpy()[3], tol=tol)
assert_np_equal(v3.numpy()[0, 2], input.numpy()[4], tol=tol)
assert_np_equal(v4.numpy()[0, 0], input.numpy()[5], tol=tol)
assert_np_equal(v4.numpy()[0, 1], input.numpy()[6], tol=tol)
assert_np_equal(v4.numpy()[0, 2], input.numpy()[7], tol=tol)
assert_np_equal(v4.numpy()[0, 3], input.numpy()[8], tol=tol)
assert_np_equal(v5.numpy()[0, 0], input.numpy()[9], tol=tol)
assert_np_equal(v5.numpy()[0, 1], input.numpy()[10], tol=tol)
assert_np_equal(v5.numpy()[0, 2], input.numpy()[11], tol=tol)
assert_np_equal(v5.numpy()[0, 3], input.numpy()[12], tol=tol)
assert_np_equal(v5.numpy()[0, 4], input.numpy()[13], tol=tol)
assert_np_equal(v20.numpy()[0], 2 * input.numpy()[0], tol=tol)
assert_np_equal(v21.numpy()[0], 2 * input.numpy()[1], tol=tol)
assert_np_equal(v30.numpy()[0], 2 * input.numpy()[2], tol=tol)
assert_np_equal(v31.numpy()[0], 2 * input.numpy()[3], tol=tol)
assert_np_equal(v32.numpy()[0], 2 * input.numpy()[4], tol=tol)
assert_np_equal(v40.numpy()[0], 2 * input.numpy()[5], tol=tol)
assert_np_equal(v41.numpy()[0], 2 * input.numpy()[6], tol=tol)
assert_np_equal(v42.numpy()[0], 2 * input.numpy()[7], tol=tol)
assert_np_equal(v43.numpy()[0], 2 * input.numpy()[8], tol=tol)
assert_np_equal(v50.numpy()[0], 2 * input.numpy()[9], tol=tol)
assert_np_equal(v51.numpy()[0], 2 * input.numpy()[10], tol=tol)
assert_np_equal(v52.numpy()[0], 2 * input.numpy()[11], tol=tol)
assert_np_equal(v53.numpy()[0], 2 * input.numpy()[12], tol=tol)
assert_np_equal(v54.numpy()[0], 2 * input.numpy()[13], tol=tol)
def test_anon_type_instance(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
def check_scalar_init(
input: wp.array(dtype=wptype),
output: wp.array(dtype=wptype),
):
v2result = wp.vector(input[0], length=2)
v3result = wp.vector(input[1], length=3)
v4result = wp.vector(input[2], length=4)
v5result = wp.vector(input[3], length=5)
idx = 0
for i in range(2):
output[idx] = wptype(2) * v2result[i]
idx = idx + 1
for i in range(3):
output[idx] = wptype(2) * v3result[i]
idx = idx + 1
for i in range(4):
output[idx] = wptype(2) * v4result[i]
idx = idx + 1
for i in range(5):
output[idx] = wptype(2) * v5result[i]
idx = idx + 1
def check_component_init(
input: wp.array(dtype=wptype),
output: wp.array(dtype=wptype),
):
v2result = wp.vector(input[0], input[1])
v3result = wp.vector(input[2], input[3], input[4])
v4result = wp.vector(input[5], input[6], input[7], input[8])
v5result = wp.vector(input[9], input[10], input[11], input[12], input[13])
idx = 0
for i in range(2):
output[idx] = wptype(2) * v2result[i]
idx = idx + 1
for i in range(3):
output[idx] = wptype(2) * v3result[i]
idx = idx + 1
for i in range(4):
output[idx] = wptype(2) * v4result[i]
idx = idx + 1
for i in range(5):
output[idx] = wptype(2) * v5result[i]
idx = idx + 1
scalar_kernel = getkernel(check_scalar_init, suffix=dtype.__name__)
component_kernel = getkernel(check_component_init, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
input = wp.array(randvals(rng, [4], dtype), requires_grad=True, device=device)
output = wp.zeros(2 + 3 + 4 + 5, dtype=wptype, requires_grad=True, device=device)
wp.launch(scalar_kernel, dim=1, inputs=[input], outputs=[output], device=device)
assert_np_equal(output.numpy()[:2], 2 * np.array([input.numpy()[0]] * 2), tol=1.0e-6)
assert_np_equal(output.numpy()[2:5], 2 * np.array([input.numpy()[1]] * 3), tol=1.0e-6)
assert_np_equal(output.numpy()[5:9], 2 * np.array([input.numpy()[2]] * 4), tol=1.0e-6)
assert_np_equal(output.numpy()[9:], 2 * np.array([input.numpy()[3]] * 5), tol=1.0e-6)
if dtype in np_float_types:
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for i in range(len(output)):
tape = wp.Tape()
with tape:
wp.launch(scalar_kernel, dim=1, inputs=[input], outputs=[output], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[output, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(input.numpy())
if i < 2:
expected[0] = 2
elif i < 5:
expected[1] = 2
elif i < 9:
expected[2] = 2
else:
expected[3] = 2
assert_np_equal(tape.gradients[input].numpy(), expected, tol=tol)
tape.reset()
tape.zero()
input = wp.array(randvals(rng, [2 + 3 + 4 + 5], dtype), requires_grad=True, device=device)
output = wp.zeros(2 + 3 + 4 + 5, dtype=wptype, requires_grad=True, device=device)
wp.launch(component_kernel, dim=1, inputs=[input], outputs=[output], device=device)
assert_np_equal(output.numpy(), 2 * input.numpy(), tol=1.0e-6)
if dtype in np_float_types:
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for i in range(len(output)):
tape = wp.Tape()
with tape:
wp.launch(component_kernel, dim=1, inputs=[input], outputs=[output], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[output, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(input.numpy())
expected[i] = 2
assert_np_equal(tape.gradients[input].numpy(), expected, tol=tol)
tape.reset()
tape.zero()
def test_indexing(test, device, dtype, register_kernels=False):
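    # Read each component of vec2/3/4/5 array elements with [] indexing, write twice
    # the value to scalar outputs, and check the forward values and gradients.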
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
def check_indexing(
v2: wp.array(dtype=vec2),
v3: wp.array(dtype=vec3),
v4: wp.array(dtype=vec4),
v5: wp.array(dtype=vec5),
v20: wp.array(dtype=wptype),
v21: wp.array(dtype=wptype),
v30: wp.array(dtype=wptype),
v31: wp.array(dtype=wptype),
v32: wp.array(dtype=wptype),
v40: wp.array(dtype=wptype),
v41: wp.array(dtype=wptype),
v42: wp.array(dtype=wptype),
v43: wp.array(dtype=wptype),
v50: wp.array(dtype=wptype),
v51: wp.array(dtype=wptype),
v52: wp.array(dtype=wptype),
v53: wp.array(dtype=wptype),
v54: wp.array(dtype=wptype),
):
# multiply outputs by 2 so we've got something to backpropagate:
v20[0] = wptype(2) * v2[0][0]
v21[0] = wptype(2) * v2[0][1]
v30[0] = wptype(2) * v3[0][0]
v31[0] = wptype(2) * v3[0][1]
v32[0] = wptype(2) * v3[0][2]
v40[0] = wptype(2) * v4[0][0]
v41[0] = wptype(2) * v4[0][1]
v42[0] = wptype(2) * v4[0][2]
v43[0] = wptype(2) * v4[0][3]
v50[0] = wptype(2) * v5[0][0]
v51[0] = wptype(2) * v5[0][1]
v52[0] = wptype(2) * v5[0][2]
v53[0] = wptype(2) * v5[0][3]
v54[0] = wptype(2) * v5[0][4]
kernel = getkernel(check_indexing, suffix=dtype.__name__)
if register_kernels:
return
v2 = wp.array(randvals(rng, (1, 2), dtype), dtype=vec2, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, (1, 4), dtype), dtype=vec4, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, (1, 5), dtype), dtype=vec5, requires_grad=True, device=device)
v20 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v21 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v30 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v31 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v32 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v40 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v41 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v42 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v43 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v50 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v51 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v52 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v53 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v54 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[v2, v3, v4, v5],
outputs=[v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54],
device=device,
)
if dtype in np_float_types:
for i, l in enumerate([v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54]):
tape.backward(loss=l)
allgrads = np.concatenate([tape.gradients[v].numpy()[0] for v in [v2, v3, v4, v5]])
expected_grads = np.zeros_like(allgrads)
expected_grads[i] = 2
assert_np_equal(allgrads, expected_grads, tol=tol)
tape.zero()
assert_np_equal(v20.numpy()[0], 2.0 * v2.numpy()[0, 0], tol=tol)
assert_np_equal(v21.numpy()[0], 2.0 * v2.numpy()[0, 1], tol=tol)
assert_np_equal(v30.numpy()[0], 2.0 * v3.numpy()[0, 0], tol=tol)
assert_np_equal(v31.numpy()[0], 2.0 * v3.numpy()[0, 1], tol=tol)
assert_np_equal(v32.numpy()[0], 2.0 * v3.numpy()[0, 2], tol=tol)
assert_np_equal(v40.numpy()[0], 2.0 * v4.numpy()[0, 0], tol=tol)
assert_np_equal(v41.numpy()[0], 2.0 * v4.numpy()[0, 1], tol=tol)
assert_np_equal(v42.numpy()[0], 2.0 * v4.numpy()[0, 2], tol=tol)
assert_np_equal(v43.numpy()[0], 2.0 * v4.numpy()[0, 3], tol=tol)
assert_np_equal(v50.numpy()[0], 2.0 * v5.numpy()[0, 0], tol=tol)
assert_np_equal(v51.numpy()[0], 2.0 * v5.numpy()[0, 1], tol=tol)
assert_np_equal(v52.numpy()[0], 2.0 * v5.numpy()[0, 2], tol=tol)
assert_np_equal(v53.numpy()[0], 2.0 * v5.numpy()[0, 3], tol=tol)
assert_np_equal(v54.numpy()[0], 2.0 * v5.numpy()[0, 4], tol=tol)
def test_equality(test, device, dtype, register_kernels=False):
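    # Check wp.expect_eq()/wp.expect_neq() for vectors: each vector equals itself and
    # differs from vectors that change a single component.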
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
def check_equality(
v20: wp.array(dtype=vec2),
v21: wp.array(dtype=vec2),
v22: wp.array(dtype=vec2),
v30: wp.array(dtype=vec3),
v31: wp.array(dtype=vec3),
v32: wp.array(dtype=vec3),
v33: wp.array(dtype=vec3),
v40: wp.array(dtype=vec4),
v41: wp.array(dtype=vec4),
v42: wp.array(dtype=vec4),
v43: wp.array(dtype=vec4),
v44: wp.array(dtype=vec4),
v50: wp.array(dtype=vec5),
v51: wp.array(dtype=vec5),
v52: wp.array(dtype=vec5),
v53: wp.array(dtype=vec5),
v54: wp.array(dtype=vec5),
v55: wp.array(dtype=vec5),
):
wp.expect_eq(v20[0], v20[0])
wp.expect_neq(v21[0], v20[0])
wp.expect_neq(v22[0], v20[0])
wp.expect_eq(v30[0], v30[0])
wp.expect_neq(v31[0], v30[0])
wp.expect_neq(v32[0], v30[0])
wp.expect_neq(v33[0], v30[0])
wp.expect_eq(v40[0], v40[0])
wp.expect_neq(v41[0], v40[0])
wp.expect_neq(v42[0], v40[0])
wp.expect_neq(v43[0], v40[0])
wp.expect_neq(v44[0], v40[0])
wp.expect_eq(v50[0], v50[0])
wp.expect_neq(v51[0], v50[0])
wp.expect_neq(v52[0], v50[0])
wp.expect_neq(v53[0], v50[0])
wp.expect_neq(v54[0], v50[0])
wp.expect_neq(v55[0], v50[0])
kernel = getkernel(check_equality, suffix=dtype.__name__)
if register_kernels:
return
v20 = wp.array([1.0, 2.0], dtype=vec2, requires_grad=True, device=device)
v21 = wp.array([1.0, 3.0], dtype=vec2, requires_grad=True, device=device)
v22 = wp.array([3.0, 2.0], dtype=vec2, requires_grad=True, device=device)
v30 = wp.array([1.0, 2.0, 3.0], dtype=vec3, requires_grad=True, device=device)
v31 = wp.array([-1.0, 2.0, 3.0], dtype=vec3, requires_grad=True, device=device)
v32 = wp.array([1.0, -2.0, 3.0], dtype=vec3, requires_grad=True, device=device)
v33 = wp.array([1.0, 2.0, -3.0], dtype=vec3, requires_grad=True, device=device)
v40 = wp.array([1.0, 2.0, 3.0, 4.0], dtype=vec4, requires_grad=True, device=device)
v41 = wp.array([-1.0, 2.0, 3.0, 4.0], dtype=vec4, requires_grad=True, device=device)
v42 = wp.array([1.0, -2.0, 3.0, 4.0], dtype=vec4, requires_grad=True, device=device)
v43 = wp.array([1.0, 2.0, -3.0, 4.0], dtype=vec4, requires_grad=True, device=device)
v44 = wp.array([1.0, 2.0, 3.0, -4.0], dtype=vec4, requires_grad=True, device=device)
v50 = wp.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=vec5, requires_grad=True, device=device)
v51 = wp.array([-1.0, 2.0, 3.0, 4.0, 5.0], dtype=vec5, requires_grad=True, device=device)
v52 = wp.array([1.0, -2.0, 3.0, 4.0, 5.0], dtype=vec5, requires_grad=True, device=device)
v53 = wp.array([1.0, 2.0, -3.0, 4.0, 5.0], dtype=vec5, requires_grad=True, device=device)
v54 = wp.array([1.0, 2.0, 3.0, -4.0, 5.0], dtype=vec5, requires_grad=True, device=device)
v55 = wp.array([1.0, 2.0, 3.0, 4.0, -5.0], dtype=vec5, requires_grad=True, device=device)
wp.launch(
kernel,
dim=1,
inputs=[
v20,
v21,
v22,
v30,
v31,
v32,
v33,
v40,
v41,
v42,
v43,
v44,
v50,
v51,
v52,
v53,
v54,
v55,
],
outputs=[],
device=device,
)
def test_scalar_multiplication(test, device, dtype, register_kernels=False):
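    # Multiply vectors by a scalar on the left (s * v) and check the forward values plus
    # gradients with respect to the scalar and the vector components.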
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
def check_mul(
s: wp.array(dtype=wptype),
v2: wp.array(dtype=vec2),
v3: wp.array(dtype=vec3),
v4: wp.array(dtype=vec4),
v5: wp.array(dtype=vec5),
v20: wp.array(dtype=wptype),
v21: wp.array(dtype=wptype),
v30: wp.array(dtype=wptype),
v31: wp.array(dtype=wptype),
v32: wp.array(dtype=wptype),
v40: wp.array(dtype=wptype),
v41: wp.array(dtype=wptype),
v42: wp.array(dtype=wptype),
v43: wp.array(dtype=wptype),
v50: wp.array(dtype=wptype),
v51: wp.array(dtype=wptype),
v52: wp.array(dtype=wptype),
v53: wp.array(dtype=wptype),
v54: wp.array(dtype=wptype),
):
v2result = s[0] * v2[0]
v3result = s[0] * v3[0]
v4result = s[0] * v4[0]
v5result = s[0] * v5[0]
# multiply outputs by 2 so we've got something to backpropagate:
v20[0] = wptype(2) * v2result[0]
v21[0] = wptype(2) * v2result[1]
v30[0] = wptype(2) * v3result[0]
v31[0] = wptype(2) * v3result[1]
v32[0] = wptype(2) * v3result[2]
v40[0] = wptype(2) * v4result[0]
v41[0] = wptype(2) * v4result[1]
v42[0] = wptype(2) * v4result[2]
v43[0] = wptype(2) * v4result[3]
v50[0] = wptype(2) * v5result[0]
v51[0] = wptype(2) * v5result[1]
v52[0] = wptype(2) * v5result[2]
v53[0] = wptype(2) * v5result[3]
v54[0] = wptype(2) * v5result[4]
kernel = getkernel(check_mul, suffix=dtype.__name__)
if register_kernels:
return
s = wp.array(randvals(rng, [1], dtype), requires_grad=True, device=device)
v2 = wp.array(randvals(rng, (1, 2), dtype), dtype=vec2, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, (1, 4), dtype), dtype=vec4, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, (1, 5), dtype), dtype=vec5, requires_grad=True, device=device)
v20 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v21 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v30 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v31 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v32 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v40 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v41 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v42 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v43 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v50 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v51 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v52 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v53 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v54 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
s,
v2,
v3,
v4,
v5,
],
outputs=[v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54],
device=device,
)
assert_np_equal(v20.numpy()[0], 2 * s.numpy()[0] * v2.numpy()[0, 0], tol=tol)
assert_np_equal(v21.numpy()[0], 2 * s.numpy()[0] * v2.numpy()[0, 1], tol=tol)
assert_np_equal(v30.numpy()[0], 2 * s.numpy()[0] * v3.numpy()[0, 0], tol=10 * tol)
assert_np_equal(v31.numpy()[0], 2 * s.numpy()[0] * v3.numpy()[0, 1], tol=10 * tol)
assert_np_equal(v32.numpy()[0], 2 * s.numpy()[0] * v3.numpy()[0, 2], tol=10 * tol)
assert_np_equal(v40.numpy()[0], 2 * s.numpy()[0] * v4.numpy()[0, 0], tol=10 * tol)
assert_np_equal(v41.numpy()[0], 2 * s.numpy()[0] * v4.numpy()[0, 1], tol=10 * tol)
assert_np_equal(v42.numpy()[0], 2 * s.numpy()[0] * v4.numpy()[0, 2], tol=10 * tol)
assert_np_equal(v43.numpy()[0], 2 * s.numpy()[0] * v4.numpy()[0, 3], tol=10 * tol)
assert_np_equal(v50.numpy()[0], 2 * s.numpy()[0] * v5.numpy()[0, 0], tol=10 * tol)
assert_np_equal(v51.numpy()[0], 2 * s.numpy()[0] * v5.numpy()[0, 1], tol=10 * tol)
assert_np_equal(v52.numpy()[0], 2 * s.numpy()[0] * v5.numpy()[0, 2], tol=10 * tol)
assert_np_equal(v53.numpy()[0], 2 * s.numpy()[0] * v5.numpy()[0, 3], tol=10 * tol)
assert_np_equal(v54.numpy()[0], 2 * s.numpy()[0] * v5.numpy()[0, 4], tol=10 * tol)
incmps = np.concatenate([v.numpy()[0] for v in [v2, v3, v4, v5]])
if dtype in np_float_types:
for i, l in enumerate([v20, v21, v30, v31, v32, v40, v41, v42, v43]):
tape.backward(loss=l)
sgrad = tape.gradients[s].numpy()[0]
assert_np_equal(sgrad, 2 * incmps[i], tol=10 * tol)
allgrads = np.concatenate([tape.gradients[v].numpy()[0] for v in [v2, v3, v4]])
expected_grads = np.zeros_like(allgrads)
expected_grads[i] = s.numpy()[0] * 2
assert_np_equal(allgrads, expected_grads, tol=10 * tol)
tape.zero()
def test_scalar_multiplication_rightmul(test, device, dtype, register_kernels=False):
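    # Same as test_scalar_multiplication, but with the scalar on the right (v * s).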
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
def check_rightmul(
s: wp.array(dtype=wptype),
v2: wp.array(dtype=vec2),
v3: wp.array(dtype=vec3),
v4: wp.array(dtype=vec4),
v5: wp.array(dtype=vec5),
v20: wp.array(dtype=wptype),
v21: wp.array(dtype=wptype),
v30: wp.array(dtype=wptype),
v31: wp.array(dtype=wptype),
v32: wp.array(dtype=wptype),
v40: wp.array(dtype=wptype),
v41: wp.array(dtype=wptype),
v42: wp.array(dtype=wptype),
v43: wp.array(dtype=wptype),
v50: wp.array(dtype=wptype),
v51: wp.array(dtype=wptype),
v52: wp.array(dtype=wptype),
v53: wp.array(dtype=wptype),
v54: wp.array(dtype=wptype),
):
v2result = v2[0] * s[0]
v3result = v3[0] * s[0]
v4result = v4[0] * s[0]
v5result = v5[0] * s[0]
# multiply outputs by 2 so we've got something to backpropagate:
v20[0] = wptype(2) * v2result[0]
v21[0] = wptype(2) * v2result[1]
v30[0] = wptype(2) * v3result[0]
v31[0] = wptype(2) * v3result[1]
v32[0] = wptype(2) * v3result[2]
v40[0] = wptype(2) * v4result[0]
v41[0] = wptype(2) * v4result[1]
v42[0] = wptype(2) * v4result[2]
v43[0] = wptype(2) * v4result[3]
v50[0] = wptype(2) * v5result[0]
v51[0] = wptype(2) * v5result[1]
v52[0] = wptype(2) * v5result[2]
v53[0] = wptype(2) * v5result[3]
v54[0] = wptype(2) * v5result[4]
kernel = getkernel(check_rightmul, suffix=dtype.__name__)
if register_kernels:
return
s = wp.array(randvals(rng, [1], dtype), requires_grad=True, device=device)
v2 = wp.array(randvals(rng, (1, 2), dtype), dtype=vec2, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, (1, 4), dtype), dtype=vec4, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, (1, 5), dtype), dtype=vec5, requires_grad=True, device=device)
v20 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v21 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v30 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v31 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v32 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v40 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v41 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v42 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v43 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v50 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v51 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v52 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v53 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v54 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
s,
v2,
v3,
v4,
v5,
],
outputs=[v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54],
device=device,
)
assert_np_equal(v20.numpy()[0], 2 * s.numpy()[0] * v2.numpy()[0, 0], tol=tol)
assert_np_equal(v21.numpy()[0], 2 * s.numpy()[0] * v2.numpy()[0, 1], tol=tol)
assert_np_equal(v30.numpy()[0], 2 * s.numpy()[0] * v3.numpy()[0, 0], tol=10 * tol)
assert_np_equal(v31.numpy()[0], 2 * s.numpy()[0] * v3.numpy()[0, 1], tol=10 * tol)
assert_np_equal(v32.numpy()[0], 2 * s.numpy()[0] * v3.numpy()[0, 2], tol=10 * tol)
assert_np_equal(v40.numpy()[0], 2 * s.numpy()[0] * v4.numpy()[0, 0], tol=10 * tol)
assert_np_equal(v41.numpy()[0], 2 * s.numpy()[0] * v4.numpy()[0, 1], tol=10 * tol)
assert_np_equal(v42.numpy()[0], 2 * s.numpy()[0] * v4.numpy()[0, 2], tol=10 * tol)
assert_np_equal(v43.numpy()[0], 2 * s.numpy()[0] * v4.numpy()[0, 3], tol=10 * tol)
assert_np_equal(v50.numpy()[0], 2 * s.numpy()[0] * v5.numpy()[0, 0], tol=10 * tol)
assert_np_equal(v51.numpy()[0], 2 * s.numpy()[0] * v5.numpy()[0, 1], tol=10 * tol)
assert_np_equal(v52.numpy()[0], 2 * s.numpy()[0] * v5.numpy()[0, 2], tol=10 * tol)
assert_np_equal(v53.numpy()[0], 2 * s.numpy()[0] * v5.numpy()[0, 3], tol=10 * tol)
assert_np_equal(v54.numpy()[0], 2 * s.numpy()[0] * v5.numpy()[0, 4], tol=10 * tol)
incmps = np.concatenate([v.numpy()[0] for v in [v2, v3, v4, v5]])
if dtype in np_float_types:
for i, l in enumerate([v20, v21, v30, v31, v32, v40, v41, v42, v43]):
tape.backward(loss=l)
sgrad = tape.gradients[s].numpy()[0]
assert_np_equal(sgrad, 2 * incmps[i], tol=10 * tol)
allgrads = np.concatenate([tape.gradients[v].numpy()[0] for v in [v2, v3, v4]])
expected_grads = np.zeros_like(allgrads)
expected_grads[i] = s.numpy()[0] * 2
assert_np_equal(allgrads, expected_grads, tol=10 * tol)
tape.zero()
def test_cw_multiplication(test, device, dtype, register_kernels=False):
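    # Component-wise vector product via wp.cw_mul(); check forward values and gradients
    # with respect to both operands.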
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
def check_cw_mul(
s2: wp.array(dtype=vec2),
s3: wp.array(dtype=vec3),
s4: wp.array(dtype=vec4),
s5: wp.array(dtype=vec5),
v2: wp.array(dtype=vec2),
v3: wp.array(dtype=vec3),
v4: wp.array(dtype=vec4),
v5: wp.array(dtype=vec5),
v20: wp.array(dtype=wptype),
v21: wp.array(dtype=wptype),
v30: wp.array(dtype=wptype),
v31: wp.array(dtype=wptype),
v32: wp.array(dtype=wptype),
v40: wp.array(dtype=wptype),
v41: wp.array(dtype=wptype),
v42: wp.array(dtype=wptype),
v43: wp.array(dtype=wptype),
v50: wp.array(dtype=wptype),
v51: wp.array(dtype=wptype),
v52: wp.array(dtype=wptype),
v53: wp.array(dtype=wptype),
v54: wp.array(dtype=wptype),
):
v2result = wp.cw_mul(s2[0], v2[0])
v3result = wp.cw_mul(s3[0], v3[0])
v4result = wp.cw_mul(s4[0], v4[0])
v5result = wp.cw_mul(s5[0], v5[0])
v20[0] = wptype(2) * v2result[0]
v21[0] = wptype(2) * v2result[1]
v30[0] = wptype(2) * v3result[0]
v31[0] = wptype(2) * v3result[1]
v32[0] = wptype(2) * v3result[2]
v40[0] = wptype(2) * v4result[0]
v41[0] = wptype(2) * v4result[1]
v42[0] = wptype(2) * v4result[2]
v43[0] = wptype(2) * v4result[3]
v50[0] = wptype(2) * v5result[0]
v51[0] = wptype(2) * v5result[1]
v52[0] = wptype(2) * v5result[2]
v53[0] = wptype(2) * v5result[3]
v54[0] = wptype(2) * v5result[4]
kernel = getkernel(check_cw_mul, suffix=dtype.__name__)
if register_kernels:
return
s2 = wp.array(randvals(rng, (1, 2), dtype), dtype=vec2, requires_grad=True, device=device)
s3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
s4 = wp.array(randvals(rng, (1, 4), dtype), dtype=vec4, requires_grad=True, device=device)
s5 = wp.array(randvals(rng, (1, 5), dtype), dtype=vec5, requires_grad=True, device=device)
v2 = wp.array(randvals(rng, (1, 2), dtype), dtype=vec2, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, (1, 4), dtype), dtype=vec4, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, (1, 5), dtype), dtype=vec5, requires_grad=True, device=device)
v20 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v21 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v30 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v31 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v32 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v40 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v41 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v42 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v43 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v50 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v51 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v52 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v53 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v54 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
s2,
s3,
s4,
s5,
v2,
v3,
v4,
v5,
],
outputs=[v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54],
device=device,
)
assert_np_equal(v20.numpy()[0], 2 * s2.numpy()[0, 0] * v2.numpy()[0, 0], tol=10 * tol)
assert_np_equal(v21.numpy()[0], 2 * s2.numpy()[0, 1] * v2.numpy()[0, 1], tol=10 * tol)
assert_np_equal(v30.numpy()[0], 2 * s3.numpy()[0, 0] * v3.numpy()[0, 0], tol=10 * tol)
assert_np_equal(v31.numpy()[0], 2 * s3.numpy()[0, 1] * v3.numpy()[0, 1], tol=10 * tol)
assert_np_equal(v32.numpy()[0], 2 * s3.numpy()[0, 2] * v3.numpy()[0, 2], tol=10 * tol)
assert_np_equal(v40.numpy()[0], 2 * s4.numpy()[0, 0] * v4.numpy()[0, 0], tol=10 * tol)
assert_np_equal(v41.numpy()[0], 2 * s4.numpy()[0, 1] * v4.numpy()[0, 1], tol=10 * tol)
assert_np_equal(v42.numpy()[0], 2 * s4.numpy()[0, 2] * v4.numpy()[0, 2], tol=10 * tol)
assert_np_equal(v43.numpy()[0], 2 * s4.numpy()[0, 3] * v4.numpy()[0, 3], tol=10 * tol)
assert_np_equal(v50.numpy()[0], 2 * s5.numpy()[0, 0] * v5.numpy()[0, 0], tol=10 * tol)
assert_np_equal(v51.numpy()[0], 2 * s5.numpy()[0, 1] * v5.numpy()[0, 1], tol=10 * tol)
assert_np_equal(v52.numpy()[0], 2 * s5.numpy()[0, 2] * v5.numpy()[0, 2], tol=10 * tol)
assert_np_equal(v53.numpy()[0], 2 * s5.numpy()[0, 3] * v5.numpy()[0, 3], tol=10 * tol)
assert_np_equal(v54.numpy()[0], 2 * s5.numpy()[0, 4] * v5.numpy()[0, 4], tol=10 * tol)
incmps = np.concatenate([v.numpy()[0] for v in [v2, v3, v4, v5]])
scmps = np.concatenate([v.numpy()[0] for v in [s2, s3, s4, s5]])
if dtype in np_float_types:
for i, l in enumerate([v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54]):
tape.backward(loss=l)
sgrads = np.concatenate([tape.gradients[v].numpy()[0] for v in [s2, s3, s4, s5]])
expected_grads = np.zeros_like(sgrads)
expected_grads[i] = incmps[i] * 2
assert_np_equal(sgrads, expected_grads, tol=10 * tol)
allgrads = np.concatenate([tape.gradients[v].numpy()[0] for v in [v2, v3, v4, v5]])
expected_grads = np.zeros_like(allgrads)
expected_grads[i] = scmps[i] * 2
assert_np_equal(allgrads, expected_grads, tol=10 * tol)
tape.zero()
def test_scalar_division(test, device, dtype, register_kernels=False):
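    # Divide vectors by a scalar (v / s); integer dtypes are compared against floor
    # division, float dtypes against true division, with gradient checks for floats.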
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
def check_div(
s: wp.array(dtype=wptype),
v2: wp.array(dtype=vec2),
v3: wp.array(dtype=vec3),
v4: wp.array(dtype=vec4),
v5: wp.array(dtype=vec5),
v20: wp.array(dtype=wptype),
v21: wp.array(dtype=wptype),
v30: wp.array(dtype=wptype),
v31: wp.array(dtype=wptype),
v32: wp.array(dtype=wptype),
v40: wp.array(dtype=wptype),
v41: wp.array(dtype=wptype),
v42: wp.array(dtype=wptype),
v43: wp.array(dtype=wptype),
v50: wp.array(dtype=wptype),
v51: wp.array(dtype=wptype),
v52: wp.array(dtype=wptype),
v53: wp.array(dtype=wptype),
v54: wp.array(dtype=wptype),
):
v2result = v2[0] / s[0]
v3result = v3[0] / s[0]
v4result = v4[0] / s[0]
v5result = v5[0] / s[0]
v20[0] = wptype(2) * v2result[0]
v21[0] = wptype(2) * v2result[1]
v30[0] = wptype(2) * v3result[0]
v31[0] = wptype(2) * v3result[1]
v32[0] = wptype(2) * v3result[2]
v40[0] = wptype(2) * v4result[0]
v41[0] = wptype(2) * v4result[1]
v42[0] = wptype(2) * v4result[2]
v43[0] = wptype(2) * v4result[3]
v50[0] = wptype(2) * v5result[0]
v51[0] = wptype(2) * v5result[1]
v52[0] = wptype(2) * v5result[2]
v53[0] = wptype(2) * v5result[3]
v54[0] = wptype(2) * v5result[4]
kernel = getkernel(check_div, suffix=dtype.__name__)
if register_kernels:
return
s = wp.array(randvals(rng, [1], dtype), requires_grad=True, device=device)
v2 = wp.array(randvals(rng, (1, 2), dtype), dtype=vec2, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, (1, 4), dtype), dtype=vec4, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, (1, 5), dtype), dtype=vec5, requires_grad=True, device=device)
v20 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v21 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v30 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v31 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v32 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v40 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v41 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v42 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v43 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v50 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v51 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v52 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v53 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v54 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
s,
v2,
v3,
v4,
v5,
],
outputs=[v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54],
device=device,
)
if dtype in np_int_types:
assert_np_equal(v20.numpy()[0], 2 * (v2.numpy()[0, 0] // (s.numpy()[0])), tol=tol)
assert_np_equal(v21.numpy()[0], 2 * (v2.numpy()[0, 1] // (s.numpy()[0])), tol=tol)
assert_np_equal(v30.numpy()[0], 2 * (v3.numpy()[0, 0] // (s.numpy()[0])), tol=10 * tol)
assert_np_equal(v31.numpy()[0], 2 * (v3.numpy()[0, 1] // (s.numpy()[0])), tol=10 * tol)
assert_np_equal(v32.numpy()[0], 2 * (v3.numpy()[0, 2] // (s.numpy()[0])), tol=10 * tol)
assert_np_equal(v40.numpy()[0], 2 * (v4.numpy()[0, 0] // (s.numpy()[0])), tol=10 * tol)
assert_np_equal(v41.numpy()[0], 2 * (v4.numpy()[0, 1] // (s.numpy()[0])), tol=10 * tol)
assert_np_equal(v42.numpy()[0], 2 * (v4.numpy()[0, 2] // (s.numpy()[0])), tol=10 * tol)
assert_np_equal(v43.numpy()[0], 2 * (v4.numpy()[0, 3] // (s.numpy()[0])), tol=10 * tol)
assert_np_equal(v50.numpy()[0], 2 * (v5.numpy()[0, 0] // (s.numpy()[0])), tol=10 * tol)
assert_np_equal(v51.numpy()[0], 2 * (v5.numpy()[0, 1] // (s.numpy()[0])), tol=10 * tol)
assert_np_equal(v52.numpy()[0], 2 * (v5.numpy()[0, 2] // (s.numpy()[0])), tol=10 * tol)
assert_np_equal(v53.numpy()[0], 2 * (v5.numpy()[0, 3] // (s.numpy()[0])), tol=10 * tol)
assert_np_equal(v54.numpy()[0], 2 * (v5.numpy()[0, 4] // (s.numpy()[0])), tol=10 * tol)
else:
assert_np_equal(v20.numpy()[0], 2 * v2.numpy()[0, 0] / (s.numpy()[0]), tol=tol)
assert_np_equal(v21.numpy()[0], 2 * v2.numpy()[0, 1] / (s.numpy()[0]), tol=tol)
assert_np_equal(v30.numpy()[0], 2 * v3.numpy()[0, 0] / (s.numpy()[0]), tol=10 * tol)
assert_np_equal(v31.numpy()[0], 2 * v3.numpy()[0, 1] / (s.numpy()[0]), tol=10 * tol)
assert_np_equal(v32.numpy()[0], 2 * v3.numpy()[0, 2] / (s.numpy()[0]), tol=10 * tol)
assert_np_equal(v40.numpy()[0], 2 * v4.numpy()[0, 0] / (s.numpy()[0]), tol=10 * tol)
assert_np_equal(v41.numpy()[0], 2 * v4.numpy()[0, 1] / (s.numpy()[0]), tol=10 * tol)
assert_np_equal(v42.numpy()[0], 2 * v4.numpy()[0, 2] / (s.numpy()[0]), tol=10 * tol)
assert_np_equal(v43.numpy()[0], 2 * v4.numpy()[0, 3] / (s.numpy()[0]), tol=10 * tol)
assert_np_equal(v50.numpy()[0], 2 * v5.numpy()[0, 0] / (s.numpy()[0]), tol=10 * tol)
assert_np_equal(v51.numpy()[0], 2 * v5.numpy()[0, 1] / (s.numpy()[0]), tol=10 * tol)
assert_np_equal(v52.numpy()[0], 2 * v5.numpy()[0, 2] / (s.numpy()[0]), tol=10 * tol)
assert_np_equal(v53.numpy()[0], 2 * v5.numpy()[0, 3] / (s.numpy()[0]), tol=10 * tol)
assert_np_equal(v54.numpy()[0], 2 * v5.numpy()[0, 4] / (s.numpy()[0]), tol=10 * tol)
incmps = np.concatenate([v.numpy()[0] for v in [v2, v3, v4, v5]])
if dtype in np_float_types:
for i, l in enumerate([v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54]):
tape.backward(loss=l)
sgrad = tape.gradients[s].numpy()[0]
# d/ds v/s = -v/s^2
assert_np_equal(sgrad, -2 * incmps[i] / (s.numpy()[0] * s.numpy()[0]), tol=10 * tol)
allgrads = np.concatenate([tape.gradients[v].numpy()[0] for v in [v2, v3, v4, v5]])
expected_grads = np.zeros_like(allgrads)
expected_grads[i] = 2 / s.numpy()[0]
# d/dv v/s = 1/s
assert_np_equal(allgrads, expected_grads, tol=tol)
tape.zero()
def test_cw_division(test, device, dtype, register_kernels=False):
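    # Component-wise vector division via wp.cw_div(); integer dtypes are compared against
    # floor division, float dtypes against true division plus gradient checks.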
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
def check_cw_div(
s2: wp.array(dtype=vec2),
s3: wp.array(dtype=vec3),
s4: wp.array(dtype=vec4),
s5: wp.array(dtype=vec5),
v2: wp.array(dtype=vec2),
v3: wp.array(dtype=vec3),
v4: wp.array(dtype=vec4),
v5: wp.array(dtype=vec5),
v20: wp.array(dtype=wptype),
v21: wp.array(dtype=wptype),
v30: wp.array(dtype=wptype),
v31: wp.array(dtype=wptype),
v32: wp.array(dtype=wptype),
v40: wp.array(dtype=wptype),
v41: wp.array(dtype=wptype),
v42: wp.array(dtype=wptype),
v43: wp.array(dtype=wptype),
v50: wp.array(dtype=wptype),
v51: wp.array(dtype=wptype),
v52: wp.array(dtype=wptype),
v53: wp.array(dtype=wptype),
v54: wp.array(dtype=wptype),
):
v2result = wp.cw_div(v2[0], s2[0])
v3result = wp.cw_div(v3[0], s3[0])
v4result = wp.cw_div(v4[0], s4[0])
v5result = wp.cw_div(v5[0], s5[0])
v20[0] = wptype(2) * v2result[0]
v21[0] = wptype(2) * v2result[1]
v30[0] = wptype(2) * v3result[0]
v31[0] = wptype(2) * v3result[1]
v32[0] = wptype(2) * v3result[2]
v40[0] = wptype(2) * v4result[0]
v41[0] = wptype(2) * v4result[1]
v42[0] = wptype(2) * v4result[2]
v43[0] = wptype(2) * v4result[3]
v50[0] = wptype(2) * v5result[0]
v51[0] = wptype(2) * v5result[1]
v52[0] = wptype(2) * v5result[2]
v53[0] = wptype(2) * v5result[3]
v54[0] = wptype(2) * v5result[4]
kernel = getkernel(check_cw_div, suffix=dtype.__name__)
if register_kernels:
return
s2 = wp.array(randvals(rng, (1, 2), dtype), dtype=vec2, requires_grad=True, device=device)
s3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
s4 = wp.array(randvals(rng, (1, 4), dtype), dtype=vec4, requires_grad=True, device=device)
s5 = wp.array(randvals(rng, (1, 5), dtype), dtype=vec5, requires_grad=True, device=device)
v2 = wp.array(randvals(rng, (1, 2), dtype), dtype=vec2, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, (1, 4), dtype), dtype=vec4, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, (1, 5), dtype), dtype=vec5, requires_grad=True, device=device)
v20 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v21 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v30 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v31 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v32 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v40 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v41 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v42 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v43 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v50 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v51 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v52 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v53 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v54 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
s2,
s3,
s4,
s5,
v2,
v3,
v4,
v5,
],
outputs=[v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54],
device=device,
)
if dtype in np_int_types:
assert_np_equal(v20.numpy()[0], 2 * (v2.numpy()[0, 0] // s2.numpy()[0, 0]), tol=tol)
assert_np_equal(v21.numpy()[0], 2 * (v2.numpy()[0, 1] // s2.numpy()[0, 1]), tol=tol)
assert_np_equal(v30.numpy()[0], 2 * (v3.numpy()[0, 0] // s3.numpy()[0, 0]), tol=tol)
assert_np_equal(v31.numpy()[0], 2 * (v3.numpy()[0, 1] // s3.numpy()[0, 1]), tol=tol)
assert_np_equal(v32.numpy()[0], 2 * (v3.numpy()[0, 2] // s3.numpy()[0, 2]), tol=tol)
assert_np_equal(v40.numpy()[0], 2 * (v4.numpy()[0, 0] // s4.numpy()[0, 0]), tol=tol)
assert_np_equal(v41.numpy()[0], 2 * (v4.numpy()[0, 1] // s4.numpy()[0, 1]), tol=tol)
assert_np_equal(v42.numpy()[0], 2 * (v4.numpy()[0, 2] // s4.numpy()[0, 2]), tol=tol)
assert_np_equal(v43.numpy()[0], 2 * (v4.numpy()[0, 3] // s4.numpy()[0, 3]), tol=tol)
assert_np_equal(v50.numpy()[0], 2 * (v5.numpy()[0, 0] // s5.numpy()[0, 0]), tol=tol)
assert_np_equal(v51.numpy()[0], 2 * (v5.numpy()[0, 1] // s5.numpy()[0, 1]), tol=tol)
assert_np_equal(v52.numpy()[0], 2 * (v5.numpy()[0, 2] // s5.numpy()[0, 2]), tol=tol)
assert_np_equal(v53.numpy()[0], 2 * (v5.numpy()[0, 3] // s5.numpy()[0, 3]), tol=tol)
assert_np_equal(v54.numpy()[0], 2 * (v5.numpy()[0, 4] // s5.numpy()[0, 4]), tol=tol)
else:
assert_np_equal(v20.numpy()[0], 2 * v2.numpy()[0, 0] / s2.numpy()[0, 0], tol=tol)
assert_np_equal(v21.numpy()[0], 2 * v2.numpy()[0, 1] / s2.numpy()[0, 1], tol=tol)
assert_np_equal(v30.numpy()[0], 2 * v3.numpy()[0, 0] / s3.numpy()[0, 0], tol=tol)
assert_np_equal(v31.numpy()[0], 2 * v3.numpy()[0, 1] / s3.numpy()[0, 1], tol=tol)
assert_np_equal(v32.numpy()[0], 2 * v3.numpy()[0, 2] / s3.numpy()[0, 2], tol=tol)
assert_np_equal(v40.numpy()[0], 2 * v4.numpy()[0, 0] / s4.numpy()[0, 0], tol=tol)
assert_np_equal(v41.numpy()[0], 2 * v4.numpy()[0, 1] / s4.numpy()[0, 1], tol=tol)
assert_np_equal(v42.numpy()[0], 2 * v4.numpy()[0, 2] / s4.numpy()[0, 2], tol=tol)
assert_np_equal(v43.numpy()[0], 2 * v4.numpy()[0, 3] / s4.numpy()[0, 3], tol=tol)
assert_np_equal(v50.numpy()[0], 2 * v5.numpy()[0, 0] / s5.numpy()[0, 0], tol=tol)
assert_np_equal(v51.numpy()[0], 2 * v5.numpy()[0, 1] / s5.numpy()[0, 1], tol=tol)
assert_np_equal(v52.numpy()[0], 2 * v5.numpy()[0, 2] / s5.numpy()[0, 2], tol=tol)
assert_np_equal(v53.numpy()[0], 2 * v5.numpy()[0, 3] / s5.numpy()[0, 3], tol=tol)
assert_np_equal(v54.numpy()[0], 2 * v5.numpy()[0, 4] / s5.numpy()[0, 4], tol=tol)
if dtype in np_float_types:
incmps = np.concatenate([v.numpy()[0] for v in [v2, v3, v4, v5]])
scmps = np.concatenate([v.numpy()[0] for v in [s2, s3, s4, s5]])
for i, l in enumerate([v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54]):
tape.backward(loss=l)
sgrads = np.concatenate([tape.gradients[v].numpy()[0] for v in [s2, s3, s4, s5]])
expected_grads = np.zeros_like(sgrads)
# d/ds v/s = -v/s^2
expected_grads[i] = -incmps[i] * 2 / (scmps[i] * scmps[i])
assert_np_equal(sgrads, expected_grads, tol=20 * tol)
allgrads = np.concatenate([tape.gradients[v].numpy()[0] for v in [v2, v3, v4, v5]])
expected_grads = np.zeros_like(allgrads)
# d/dv v/s = 1/s
expected_grads[i] = 2 / scmps[i]
assert_np_equal(allgrads, expected_grads, tol=tol)
tape.zero()
def test_addition(test, device, dtype, register_kernels=False):
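    # Add two vectors component-wise and check the forward values plus gradients with
    # respect to both operands.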
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
def check_add(
s2: wp.array(dtype=vec2),
s3: wp.array(dtype=vec3),
s4: wp.array(dtype=vec4),
s5: wp.array(dtype=vec5),
v2: wp.array(dtype=vec2),
v3: wp.array(dtype=vec3),
v4: wp.array(dtype=vec4),
v5: wp.array(dtype=vec5),
v20: wp.array(dtype=wptype),
v21: wp.array(dtype=wptype),
v30: wp.array(dtype=wptype),
v31: wp.array(dtype=wptype),
v32: wp.array(dtype=wptype),
v40: wp.array(dtype=wptype),
v41: wp.array(dtype=wptype),
v42: wp.array(dtype=wptype),
v43: wp.array(dtype=wptype),
v50: wp.array(dtype=wptype),
v51: wp.array(dtype=wptype),
v52: wp.array(dtype=wptype),
v53: wp.array(dtype=wptype),
v54: wp.array(dtype=wptype),
):
v2result = v2[0] + s2[0]
v3result = v3[0] + s3[0]
v4result = v4[0] + s4[0]
v5result = v5[0] + s5[0]
v20[0] = wptype(2) * v2result[0]
v21[0] = wptype(2) * v2result[1]
v30[0] = wptype(2) * v3result[0]
v31[0] = wptype(2) * v3result[1]
v32[0] = wptype(2) * v3result[2]
v40[0] = wptype(2) * v4result[0]
v41[0] = wptype(2) * v4result[1]
v42[0] = wptype(2) * v4result[2]
v43[0] = wptype(2) * v4result[3]
v50[0] = wptype(2) * v5result[0]
v51[0] = wptype(2) * v5result[1]
v52[0] = wptype(2) * v5result[2]
v53[0] = wptype(2) * v5result[3]
v54[0] = wptype(2) * v5result[4]
kernel = getkernel(check_add, suffix=dtype.__name__)
if register_kernels:
return
s2 = wp.array(randvals(rng, (1, 2), dtype), dtype=vec2, requires_grad=True, device=device)
s3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
s4 = wp.array(randvals(rng, (1, 4), dtype), dtype=vec4, requires_grad=True, device=device)
s5 = wp.array(randvals(rng, (1, 5), dtype), dtype=vec5, requires_grad=True, device=device)
v2 = wp.array(randvals(rng, (1, 2), dtype), dtype=vec2, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, (1, 4), dtype), dtype=vec4, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, (1, 5), dtype), dtype=vec5, requires_grad=True, device=device)
v20 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v21 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v30 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v31 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v32 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v40 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v41 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v42 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v43 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v50 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v51 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v52 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v53 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v54 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
s2,
s3,
s4,
s5,
v2,
v3,
v4,
v5,
],
outputs=[v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54],
device=device,
)
assert_np_equal(v20.numpy()[0], 2 * (v2.numpy()[0, 0] + s2.numpy()[0, 0]), tol=tol)
assert_np_equal(v21.numpy()[0], 2 * (v2.numpy()[0, 1] + s2.numpy()[0, 1]), tol=tol)
assert_np_equal(v30.numpy()[0], 2 * (v3.numpy()[0, 0] + s3.numpy()[0, 0]), tol=tol)
assert_np_equal(v31.numpy()[0], 2 * (v3.numpy()[0, 1] + s3.numpy()[0, 1]), tol=tol)
assert_np_equal(v32.numpy()[0], 2 * (v3.numpy()[0, 2] + s3.numpy()[0, 2]), tol=tol)
assert_np_equal(v40.numpy()[0], 2 * (v4.numpy()[0, 0] + s4.numpy()[0, 0]), tol=tol)
assert_np_equal(v41.numpy()[0], 2 * (v4.numpy()[0, 1] + s4.numpy()[0, 1]), tol=tol)
assert_np_equal(v42.numpy()[0], 2 * (v4.numpy()[0, 2] + s4.numpy()[0, 2]), tol=tol)
assert_np_equal(v43.numpy()[0], 2 * (v4.numpy()[0, 3] + s4.numpy()[0, 3]), tol=tol)
assert_np_equal(v50.numpy()[0], 2 * (v5.numpy()[0, 0] + s5.numpy()[0, 0]), tol=tol)
assert_np_equal(v51.numpy()[0], 2 * (v5.numpy()[0, 1] + s5.numpy()[0, 1]), tol=tol)
assert_np_equal(v52.numpy()[0], 2 * (v5.numpy()[0, 2] + s5.numpy()[0, 2]), tol=tol)
assert_np_equal(v53.numpy()[0], 2 * (v5.numpy()[0, 3] + s5.numpy()[0, 3]), tol=tol)
assert_np_equal(v54.numpy()[0], 2 * (v5.numpy()[0, 4] + s5.numpy()[0, 4]), tol=2 * tol)
if dtype in np_float_types:
for i, l in enumerate([v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54]):
tape.backward(loss=l)
sgrads = np.concatenate([tape.gradients[v].numpy()[0] for v in [s2, s3, s4, s5]])
expected_grads = np.zeros_like(sgrads)
expected_grads[i] = 2
assert_np_equal(sgrads, expected_grads, tol=10 * tol)
allgrads = np.concatenate([tape.gradients[v].numpy()[0] for v in [v2, v3, v4, v5]])
assert_np_equal(allgrads, expected_grads, tol=tol)
tape.zero()
def test_dotproduct(test, device, dtype, register_kernels=False):
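    # Dot products via wp.dot() for vec2..vec5; check the forward value and the gradients
    # with respect to both operands.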
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
def check_dot(
s2: wp.array(dtype=vec2),
s3: wp.array(dtype=vec3),
s4: wp.array(dtype=vec4),
s5: wp.array(dtype=vec5),
v2: wp.array(dtype=vec2),
v3: wp.array(dtype=vec3),
v4: wp.array(dtype=vec4),
v5: wp.array(dtype=vec5),
dot2: wp.array(dtype=wptype),
dot3: wp.array(dtype=wptype),
dot4: wp.array(dtype=wptype),
dot5: wp.array(dtype=wptype),
):
dot2[0] = wptype(2) * wp.dot(v2[0], s2[0])
dot3[0] = wptype(2) * wp.dot(v3[0], s3[0])
dot4[0] = wptype(2) * wp.dot(v4[0], s4[0])
dot5[0] = wptype(2) * wp.dot(v5[0], s5[0])
kernel = getkernel(check_dot, suffix=dtype.__name__)
if register_kernels:
return
s2 = wp.array(randvals(rng, (1, 2), dtype), dtype=vec2, requires_grad=True, device=device)
s3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
s4 = wp.array(randvals(rng, (1, 4), dtype), dtype=vec4, requires_grad=True, device=device)
s5 = wp.array(randvals(rng, (1, 5), dtype), dtype=vec5, requires_grad=True, device=device)
v2 = wp.array(randvals(rng, (1, 2), dtype), dtype=vec2, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, (1, 4), dtype), dtype=vec4, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, (1, 5), dtype), dtype=vec5, requires_grad=True, device=device)
dot2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
dot3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
dot4 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
dot5 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
s2,
s3,
s4,
s5,
v2,
v3,
v4,
v5,
],
outputs=[dot2, dot3, dot4, dot5],
device=device,
)
assert_np_equal(dot2.numpy()[0], 2.0 * (v2.numpy() * s2.numpy()).sum(), tol=10 * tol)
assert_np_equal(dot3.numpy()[0], 2.0 * (v3.numpy() * s3.numpy()).sum(), tol=10 * tol)
assert_np_equal(dot4.numpy()[0], 2.0 * (v4.numpy() * s4.numpy()).sum(), tol=10 * tol)
assert_np_equal(dot5.numpy()[0], 2.0 * (v5.numpy() * s5.numpy()).sum(), tol=10 * tol)
if dtype in np_float_types:
tape.backward(loss=dot2)
sgrads = tape.gradients[s2].numpy()[0]
expected_grads = 2.0 * v2.numpy()[0]
assert_np_equal(sgrads, expected_grads, tol=10 * tol)
vgrads = tape.gradients[v2].numpy()[0]
expected_grads = 2.0 * s2.numpy()[0]
assert_np_equal(vgrads, expected_grads, tol=tol)
tape.zero()
tape.backward(loss=dot3)
sgrads = tape.gradients[s3].numpy()[0]
expected_grads = 2.0 * v3.numpy()[0]
assert_np_equal(sgrads, expected_grads, tol=10 * tol)
vgrads = tape.gradients[v3].numpy()[0]
expected_grads = 2.0 * s3.numpy()[0]
assert_np_equal(vgrads, expected_grads, tol=tol)
tape.zero()
tape.backward(loss=dot4)
sgrads = tape.gradients[s4].numpy()[0]
expected_grads = 2.0 * v4.numpy()[0]
assert_np_equal(sgrads, expected_grads, tol=10 * tol)
vgrads = tape.gradients[v4].numpy()[0]
expected_grads = 2.0 * s4.numpy()[0]
assert_np_equal(vgrads, expected_grads, tol=tol)
tape.zero()
tape.backward(loss=dot5)
sgrads = tape.gradients[s5].numpy()[0]
expected_grads = 2.0 * v5.numpy()[0]
assert_np_equal(sgrads, expected_grads, tol=10 * tol)
vgrads = tape.gradients[v5].numpy()[0]
expected_grads = 2.0 * s5.numpy()[0]
assert_np_equal(vgrads, expected_grads, tol=10 * tol)
tape.zero()
def test_equivalent_types(test, device, dtype, register_kernels=False):
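    # A kernel declared with one set of vector types should accept values of separately
    # constructed but structurally identical (same length and dtype) vector types.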
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
# vector types
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
# vector types equivalent to the above
vec2_equiv = wp.types.vector(length=2, dtype=wptype)
vec3_equiv = wp.types.vector(length=3, dtype=wptype)
vec4_equiv = wp.types.vector(length=4, dtype=wptype)
vec5_equiv = wp.types.vector(length=5, dtype=wptype)
# declare kernel with original types
def check_equivalence(
v2: vec2,
v3: vec3,
v4: vec4,
v5: vec5,
):
wp.expect_eq(v2, vec2(wptype(1), wptype(2)))
wp.expect_eq(v3, vec3(wptype(1), wptype(2), wptype(3)))
wp.expect_eq(v4, vec4(wptype(1), wptype(2), wptype(3), wptype(4)))
wp.expect_eq(v5, vec5(wptype(1), wptype(2), wptype(3), wptype(4), wptype(5)))
wp.expect_eq(v2, vec2_equiv(wptype(1), wptype(2)))
wp.expect_eq(v3, vec3_equiv(wptype(1), wptype(2), wptype(3)))
wp.expect_eq(v4, vec4_equiv(wptype(1), wptype(2), wptype(3), wptype(4)))
wp.expect_eq(v5, vec5_equiv(wptype(1), wptype(2), wptype(3), wptype(4), wptype(5)))
kernel = getkernel(check_equivalence, suffix=dtype.__name__)
if register_kernels:
return
# call kernel with equivalent types
v2 = vec2_equiv(1, 2)
v3 = vec3_equiv(1, 2, 3)
v4 = vec4_equiv(1, 2, 3, 4)
v5 = vec5_equiv(1, 2, 3, 4, 5)
wp.launch(kernel, dim=1, inputs=[v2, v3, v4, v5], device=device)
def test_conversions(test, device, dtype, register_kernels=False):
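    # wp.vec3 can be constructed explicitly from tuples, lists, and numpy arrays, and those
    # containers can also be passed directly to wp.launch() in place of vector values.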
def check_vectors_equal(
v0: wp.vec3,
v1: wp.vec3,
v2: wp.vec3,
v3: wp.vec3,
):
wp.expect_eq(v1, v0)
wp.expect_eq(v2, v0)
wp.expect_eq(v3, v0)
kernel = getkernel(check_vectors_equal, suffix=dtype.__name__)
if register_kernels:
return
v0 = wp.vec3(1, 2, 3)
# test explicit conversions - constructing vectors from different containers
v1 = wp.vec3((1, 2, 3))
v2 = wp.vec3([1, 2, 3])
v3 = wp.vec3(np.array([1, 2, 3], dtype=dtype))
wp.launch(kernel, dim=1, inputs=[v0, v1, v2, v3], device=device)
# test implicit conversions - passing different containers as vectors to wp.launch()
v1 = (1, 2, 3)
v2 = [1, 2, 3]
v3 = np.array([1, 2, 3], dtype=dtype)
wp.launch(kernel, dim=1, inputs=[v0, v1, v2, v3], device=device)
def test_constants(test, device, dtype, register_kernels=False):
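    # Vectors wrapped in wp.constant() can be captured by kernels and compared with
    # wp.expect_eq().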
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
cv2 = wp.constant(vec2(1, 2))
cv3 = wp.constant(vec3(1, 2, 3))
cv4 = wp.constant(vec4(1, 2, 3, 4))
cv5 = wp.constant(vec5(1, 2, 3, 4, 5))
def check_vector_constants():
wp.expect_eq(cv2, vec2(wptype(1), wptype(2)))
wp.expect_eq(cv3, vec3(wptype(1), wptype(2), wptype(3)))
wp.expect_eq(cv4, vec4(wptype(1), wptype(2), wptype(3), wptype(4)))
wp.expect_eq(cv5, vec5(wptype(1), wptype(2), wptype(3), wptype(4), wptype(5)))
kernel = getkernel(check_vector_constants, suffix=dtype.__name__)
if register_kernels:
return
wp.launch(kernel, dim=1, inputs=[], device=device)
def test_minmax(test, device, dtype, register_kernels=False):
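    # Component-wise wp.min()/wp.max() on vec2..vec5; check values against
    # np.minimum/np.maximum and that gradients flow only to the selected operand.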
rng = np.random.default_rng(123)
    # TODO: not quite sure why, but the numbers are off for 16-bit float
    # on the CPU (but not CUDA). This is probably just the sketchy float16
    # arithmetic I implemented to get all this stuff working, so
    # hopefully that can be fixed when we do that correctly.
tol = {
np.float16: 1.0e-2,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
    # TODO: also not quite sure why, but this kernel compiles incredibly
    # slowly...
def check_vec_min_max(
a: wp.array(dtype=wptype, ndim=2),
b: wp.array(dtype=wptype, ndim=2),
mins: wp.array(dtype=wptype, ndim=2),
maxs: wp.array(dtype=wptype, ndim=2),
):
for i in range(10):
# multiplying by 2 so we've got something to backpropagate:
a2read = vec2(a[i, 0], a[i, 1])
b2read = vec2(b[i, 0], b[i, 1])
c2 = wptype(2) * wp.min(a2read, b2read)
d2 = wptype(2) * wp.max(a2read, b2read)
a3read = vec3(a[i, 2], a[i, 3], a[i, 4])
b3read = vec3(b[i, 2], b[i, 3], b[i, 4])
c3 = wptype(2) * wp.min(a3read, b3read)
d3 = wptype(2) * wp.max(a3read, b3read)
a4read = vec4(a[i, 5], a[i, 6], a[i, 7], a[i, 8])
b4read = vec4(b[i, 5], b[i, 6], b[i, 7], b[i, 8])
c4 = wptype(2) * wp.min(a4read, b4read)
d4 = wptype(2) * wp.max(a4read, b4read)
a5read = vec5(a[i, 9], a[i, 10], a[i, 11], a[i, 12], a[i, 13])
b5read = vec5(b[i, 9], b[i, 10], b[i, 11], b[i, 12], b[i, 13])
c5 = wptype(2) * wp.min(a5read, b5read)
d5 = wptype(2) * wp.max(a5read, b5read)
mins[i, 0] = c2[0]
mins[i, 1] = c2[1]
mins[i, 2] = c3[0]
mins[i, 3] = c3[1]
mins[i, 4] = c3[2]
mins[i, 5] = c4[0]
mins[i, 6] = c4[1]
mins[i, 7] = c4[2]
mins[i, 8] = c4[3]
mins[i, 9] = c5[0]
mins[i, 10] = c5[1]
mins[i, 11] = c5[2]
mins[i, 12] = c5[3]
mins[i, 13] = c5[4]
maxs[i, 0] = d2[0]
maxs[i, 1] = d2[1]
maxs[i, 2] = d3[0]
maxs[i, 3] = d3[1]
maxs[i, 4] = d3[2]
maxs[i, 5] = d4[0]
maxs[i, 6] = d4[1]
maxs[i, 7] = d4[2]
maxs[i, 8] = d4[3]
maxs[i, 9] = d5[0]
maxs[i, 10] = d5[1]
maxs[i, 11] = d5[2]
maxs[i, 12] = d5[3]
maxs[i, 13] = d5[4]
kernel = getkernel(check_vec_min_max, suffix=dtype.__name__)
output_select_kernel = get_select_kernel2(wptype)
if register_kernels:
return
a = wp.array(randvals(rng, (10, 14), dtype), dtype=wptype, requires_grad=True, device=device)
b = wp.array(randvals(rng, (10, 14), dtype), dtype=wptype, requires_grad=True, device=device)
mins = wp.zeros((10, 14), dtype=wptype, requires_grad=True, device=device)
maxs = wp.zeros((10, 14), dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[a, b], outputs=[mins, maxs], device=device)
assert_np_equal(mins.numpy(), 2 * np.minimum(a.numpy(), b.numpy()), tol=tol)
assert_np_equal(maxs.numpy(), 2 * np.maximum(a.numpy(), b.numpy()), tol=tol)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
if dtype in np_float_types:
for i in range(10):
for j in range(14):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[a, b], outputs=[mins, maxs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[mins, i, j], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(a.numpy())
expected[i, j] = 2 if (a.numpy()[i, j] < b.numpy()[i, j]) else 0
assert_np_equal(tape.gradients[a].numpy(), expected, tol=tol)
expected[i, j] = 2 if (b.numpy()[i, j] < a.numpy()[i, j]) else 0
assert_np_equal(tape.gradients[b].numpy(), expected, tol=tol)
tape.zero()
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[a, b], outputs=[mins, maxs], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[maxs, i, j], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(a.numpy())
expected[i, j] = 2 if (a.numpy()[i, j] > b.numpy()[i, j]) else 0
assert_np_equal(tape.gradients[a].numpy(), expected, tol=tol)
expected[i, j] = 2 if (b.numpy()[i, j] > a.numpy()[i, j]) else 0
assert_np_equal(tape.gradients[b].numpy(), expected, tol=tol)
tape.zero()
devices = get_test_devices()
class TestVecScalarOps(unittest.TestCase):
pass
for dtype in np_scalar_types:
add_function_test(TestVecScalarOps, f"test_arrays_{dtype.__name__}", test_arrays, devices=devices, dtype=dtype)
add_function_test(TestVecScalarOps, f"test_components_{dtype.__name__}", test_components, devices=None, dtype=dtype)
add_function_test(
TestVecScalarOps, f"test_py_arithmetic_ops_{dtype.__name__}", test_py_arithmetic_ops, devices=None, dtype=dtype
)
add_function_test_register_kernel(
TestVecScalarOps, f"test_constructors_{dtype.__name__}", test_constructors, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestVecScalarOps,
f"test_anon_type_instance_{dtype.__name__}",
test_anon_type_instance,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestVecScalarOps, f"test_indexing_{dtype.__name__}", test_indexing, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestVecScalarOps, f"test_equality_{dtype.__name__}", test_equality, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestVecScalarOps,
f"test_scalar_multiplication_{dtype.__name__}",
test_scalar_multiplication,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestVecScalarOps,
f"test_scalar_multiplication_rightmul_{dtype.__name__}",
test_scalar_multiplication_rightmul,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestVecScalarOps,
f"test_cw_multiplication_{dtype.__name__}",
test_cw_multiplication,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestVecScalarOps, f"test_scalar_division_{dtype.__name__}", test_scalar_division, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestVecScalarOps, f"test_cw_division_{dtype.__name__}", test_cw_division, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestVecScalarOps, f"test_addition_{dtype.__name__}", test_addition, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestVecScalarOps, f"test_dotproduct_{dtype.__name__}", test_dotproduct, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestVecScalarOps, f"test_equivalent_types_{dtype.__name__}", test_equivalent_types, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestVecScalarOps, f"test_conversions_{dtype.__name__}", test_conversions, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestVecScalarOps, f"test_constants_{dtype.__name__}", test_constants, devices=devices, dtype=dtype
)
# the kernels in this test compile incredibly slowly...
# add_function_test_register_kernel(TestVecScalarOps, f"test_minmax_{dtype.__name__}", test_minmax, devices=devices, dtype=dtype)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=True)
| 84,906 | Python | 39.470448 | 133 | 0.571703 |
NVIDIA/warp/warp/tests/test_matmul_lite.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
wp.init() # For wp.context.runtime.core.is_cutlass_enabled()
class gemm_test_bed_runner:
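    # Allocates random integer-valued matrices and checks wp.matmul()/wp.batched_matmul()
    # forward results and adjoints against numpy references.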
def __init__(self, dtype, device):
self.dtype = dtype
self.device = device
def alloc(self, m, n, k, batch_count):
rng = np.random.default_rng(42)
low = -4.5
high = 3.5
if batch_count == 1:
A = wp.array2d(
np.ceil(rng.uniform(low=low, high=high, size=(m, k))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
B = wp.array2d(
np.ceil(rng.uniform(low=low, high=high, size=(k, n))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
C = wp.array2d(
np.ceil(rng.uniform(low=low, high=high, size=(m, n))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
D = wp.array2d(np.zeros((m, n)), dtype=self.dtype, device=self.device, requires_grad=True)
else:
A = wp.array3d(
np.ceil(rng.uniform(low=low, high=high, size=(batch_count, m, k))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
B = wp.array3d(
np.ceil(rng.uniform(low=low, high=high, size=(batch_count, k, n))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
C = wp.array3d(
np.ceil(rng.uniform(low=low, high=high, size=(batch_count, m, n))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
D = wp.array3d(np.zeros((batch_count, m, n)), dtype=self.dtype, device=self.device, requires_grad=True)
return A, B, C, D
def run_and_verify(self, m, n, k, batch_count, alpha, beta):
A, B, C, D = self.alloc(m, n, k, batch_count)
ones = wp.zeros_like(D)
ones.fill_(1.0)
if batch_count == 1:
tape = wp.Tape()
with tape:
wp.matmul(A, B, C, D, alpha, beta, False)
tape.backward(grads={D: ones})
D_np = alpha * (A.numpy() @ B.numpy()) + beta * C.numpy()
assert_np_equal(D.numpy(), D_np)
adj_A_np = alpha * np.matmul(ones.numpy(), B.numpy().transpose())
adj_B_np = alpha * (A.numpy().transpose() @ ones.numpy())
adj_C_np = beta * ones.numpy()
else:
tape = wp.Tape()
with tape:
wp.batched_matmul(A, B, C, D, alpha, beta, False)
tape.backward(grads={D: ones})
D_np = alpha * np.matmul(A.numpy(), B.numpy()) + beta * C.numpy()
assert_np_equal(D.numpy(), D_np)
adj_A_np = alpha * np.matmul(ones.numpy(), B.numpy().transpose((0, 2, 1)))
adj_B_np = alpha * np.matmul(A.numpy().transpose((0, 2, 1)), ones.numpy())
adj_C_np = beta * ones.numpy()
assert_np_equal(A.grad.numpy(), adj_A_np)
assert_np_equal(B.grad.numpy(), adj_B_np)
assert_np_equal(C.grad.numpy(), adj_C_np)
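    # The adjoints asserted above follow from the chain rule for
    # D = alpha * A @ B + beta * C with upstream gradient G = dL/dD (all ones here):
    #   dL/dA = alpha * G @ B^T,  dL/dB = alpha * A^T @ G,  dL/dC = beta * G
    # (transposes taken per batch in the batched case).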
def run(self):
Ms = [8]
Ns = [16]
Ks = [32]
batch_counts = [1]
betas = [1.0]
alpha = 1.0
for batch_count in batch_counts:
for m in Ms:
for n in Ns:
for k in Ks:
for beta in betas:
self.run_and_verify(m, n, k, batch_count, alpha, beta)
class gemm_test_bed_runner_transpose:
def __init__(self, dtype, device):
self.dtype = dtype
self.device = device
def alloc(self, m, n, k, batch_count):
rng = np.random.default_rng(42)
low = -4.5
high = 3.5
if batch_count == 1:
A = wp.array2d(
np.ceil(rng.uniform(low=low, high=high, size=(m, k))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
B = wp.array2d(
np.ceil(rng.uniform(low=low, high=high, size=(k, n))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
C = wp.array2d(
np.ceil(rng.uniform(low=low, high=high, size=(m, n))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
D = wp.array2d(np.zeros((m, n)), dtype=self.dtype, device=self.device, requires_grad=True)
AT = wp.array2d(A.numpy().transpose([1, 0]), dtype=self.dtype, device=self.device, requires_grad=True)
BT = wp.array2d(B.numpy().transpose([1, 0]), dtype=self.dtype, device=self.device, requires_grad=True)
else:
A = wp.array3d(
np.ceil(rng.uniform(low=low, high=high, size=(batch_count, m, k))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
B = wp.array3d(
np.ceil(rng.uniform(low=low, high=high, size=(batch_count, k, n))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
C = wp.array3d(
np.ceil(rng.uniform(low=low, high=high, size=(batch_count, m, n))),
dtype=self.dtype,
device=self.device,
requires_grad=True,
)
D = wp.array3d(np.zeros((batch_count, m, n)), dtype=self.dtype, device=self.device, requires_grad=True)
AT = wp.array3d(A.numpy().transpose([0, 2, 1]), dtype=self.dtype, device=self.device, requires_grad=True)
BT = wp.array3d(B.numpy().transpose([0, 2, 1]), dtype=self.dtype, device=self.device, requires_grad=True)
return A, B, C, D, AT, BT
def run_and_verify(self, m, n, k, batch_count, alpha, beta):
A, B, C1, D1, AT1, BT1 = self.alloc(m, n, k, batch_count)
C2 = wp.clone(C1)
C3 = wp.clone(C1)
D2 = wp.clone(D1)
D3 = wp.clone(D1)
AT2 = wp.clone(AT1)
BT2 = wp.clone(BT1)
ones1 = wp.zeros_like(D1)
ones1.fill_(1.0)
ones2 = wp.zeros_like(D2)
ones2.fill_(1.0)
ones3 = wp.zeros_like(D3)
ones3.fill_(1.0)
if batch_count == 1:
ATT1 = AT1.transpose([1, 0])
BTT1 = BT1.transpose([1, 0])
ATT2 = AT2.transpose([1, 0])
BTT2 = BT2.transpose([1, 0])
tape = wp.Tape()
with tape:
wp.matmul(A, BTT1, C1, D1, alpha, beta, False)
wp.matmul(ATT1, B, C2, D2, alpha, beta, False)
wp.matmul(ATT2, BTT2, C3, D3, alpha, beta, False)
tape.backward(grads={D1: ones1, D2: ones2, D3: ones3})
D_np = alpha * (A.numpy() @ B.numpy()) + beta * C1.numpy()
assert_np_equal(D1.numpy(), D_np)
assert_np_equal(D2.numpy(), D_np)
assert_np_equal(D3.numpy(), D_np)
adj_A_np = alpha * (ones1.numpy() @ B.numpy().transpose())
adj_B_np = alpha * (A.numpy().transpose() @ ones1.numpy())
adj_C_np = beta * ones1.numpy()
else:
ATT1 = AT1.transpose([0, 2, 1])
BTT1 = BT1.transpose([0, 2, 1])
ATT2 = AT2.transpose([0, 2, 1])
BTT2 = BT2.transpose([0, 2, 1])
tape = wp.Tape()
with tape:
wp.batched_matmul(A, BTT1, C1, D1, alpha, beta, False)
wp.batched_matmul(ATT1, B, C2, D2, alpha, beta, False)
wp.batched_matmul(ATT2, BTT2, C3, D3, alpha, beta, False)
tape.backward(grads={D1: ones1, D2: ones2, D3: ones3})
D_np = alpha * np.matmul(A.numpy(), B.numpy()) + beta * C1.numpy()
assert_np_equal(D1.numpy(), D_np)
assert_np_equal(D2.numpy(), D_np)
assert_np_equal(D3.numpy(), D_np)
adj_A_np = alpha * np.matmul(ones1.numpy(), B.numpy().transpose((0, 2, 1)))
adj_B_np = alpha * np.matmul(A.numpy().transpose((0, 2, 1)), ones1.numpy())
adj_C_np = beta * ones1.numpy()
assert_np_equal(A.grad.numpy(), adj_A_np)
assert_np_equal(ATT1.grad.numpy(), adj_A_np)
assert_np_equal(ATT2.grad.numpy(), adj_A_np)
assert_np_equal(B.grad.numpy(), adj_B_np)
assert_np_equal(BTT1.grad.numpy(), adj_B_np)
assert_np_equal(BTT2.grad.numpy(), adj_B_np)
assert_np_equal(C1.grad.numpy(), adj_C_np)
assert_np_equal(C2.grad.numpy(), adj_C_np)
assert_np_equal(C3.grad.numpy(), adj_C_np)
def run(self):
m = 8
n = 16
k = 32
batch_counts = [1, 4]
beta = 1.0
alpha = 1.0
for batch_count in batch_counts:
self.run_and_verify(m, n, k, batch_count, alpha, beta)
@unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
def test_f32(test, device):
gemm_test_bed_runner(wp.float32, device).run()
gemm_test_bed_runner_transpose(wp.float32, device).run()
@wp.kernel
def matrix_sum_kernel(arr: wp.array2d(dtype=float), loss: wp.array(dtype=float)):
i, j = wp.tid()
wp.atomic_add(loss, 0, arr[i, j])
@unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
def test_tape(test, device):
rng = np.random.default_rng(42)
low = -4.5
high = 3.5
m = 8
n = 16
k = 32
A = wp.array2d(
np.ceil(rng.uniform(low=low, high=high, size=(m, k))), dtype=float, device=device, requires_grad=True
)
B = wp.array2d(
np.ceil(rng.uniform(low=low, high=high, size=(k, n))), dtype=float, device=device, requires_grad=True
)
C = wp.array2d(
np.ceil(rng.uniform(low=low, high=high, size=(m, n))), dtype=float, device=device, requires_grad=True
)
D = wp.array2d(np.zeros((m, n)), dtype=float, device=device, requires_grad=True)
loss = wp.zeros(1, dtype=float, device=device, requires_grad=True)
# test tape
tape = wp.Tape()
with tape:
wp.matmul(A, B, C, D)
wp.launch(matrix_sum_kernel, dim=(m, n), inputs=[D, loss], device=device)
tape.backward(loss=loss)
A_grad = A.grad.numpy()
tape.reset()
# test adjoint
D.grad = wp.ones((m, n), dtype=float, device=device)
wp.adj_matmul(A, B, C, A.grad, B.grad, C.grad, D.grad)
assert_np_equal(A_grad, A.grad.numpy())
# test zero
tape.zero()
assert_array_equal(A.grad, wp.zeros_like(A))
@unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
def test_operator(test, device):
rng = np.random.default_rng(42)
low = -4.5
high = 3.5
m = 8
n = 16
k = 32
A = wp.array2d(
np.ceil(rng.uniform(low=low, high=high, size=(m, k))), dtype=float, device=device, requires_grad=True
)
B = wp.array2d(
np.ceil(rng.uniform(low=low, high=high, size=(k, n))), dtype=float, device=device, requires_grad=True
)
loss = wp.zeros(1, dtype=float, device=device, requires_grad=True)
# test tape
tape = wp.Tape()
with tape:
D = A @ B
wp.launch(matrix_sum_kernel, dim=(m, n), inputs=[D, loss], device=device)
tape.backward(loss=loss)
# test adjoint
D.grad = wp.ones((m, n), dtype=float, device=device)
B_transpose = wp.array2d(B.transpose().numpy(), dtype=float, device=device)
adj_A = D.grad @ B_transpose
assert_array_equal(adj_A, A.grad)
# test zero
tape.zero()
assert_array_equal(A.grad, wp.zeros_like(A))
@unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
def test_large_batch_count(test, device):
rng = np.random.default_rng(42)
low = -4.5
high = 3.5
m = 2
n = 3
k = 4
batch_count = 65535 * 2 + int(65535 / 2)
A = wp.array3d(
np.ceil(rng.uniform(low=low, high=high, size=(batch_count, m, k))),
dtype=float,
device=device,
requires_grad=True,
)
B = wp.array3d(
np.ceil(rng.uniform(low=low, high=high, size=(batch_count, k, n))),
dtype=float,
device=device,
requires_grad=True,
)
C = wp.array3d(
np.ceil(rng.uniform(low=low, high=high, size=(batch_count, m, n))),
dtype=float,
device=device,
requires_grad=True,
)
D = wp.array3d(np.zeros((batch_count, m, n)), dtype=float, device=device, requires_grad=True)
ones = wp.zeros_like(D)
ones.fill_(1.0)
alpha = 1.0
beta = 1.0
tape = wp.Tape()
with tape:
wp.batched_matmul(A, B, C, D, alpha=alpha, beta=beta, allow_tf32x3_arith=False)
tape.backward(grads={D: ones})
D_np = alpha * np.matmul(A.numpy(), B.numpy()) + beta * C.numpy()
assert_np_equal(D.numpy(), D_np)
adj_A_np = alpha * np.matmul(ones.numpy(), B.numpy().transpose((0, 2, 1)))
adj_B_np = alpha * np.matmul(A.numpy().transpose((0, 2, 1)), ones.numpy())
adj_C_np = beta * ones.numpy()
assert_np_equal(A.grad.numpy(), adj_A_np)
assert_np_equal(B.grad.numpy(), adj_B_np)
assert_np_equal(C.grad.numpy(), adj_C_np)
devices = get_test_devices()
class TestMatmulLite(unittest.TestCase):
pass
add_function_test(TestMatmulLite, "test_f32", test_f32, devices=devices)
add_function_test(TestMatmulLite, "test_tape", test_tape, devices=devices)
add_function_test(TestMatmulLite, "test_operator", test_operator, devices=devices)
add_function_test(TestMatmulLite, "test_large_batch_count", test_large_batch_count, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=False)
| 14,532 | Python | 34.533007 | 117 | 0.546036 |
NVIDIA/warp/warp/tests/test_func.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
@wp.func
def sqr(x: float):
return x * x
# test nested user function calls
# and explicit return type hints
@wp.func
def cube(x: float) -> float:
return sqr(x) * x
@wp.func
def custom(x: int):
return x + 1
@wp.func
def custom(x: float):
return x + 1.0
@wp.func
def custom(x: wp.vec3):
return x + wp.vec3(1.0, 0.0, 0.0)
@wp.func
def noreturn(x: wp.vec3):
x = x + wp.vec3(0.0, 1.0, 0.0)
wp.expect_eq(x, wp.vec3(1.0, 1.0, 0.0))
@wp.kernel
def test_overload_func():
# tests overloading a custom @wp.func
i = custom(1)
f = custom(1.0)
v = custom(wp.vec3(1.0, 0.0, 0.0))
wp.expect_eq(i, 2)
wp.expect_eq(f, 2.0)
wp.expect_eq(v, wp.vec3(2.0, 0.0, 0.0))
noreturn(wp.vec3(1.0, 0.0, 0.0))
@wp.func
def foo(x: int):
# This shouldn't be picked up.
return x * 2
@wp.func
def foo(x: int):
return x * 3
@wp.kernel
def test_override_func():
i = foo(1)
wp.expect_eq(i, 3)
def test_func_closure_capture(test, device):
def make_closure_kernel(func):
def closure_kernel_fn(data: wp.array(dtype=float), expected: float):
f = func(data[wp.tid()])
wp.expect_eq(f, expected)
return wp.Kernel(func=closure_kernel_fn)
sqr_closure = make_closure_kernel(sqr)
cube_closure = make_closure_kernel(cube)
data = wp.array([2.0], dtype=float, device=device)
expected_sqr = 4.0
expected_cube = 8.0
wp.launch(sqr_closure, dim=data.shape, inputs=[data, expected_sqr], device=device)
wp.launch(cube_closure, dim=data.shape, inputs=[data, expected_cube], device=device)
@wp.func
def test_func(param1: wp.int32, param2: wp.int32, param3: wp.int32) -> wp.float32:
return 1.0
@wp.kernel
def test_return_kernel(test_data: wp.array(dtype=wp.float32)):
tid = wp.tid()
test_data[tid] = wp.lerp(test_func(0, 1, 2), test_func(0, 1, 2), 0.5)
def test_return_func(test, device):
test_data = wp.zeros(100, dtype=wp.float32, device=device)
wp.launch(kernel=test_return_kernel, dim=test_data.size, inputs=[test_data], device=device)
@wp.func
def multi_valued_func(a: wp.float32, b: wp.float32):
return a + b, a - b, a * b, a / b
def test_multi_valued_func(test, device):
@wp.kernel
def test_multi_valued_kernel(test_data1: wp.array(dtype=wp.float32), test_data2: wp.array(dtype=wp.float32)):
tid = wp.tid()
d1, d2 = test_data1[tid], test_data2[tid]
a, b, c, d = multi_valued_func(d1, d2)
wp.expect_eq(a, d1 + d2)
wp.expect_eq(b, d1 - d2)
wp.expect_eq(c, d1 * d2)
wp.expect_eq(d, d1 / d2)
test_data1 = wp.array(np.arange(100), dtype=wp.float32, device=device)
test_data2 = wp.array(np.arange(100, 0, -1), dtype=wp.float32, device=device)
wp.launch(kernel=test_multi_valued_kernel, dim=test_data1.size, inputs=[test_data1, test_data2], device=device)
@wp.kernel
def test_func_defaults():
# test default as expected
wp.expect_near(1.0, 1.0 + 1.0e-6)
# test that changing tolerance still works
wp.expect_near(1.0, 1.1, 0.5)
@wp.func
def sign(x: float):
return 123.0
@wp.kernel
def test_builtin_shadowing():
wp.expect_eq(sign(1.23), 123.0)
devices = get_test_devices()
class TestFunc(unittest.TestCase):
def test_user_func_export(self):
# tests calling overloaded user-defined functions from Python
i = custom(1)
f = custom(1.0)
v = custom(wp.vec3(1.0, 0.0, 0.0))
self.assertEqual(i, 2)
self.assertEqual(f, 2.0)
assert_np_equal(np.array([*v]), np.array([2.0, 0.0, 0.0]))
def test_native_func_export(self):
# tests calling native functions from Python
q = wp.quat(0.0, 0.0, 0.0, 1.0)
assert_np_equal(np.array([*q]), np.array([0.0, 0.0, 0.0, 1.0]))
r = wp.quat_from_axis_angle(wp.vec3(1.0, 0.0, 0.0), 2.0)
assert_np_equal(np.array([*r]), np.array([0.8414709568023682, 0.0, 0.0, 0.5403022170066833]), tol=1.0e-3)
q = wp.quat(1.0, 2.0, 3.0, 4.0)
q = wp.normalize(q) * 2.0
assert_np_equal(
np.array([*q]),
np.array([0.18257418274879456, 0.3651483654975891, 0.547722578048706, 0.7302967309951782]) * 2.0,
tol=1.0e-3,
)
v2 = wp.vec2(1.0, 2.0)
v2 = wp.normalize(v2) * 2.0
assert_np_equal(np.array([*v2]), np.array([0.4472135901451111, 0.8944271802902222]) * 2.0, tol=1.0e-3)
v3 = wp.vec3(1.0, 2.0, 3.0)
v3 = wp.normalize(v3) * 2.0
assert_np_equal(
np.array([*v3]), np.array([0.26726123690605164, 0.5345224738121033, 0.8017836809158325]) * 2.0, tol=1.0e-3
)
v4 = wp.vec4(1.0, 2.0, 3.0, 4.0)
v4 = wp.normalize(v4) * 2.0
assert_np_equal(
np.array([*v4]),
np.array([0.18257418274879456, 0.3651483654975891, 0.547722578048706, 0.7302967309951782]) * 2.0,
tol=1.0e-3,
)
v = wp.vec2(0.0)
v += wp.vec2(1.0, 1.0)
assert v == wp.vec2(1.0, 1.0)
v -= wp.vec2(1.0, 1.0)
assert v == wp.vec2(0.0, 0.0)
v = wp.vec2(2.0, 2.0) - wp.vec2(1.0, 1.0)
assert v == wp.vec2(1.0, 1.0)
v *= 2.0
assert v == wp.vec2(2.0, 2.0)
v = v * 2.0
assert v == wp.vec2(4.0, 4.0)
v = v / 2.0
assert v == wp.vec2(2.0, 2.0)
v /= 2.0
assert v == wp.vec2(1.0, 1.0)
v = -v
assert v == wp.vec2(-1.0, -1.0)
v = +v
assert v == wp.vec2(-1.0, -1.0)
m22 = wp.mat22(1.0, 2.0, 3.0, 4.0)
m22 = m22 + m22
self.assertEqual(m22[1, 1], 8.0)
self.assertEqual(str(m22), "[[2.0, 4.0],\n [6.0, 8.0]]")
t = wp.transform(
wp.vec3(1.0, 2.0, 3.0),
wp.quat(4.0, 5.0, 6.0, 7.0),
)
self.assertSequenceEqual(t, (1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0))
self.assertSequenceEqual(
t * wp.transform(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0), (396.0, 432.0, 720.0, 56.0, 70.0, 84.0, -28.0)
)
self.assertSequenceEqual(
t * wp.transform((1.0, 2.0, 3.0), (4.0, 5.0, 6.0, 7.0)), (396.0, 432.0, 720.0, 56.0, 70.0, 84.0, -28.0)
)
t = wp.transform()
self.assertSequenceEqual(t, (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0))
t = wp.transform(p=(1.0, 2.0, 3.0), q=(4.0, 5.0, 6.0, 7.0))
self.assertSequenceEqual(t, (1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0))
t = wp.transform(q=(4.0, 5.0, 6.0, 7.0), p=(1.0, 2.0, 3.0))
self.assertSequenceEqual(t, (1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0))
t = wp.transform((1.0, 2.0, 3.0), q=(4.0, 5.0, 6.0, 7.0))
self.assertSequenceEqual(t, (1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0))
t = wp.transform(p=(1.0, 2.0, 3.0))
self.assertSequenceEqual(t, (1.0, 2.0, 3.0, 0.0, 0.0, 0.0, 1.0))
t = wp.transform(q=(4.0, 5.0, 6.0, 7.0))
self.assertSequenceEqual(t, (0.0, 0.0, 0.0, 4.0, 5.0, 6.0, 7.0))
t = wp.transform((1.0, 2.0, 3.0), (4.0, 5.0, 6.0, 7.0))
self.assertSequenceEqual(t, (1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0))
t = wp.transform(p=wp.vec3(1.0, 2.0, 3.0), q=wp.quat(4.0, 5.0, 6.0, 7.0))
self.assertSequenceEqual(t, (1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0))
t = wp.transform(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0)
self.assertSequenceEqual(t, (1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0))
t = wp.transform(wp.transform(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0))
self.assertSequenceEqual(t, (1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0))
t = wp.transform(*wp.transform(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0))
self.assertSequenceEqual(t, (1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0))
transformf = wp.types.transformation(dtype=float)
t = wp.transformf((1.0, 2.0, 3.0), (4.0, 5.0, 6.0, 7.0))
self.assertSequenceEqual(
t + transformf((2.0, 3.0, 4.0), (5.0, 6.0, 7.0, 8.0)),
(3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0),
)
self.assertSequenceEqual(
t - transformf((2.0, 3.0, 4.0), (5.0, 6.0, 7.0, 8.0)),
(-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0),
)
f = wp.sin(math.pi * 0.5)
self.assertAlmostEqual(f, 1.0, places=3)
m = wp.mat22(0.0, 0.0, 0.0, 0.0)
m += wp.mat22(1.0, 1.0, 1.0, 1.0)
assert m == wp.mat22(1.0, 1.0, 1.0, 1.0)
m -= wp.mat22(1.0, 1.0, 1.0, 1.0)
assert m == wp.mat22(0.0, 0.0, 0.0, 0.0)
m = wp.mat22(2.0, 2.0, 2.0, 2.0) - wp.mat22(1.0, 1.0, 1.0, 1.0)
assert m == wp.mat22(1.0, 1.0, 1.0, 1.0)
m *= 2.0
assert m == wp.mat22(2.0, 2.0, 2.0, 2.0)
m = m * 2.0
assert m == wp.mat22(4.0, 4.0, 4.0, 4.0)
m = m / 2.0
assert m == wp.mat22(2.0, 2.0, 2.0, 2.0)
m /= 2.0
assert m == wp.mat22(1.0, 1.0, 1.0, 1.0)
m = -m
assert m == wp.mat22(-1.0, -1.0, -1.0, -1.0)
m = +m
assert m == wp.mat22(-1.0, -1.0, -1.0, -1.0)
m = m * m
assert m == wp.mat22(2.0, 2.0, 2.0, 2.0)
def test_native_function_error_resolution(self):
a = wp.mat22f(1.0, 2.0, 3.0, 4.0)
b = wp.mat22d(1.0, 2.0, 3.0, 4.0)
with self.assertRaisesRegex(
RuntimeError,
r"^Couldn't find a function 'mul' compatible with " r"the arguments 'mat22f, mat22d'$",
):
a * b
add_kernel_test(TestFunc, kernel=test_overload_func, name="test_overload_func", dim=1, devices=devices)
add_function_test(TestFunc, func=test_return_func, name="test_return_func", devices=devices)
add_kernel_test(TestFunc, kernel=test_override_func, name="test_override_func", dim=1, devices=devices)
add_function_test(TestFunc, func=test_func_closure_capture, name="test_func_closure_capture", devices=devices)
add_function_test(TestFunc, func=test_multi_valued_func, name="test_multi_valued_func", devices=devices)
add_kernel_test(TestFunc, kernel=test_func_defaults, name="test_func_defaults", dim=1, devices=devices)
add_kernel_test(TestFunc, kernel=test_builtin_shadowing, name="test_builtin_shadowing", dim=1, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 10,744 | Python | 30.884273 | 118 | 0.557148 |
NVIDIA/warp/warp/tests/test_tape.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def mul_constant(x: wp.array(dtype=float), y: wp.array(dtype=float)):
tid = wp.tid()
y[tid] = x[tid] * 2.0
@wp.struct
class Multiplicands:
x: wp.array(dtype=float)
y: wp.array(dtype=float)
@wp.kernel
def mul_variable(multiplicands: Multiplicands, z: wp.array(dtype=float)):
    tid = wp.tid()
    z[tid] = multiplicands.x[tid] * multiplicands.y[tid]
@wp.kernel
def dot_product(x: wp.array(dtype=float), y: wp.array(dtype=float), z: wp.array(dtype=float)):
tid = wp.tid()
wp.atomic_add(z, 0, x[tid] * y[tid])
def test_tape_mul_constant(test, device):
dim = 8
iters = 16
tape = wp.Tape()
# record onto tape
with tape:
# input data
x0 = wp.array(np.zeros(dim), dtype=wp.float32, device=device, requires_grad=True)
x = x0
for _i in range(iters):
y = wp.empty_like(x, requires_grad=True)
wp.launch(kernel=mul_constant, dim=dim, inputs=[x], outputs=[y], device=device)
x = y
# loss = wp.sum(x)
x.grad = wp.array(np.ones(dim), device=device, dtype=wp.float32)
# run backward
tape.backward()
# grad = 2.0^iters
assert_np_equal(tape.gradients[x0].numpy(), np.ones(dim) * (2**iters))
def test_tape_mul_variable(test, device):
dim = 8
tape = wp.Tape()
# record onto tape
with tape:
# input data (Note: We're intentionally testing structs in tapes here)
multiplicands = Multiplicands()
multiplicands.x = wp.array(np.ones(dim) * 16.0, dtype=wp.float32, device=device, requires_grad=True)
multiplicands.y = wp.array(np.ones(dim) * 32.0, dtype=wp.float32, device=device, requires_grad=True)
z = wp.zeros_like(multiplicands.x)
wp.launch(kernel=mul_variable, dim=dim, inputs=[multiplicands], outputs=[z], device=device)
# loss = wp.sum(x)
z.grad = wp.array(np.ones(dim), device=device, dtype=wp.float32)
# run backward
tape.backward()
# grad_x=y, grad_y=x
assert_np_equal(tape.gradients[multiplicands].x.numpy(), multiplicands.y.numpy())
assert_np_equal(tape.gradients[multiplicands].y.numpy(), multiplicands.x.numpy())
# run backward again with different incoming gradient
# should accumulate the same gradients again onto output
# so gradients = 2.0*prev
tape.backward()
assert_np_equal(tape.gradients[multiplicands].x.numpy(), multiplicands.y.numpy() * 2.0)
assert_np_equal(tape.gradients[multiplicands].y.numpy(), multiplicands.x.numpy() * 2.0)
# Clear launches and zero out the gradients
tape.reset()
assert_np_equal(tape.gradients[multiplicands].x.numpy(), np.zeros_like(tape.gradients[multiplicands].x.numpy()))
test.assertFalse(tape.launches)
def test_tape_dot_product(test, device):
dim = 8
tape = wp.Tape()
# record onto tape
with tape:
# input data
x = wp.array(np.ones(dim) * 16.0, dtype=wp.float32, device=device, requires_grad=True)
y = wp.array(np.ones(dim) * 32.0, dtype=wp.float32, device=device, requires_grad=True)
z = wp.zeros(n=1, dtype=wp.float32, device=device, requires_grad=True)
wp.launch(kernel=dot_product, dim=dim, inputs=[x, y], outputs=[z], device=device)
# scalar loss
tape.backward(loss=z)
# grad_x=y, grad_y=x
assert_np_equal(tape.gradients[x].numpy(), y.numpy())
assert_np_equal(tape.gradients[y].numpy(), x.numpy())
def test_tape_visualize(test, device):
dim = 8
tape = wp.Tape()
# record onto tape
with tape:
# input data
x = wp.array(np.ones(dim) * 16.0, dtype=wp.float32, device=device, requires_grad=True)
y = wp.array(np.ones(dim) * 32.0, dtype=wp.float32, device=device, requires_grad=True)
z = wp.zeros(n=1, dtype=wp.float32, device=device, requires_grad=True)
tape.record_scope_begin("my loop")
for _ in range(16):
wp.launch(kernel=dot_product, dim=dim, inputs=[x, y], outputs=[z], device=device)
tape.record_scope_end()
# generate GraphViz diagram code
dot_code = tape.visualize(simplify_graph=True)
assert "repeated 16x" in dot_code
assert "my loop" in dot_code
assert dot_code.count("dot_product") == 1
devices = get_test_devices()
class TestTape(unittest.TestCase):
def test_tape_no_nested_tapes(self):
with self.assertRaises(RuntimeError):
with wp.Tape():
with wp.Tape():
pass
add_function_test(TestTape, "test_tape_mul_constant", test_tape_mul_constant, devices=devices)
add_function_test(TestTape, "test_tape_mul_variable", test_tape_mul_variable, devices=devices)
add_function_test(TestTape, "test_tape_dot_product", test_tape_dot_product, devices=devices)
add_function_test(TestTape, "test_tape_visualize", test_tape_visualize, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 5,456 | Python | 30.726744 | 116 | 0.660557 |
NVIDIA/warp/warp/tests/aux_test_dependent.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""This file is used to test reloading module references."""
import warp as wp
import warp.tests.aux_test_reference as ref
@wp.kernel
def kern(expect: float):
wp.expect_eq(ref.magic(), expect)
def run(expect, device):
wp.launch(kern, dim=1, inputs=[expect], device=device)
| 710 | Python | 32.857141 | 76 | 0.773239 |
NVIDIA/warp/warp/tests/test_mesh_query_ray.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
# triangulate a list of polygon face indices
def triangulate(face_counts, face_indices):
num_tris = np.sum(np.subtract(face_counts, 2))
num_tri_vtx = num_tris * 3
tri_indices = np.zeros(num_tri_vtx, dtype=int)
ctr = 0
wedgeIdx = 0
for nb in face_counts:
for i in range(nb - 2):
tri_indices[ctr] = face_indices[wedgeIdx]
tri_indices[ctr + 1] = face_indices[wedgeIdx + i + 1]
tri_indices[ctr + 2] = face_indices[wedgeIdx + i + 2]
ctr += 3
wedgeIdx += nb
return tri_indices
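# A small worked example of the fan triangulation above (illustrative only):
# a single quad with face_counts=[4] and face_indices=[0, 1, 2, 3] is split
# into two triangles sharing vertex 0, giving tri_indices == [0, 1, 2, 0, 2, 3].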
@wp.kernel
def mesh_query_ray_loss(
mesh: wp.uint64,
query_points: wp.array(dtype=wp.vec3),
query_dirs: wp.array(dtype=wp.vec3),
intersection_points: wp.array(dtype=wp.vec3),
loss: wp.array(dtype=float),
):
tid = wp.tid()
p = query_points[tid]
D = query_dirs[tid]
max_t = 10012.0
t = float(0.0)
bary_u = float(0.0)
bary_v = float(0.0)
sign = float(0.0)
normal = wp.vec3()
face_index = int(0)
q = wp.vec3()
if wp.mesh_query_ray(mesh, p, D, max_t, t, bary_u, bary_v, sign, normal, face_index):
q = wp.mesh_eval_position(mesh, face_index, bary_u, bary_v)
intersection_points[tid] = q
        loss[tid] = q[0]
query = wp.mesh_query_ray(mesh, p, D, max_t)
wp.expect_eq(query.t, t)
wp.expect_eq(query.u, bary_u)
wp.expect_eq(query.v, bary_v)
wp.expect_eq(query.sign, sign)
wp.expect_eq(query.normal, normal)
wp.expect_eq(query.face, face_index)
@unittest.skipUnless(USD_AVAILABLE, "Requires usd-core")
def test_mesh_query_ray_grad(test, device):
from pxr import Usd, UsdGeom
# test tri
# print("Testing Single Triangle")
# mesh_points = wp.array(np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0], [0.0, 2.0, 0.0]]), dtype=wp.vec3, device=device)
# mesh_indices = wp.array(np.array([0,1,2]), dtype=int, device=device)
mesh = Usd.Stage.Open(os.path.abspath(os.path.join(os.path.dirname(__file__), "assets/torus.usda")))
mesh_geom = UsdGeom.Mesh(mesh.GetPrimAtPath("/World/Torus"))
mesh_counts = mesh_geom.GetFaceVertexCountsAttr().Get()
mesh_indices = mesh_geom.GetFaceVertexIndicesAttr().Get()
tri_indices = triangulate(mesh_counts, mesh_indices)
mesh_points = wp.array(np.array(mesh_geom.GetPointsAttr().Get()), dtype=wp.vec3, device=device)
mesh_indices = wp.array(np.array(tri_indices), dtype=int, device=device)
p = wp.vec3(50.0, 50.0, 0.0)
D = wp.vec3(0.0, -1.0, 0.0)
# create mesh
mesh = wp.Mesh(points=mesh_points, velocities=None, indices=mesh_indices)
tape = wp.Tape()
# analytic gradients
with tape:
query_points = wp.array(p, dtype=wp.vec3, device=device, requires_grad=True)
query_dirs = wp.array(D, dtype=wp.vec3, device=device, requires_grad=True)
intersection_points = wp.zeros(n=1, dtype=wp.vec3, device=device)
loss = wp.zeros(n=1, dtype=float, device=device, requires_grad=True)
wp.launch(
kernel=mesh_query_ray_loss,
dim=1,
inputs=[mesh.id, query_points, query_dirs, intersection_points, loss],
device=device,
)
tape.backward(loss=loss)
q = intersection_points.numpy().flatten()
analytic_p = tape.gradients[query_points].numpy().flatten()
analytic_D = tape.gradients[query_dirs].numpy().flatten()
# numeric gradients
# ray origin
eps = 1.0e-3
loss_values_p = []
numeric_p = np.zeros(3)
offset_query_points = [
wp.vec3(p[0] - eps, p[1], p[2]),
wp.vec3(p[0] + eps, p[1], p[2]),
wp.vec3(p[0], p[1] - eps, p[2]),
wp.vec3(p[0], p[1] + eps, p[2]),
wp.vec3(p[0], p[1], p[2] - eps),
wp.vec3(p[0], p[1], p[2] + eps),
]
for i in range(6):
q = offset_query_points[i]
query_points = wp.array(q, dtype=wp.vec3, device=device)
query_dirs = wp.array(D, dtype=wp.vec3, device=device)
intersection_points = wp.zeros(n=1, dtype=wp.vec3, device=device)
loss = wp.zeros(n=1, dtype=float, device=device)
wp.launch(
kernel=mesh_query_ray_loss,
dim=1,
inputs=[mesh.id, query_points, query_dirs, intersection_points, loss],
device=device,
)
loss_values_p.append(loss.numpy()[0])
for i in range(3):
l_0 = loss_values_p[i * 2]
l_1 = loss_values_p[i * 2 + 1]
gradient = (l_1 - l_0) / (2.0 * eps)
numeric_p[i] = gradient
# ray dir
loss_values_D = []
numeric_D = np.zeros(3)
offset_query_dirs = [
wp.vec3(D[0] - eps, D[1], D[2]),
wp.vec3(D[0] + eps, D[1], D[2]),
wp.vec3(D[0], D[1] - eps, D[2]),
wp.vec3(D[0], D[1] + eps, D[2]),
wp.vec3(D[0], D[1], D[2] - eps),
wp.vec3(D[0], D[1], D[2] + eps),
]
for i in range(6):
q = offset_query_dirs[i]
query_points = wp.array(p, dtype=wp.vec3, device=device)
query_dirs = wp.array(q, dtype=wp.vec3, device=device)
intersection_points = wp.zeros(n=1, dtype=wp.vec3, device=device)
loss = wp.zeros(n=1, dtype=float, device=device)
wp.launch(
kernel=mesh_query_ray_loss,
dim=1,
inputs=[mesh.id, query_points, query_dirs, intersection_points, loss],
device=device,
)
loss_values_D.append(loss.numpy()[0])
for i in range(3):
l_0 = loss_values_D[i * 2]
l_1 = loss_values_D[i * 2 + 1]
gradient = (l_1 - l_0) / (2.0 * eps)
numeric_D[i] = gradient
error_p = ((analytic_p - numeric_p) * (analytic_p - numeric_p)).sum(axis=0)
error_D = ((analytic_D - numeric_D) * (analytic_D - numeric_D)).sum(axis=0)
tolerance = 1.0e-3
test.assertTrue(error_p < tolerance, f"error is {error_p} which is >= {tolerance}")
test.assertTrue(error_D < tolerance, f"error is {error_D} which is >= {tolerance}")
@wp.kernel
def raycast_kernel(
mesh: wp.uint64,
ray_starts: wp.array(dtype=wp.vec3),
ray_directions: wp.array(dtype=wp.vec3),
count: wp.array(dtype=int),
):
t = float(0.0) # hit distance along ray
u = float(0.0) # hit face barycentric u
v = float(0.0) # hit face barycentric v
sign = float(0.0) # hit face sign
n = wp.vec3() # hit face normal
f = int(0) # hit face index
max_dist = 1e6 # max raycast distance
# ray cast against the mesh
tid = wp.tid()
if wp.mesh_query_ray(mesh, ray_starts[tid], ray_directions[tid], max_dist, t, u, v, sign, n, f):
wp.atomic_add(count, 0, 1)
# Tests rays against a quad made of two connected triangles, with rays falling
# exactly on the shared edge, to verify that no rays leak through the mesh.
def test_mesh_query_ray_edge(test, device):
# Create raycast starts and directions
xx, yy = np.meshgrid(np.arange(0.1, 0.4, 0.01), np.arange(0.1, 0.4, 0.01))
xx = xx.flatten().reshape(-1, 1)
yy = yy.flatten().reshape(-1, 1)
zz = np.ones_like(xx)
ray_starts = np.concatenate((xx, yy, zz), axis=1)
ray_dirs = np.zeros_like(ray_starts)
ray_dirs[:, 2] = -1.0
# Create simple square mesh
vertices = np.array([[0.0, 0.0, 0.0], [0.0, 0.5, 0.0], [0.5, 0.0, 0.0], [0.5, 0.5, 0.0]], dtype=np.float32)
triangles = np.array([[1, 0, 2], [1, 2, 3]], dtype=np.int32)
mesh = wp.Mesh(
points=wp.array(vertices, dtype=wp.vec3, device=device),
indices=wp.array(triangles.flatten(), dtype=int, device=device),
)
counts = wp.zeros(1, dtype=int, device=device)
n = len(ray_starts)
ray_starts = wp.array(ray_starts, shape=(n,), dtype=wp.vec3, device=device)
ray_dirs = wp.array(ray_dirs, shape=(n,), dtype=wp.vec3, device=device)
wp.launch(kernel=raycast_kernel, dim=n, inputs=[mesh.id, ray_starts, ray_dirs, counts], device=device)
wp.synchronize()
test.assertEqual(counts.numpy()[0], n)
devices = get_test_devices()
class TestMeshQueryRay(unittest.TestCase):
def test_mesh_query_codegen_adjoints_with_select(self):
def kernel_fn(
mesh: wp.uint64,
):
v = wp.vec3(0.0, 0.0, 0.0)
d = 1e-6
if True:
query = wp.mesh_query_ray(mesh, v, v, d)
else:
query = wp.mesh_query_ray(mesh, v, v, d)
wp.Kernel(func=kernel_fn)
add_function_test(TestMeshQueryRay, "test_mesh_query_ray_edge", test_mesh_query_ray_edge, devices=devices)
add_function_test(TestMeshQueryRay, "test_mesh_query_ray_grad", test_mesh_query_ray_grad, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 9,199 | Python | 30.61512 | 121 | 0.599957 |
NVIDIA/warp/warp/tests/test_multigpu.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
from warp.utils import check_iommu
@wp.kernel
def inc(a: wp.array(dtype=float)):
tid = wp.tid()
a[tid] = a[tid] + 1.0
@wp.kernel
def arange(start: int, step: int, a: wp.array(dtype=int)):
tid = wp.tid()
a[tid] = start + step * tid
class TestMultiGPU(unittest.TestCase):
@unittest.skipUnless(len(wp.get_cuda_devices()) > 1, "Requires at least two CUDA devices")
def test_multigpu_set_device(self):
# save default device
saved_device = wp.get_device()
n = 32
wp.set_device("cuda:0")
a0 = wp.empty(n, dtype=int)
wp.launch(arange, dim=a0.size, inputs=[0, 1, a0])
wp.set_device("cuda:1")
a1 = wp.empty(n, dtype=int)
wp.launch(arange, dim=a1.size, inputs=[0, 1, a1])
# restore default device
wp.set_device(saved_device)
assert a0.device == "cuda:0"
assert a1.device == "cuda:1"
expected = np.arange(n, dtype=int)
assert_np_equal(a0.numpy(), expected)
assert_np_equal(a1.numpy(), expected)
@unittest.skipUnless(len(wp.get_cuda_devices()) > 1, "Requires at least two CUDA devices")
def test_multigpu_scoped_device(self):
n = 32
with wp.ScopedDevice("cuda:0"):
a0 = wp.empty(n, dtype=int)
wp.launch(arange, dim=a0.size, inputs=[0, 1, a0])
with wp.ScopedDevice("cuda:1"):
a1 = wp.empty(n, dtype=int)
wp.launch(arange, dim=a1.size, inputs=[0, 1, a1])
assert a0.device == "cuda:0"
assert a1.device == "cuda:1"
expected = np.arange(n, dtype=int)
assert_np_equal(a0.numpy(), expected)
assert_np_equal(a1.numpy(), expected)
@unittest.skipUnless(len(wp.get_cuda_devices()) > 1, "Requires at least two CUDA devices")
def test_multigpu_nesting(self):
initial_device = wp.get_device()
initial_cuda_device = wp.get_cuda_device()
with wp.ScopedDevice("cuda:1"):
assert wp.get_device() == "cuda:1"
assert wp.get_cuda_device() == "cuda:1"
with wp.ScopedDevice("cuda:0"):
assert wp.get_device() == "cuda:0"
assert wp.get_cuda_device() == "cuda:0"
with wp.ScopedDevice("cpu"):
assert wp.get_device() == "cpu"
assert wp.get_cuda_device() == "cuda:0"
wp.set_device("cuda:1")
assert wp.get_device() == "cuda:1"
assert wp.get_cuda_device() == "cuda:1"
assert wp.get_device() == "cuda:0"
assert wp.get_cuda_device() == "cuda:0"
assert wp.get_device() == "cuda:1"
assert wp.get_cuda_device() == "cuda:1"
assert wp.get_device() == initial_device
assert wp.get_cuda_device() == initial_cuda_device
@unittest.skipUnless(len(wp.get_cuda_devices()) > 1, "Requires at least two CUDA devices")
@unittest.skipUnless(check_iommu(), "IOMMU seems enabled")
def test_multigpu_pingpong(self):
n = 1024 * 1024
a0 = wp.zeros(n, dtype=float, device="cuda:0")
a1 = wp.zeros(n, dtype=float, device="cuda:1")
iters = 10
for _ in range(iters):
wp.launch(inc, dim=a0.size, inputs=[a0], device=a0.device)
wp.synchronize_device(a0.device)
wp.copy(a1, a0)
wp.launch(inc, dim=a1.size, inputs=[a1], device=a1.device)
wp.synchronize_device(a1.device)
wp.copy(a0, a1)
expected = np.full(n, iters * 2, dtype=np.float32)
assert_np_equal(a0.numpy(), expected)
assert_np_equal(a1.numpy(), expected)
@unittest.skipUnless(len(wp.get_cuda_devices()) > 1, "Requires at least two CUDA devices")
@unittest.skipUnless(check_iommu(), "IOMMU seems enabled")
def test_multigpu_pingpong_streams(self):
n = 1024 * 1024
a0 = wp.zeros(n, dtype=float, device="cuda:0")
a1 = wp.zeros(n, dtype=float, device="cuda:1")
stream0 = wp.get_stream("cuda:0")
stream1 = wp.get_stream("cuda:1")
iters = 10
for _ in range(iters):
wp.launch(inc, dim=a0.size, inputs=[a0], stream=stream0)
stream1.wait_stream(stream0)
wp.copy(a1, a0, stream=stream1)
wp.launch(inc, dim=a1.size, inputs=[a1], stream=stream1)
stream0.wait_stream(stream1)
wp.copy(a0, a1, stream=stream0)
expected = np.full(n, iters * 2, dtype=np.float32)
assert_np_equal(a0.numpy(), expected)
assert_np_equal(a1.numpy(), expected)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=False)
| 5,247 | Python | 31.395062 | 94 | 0.589289 |
NVIDIA/warp/warp/tests/unittest_utils.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import ctypes
import ctypes.util
import importlib
import os
import sys
import time
import unittest
from typing import Optional
import numpy as np
import warp as wp
pxr = importlib.util.find_spec("pxr")
USD_AVAILABLE = pxr is not None
# default test mode (see get_test_devices())
# "basic" - only run on CPU and first GPU device
# "unique" - run on CPU and all unique GPU arches
# "unique_or_2x" - run on CPU and all unique GPU arches. If there is a single GPU arch, add a second GPU if it exists.
# "all" - run on all devices
test_mode = "unique_or_2x"
coverage_enabled = False
coverage_temp_dir = None
coverage_branch = None
try:
if sys.platform == "win32":
LIBC = ctypes.CDLL("ucrtbase.dll")
else:
LIBC = ctypes.CDLL(ctypes.util.find_library("c"))
except OSError:
print("Failed to load the standard C library")
LIBC = None
def get_selected_cuda_test_devices(mode: Optional[str] = None):
"""Returns a list of CUDA devices according the selected ``mode`` behavior.
If ``mode`` is ``None``, the ``global test_mode`` value will be used and
this list will be a subset of the devices returned from ``get_test_devices()``.
Args:
mode: ``"basic"``, returns a list containing up to a single CUDA device.
``"unique"``, returns a list containing no more than one device of
every CUDA architecture on the system.
``"unique_or_2x"`` behaves like ``"unique"`` but adds up to one
          additional CUDA device if the system only has devices of a single CUDA
architecture.
"""
if mode is None:
global test_mode
mode = test_mode
if mode == "basic":
if wp.is_cuda_available():
return [wp.get_device("cuda:0")]
else:
return []
cuda_devices = wp.get_cuda_devices()
first_cuda_devices = {}
for d in cuda_devices:
if d.arch not in first_cuda_devices:
first_cuda_devices[d.arch] = d
selected_cuda_devices = list(first_cuda_devices.values())
if mode == "unique_or_2x" and len(selected_cuda_devices) == 1 and len(cuda_devices) > 1:
for d in cuda_devices:
if d not in selected_cuda_devices:
selected_cuda_devices.append(d)
break
return selected_cuda_devices
def get_test_devices(mode: Optional[str] = None):
"""Returns a list of devices based on the mode selected.
Args:
mode: The testing mode to specify which devices to include. If not provided or ``None``, the
``global test_mode`` value will be used.
"basic": Returns the CPU and the first GPU device when available.
"unique": Returns the CPU and all unique GPU architectures.
"unique_or_2x" (default): Behaves like "unique" but adds up to one additional CUDA device
          if the system only has devices of a single CUDA architecture.
"all": Returns all available devices.
"""
if mode is None:
global test_mode
mode = test_mode
devices = []
if mode == "basic":
# only run on CPU and first GPU device
if wp.is_cpu_available():
devices.append(wp.get_device("cpu"))
if wp.is_cuda_available():
devices.append(wp.get_device("cuda:0"))
elif mode == "unique" or mode == "unique_or_2x":
# run on CPU and a subset of GPUs
if wp.is_cpu_available():
devices.append(wp.get_device("cpu"))
devices.extend(get_selected_cuda_test_devices(mode))
elif mode == "all":
# run on all devices
devices = wp.get_devices()
else:
raise ValueError(f"Unknown test mode selected: {mode}")
return devices
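# Illustrative example (assuming a machine with a CPU and two GPUs of the same
# architecture): "basic" would return [cpu, cuda:0], while the default
# "unique_or_2x" mode would return [cpu, cuda:0, cuda:1].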
def get_cuda_test_devices(mode=None):
devices = get_test_devices(mode=mode)
return [d for d in devices if d.is_cuda]
# redirects and captures all stdout output (including from C-libs)
class StdOutCapture:
def begin(self):
# Flush the stream buffers managed by libc.
# This is needed at the moment due to Carbonite not flushing the logs
# being printed out when extensions are starting up.
if LIBC is not None:
LIBC.fflush(None)
# save original
self.saved = sys.stdout
self.target = os.dup(self.saved.fileno())
# create temporary capture stream
import io
import tempfile
self.tempfile = io.TextIOWrapper(
tempfile.TemporaryFile(buffering=0),
encoding="utf-8",
errors="replace",
newline="",
write_through=True,
)
os.dup2(self.tempfile.fileno(), self.saved.fileno())
sys.stdout = self.tempfile
def end(self):
# The following sleep doesn't seem to fix the test_print failure on Windows
# if sys.platform == "win32":
# # Workaround for what seems to be a Windows-specific bug where
# # the output of CUDA's `printf` is not being immediately flushed
# # despite the context synchronisation.
# time.sleep(0.01)
if LIBC is not None:
LIBC.fflush(None)
os.dup2(self.target, self.saved.fileno())
os.close(self.target)
self.tempfile.seek(0)
res = self.tempfile.buffer.read()
self.tempfile.close()
sys.stdout = self.saved
return str(res.decode("utf-8"))
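# Minimal usage sketch for StdOutCapture (illustrative only):
#   capture = StdOutCapture()
#   capture.begin()
#   print("hello")        # redirected into the temporary capture stream
#   text = capture.end()  # returns the captured output, e.g. "hello\n"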
class CheckOutput:
def __init__(self, test):
self.test = test
def __enter__(self):
# wp.force_load()
self.capture = StdOutCapture()
self.capture.begin()
def __exit__(self, exc_type, exc_value, traceback):
# ensure any stdout output is flushed
wp.synchronize()
s = self.capture.end()
if s != "":
print(s.rstrip())
        # fail if test produces unexpected output (e.g. from wp.expect_eq() builtins);
        # we allow strings starting with "Module xxx load on device xxx"
        # emitted by lazily loaded modules
if s != "" and not s.startswith("Module"):
self.test.fail(f"Unexpected output:\n'{s.rstrip()}'")
def assert_array_equal(result: wp.array, expect: wp.array):
np.testing.assert_equal(result.numpy(), expect.numpy())
def assert_np_equal(result: np.ndarray, expect: np.ndarray, tol=0.0):
if tol != 0.0:
# TODO: Get all tests working without the .flatten()
np.testing.assert_allclose(result.flatten(), expect.flatten(), atol=tol, equal_nan=True)
else:
# TODO: Get all tests working with strict=True
np.testing.assert_array_equal(result, expect)
# if check_output is True any output to stdout will be treated as an error
def create_test_func(func, device, check_output, **kwargs):
# pass args to func
def test_func(self):
if check_output:
with CheckOutput(self):
func(self, device, **kwargs)
else:
func(self, device, **kwargs)
return test_func
def skip_test_func(self):
# A function to use so we can tell unittest that the test was skipped.
self.skipTest("No suitable devices to run the test.")
def sanitize_identifier(s):
"""replace all non-identifier characters with '_'"""
s = str(s)
if s.isidentifier():
return s
else:
import re
return re.sub(r"\W|^(?=\d)", "_", s)
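# For example, sanitize_identifier("cuda:0") returns "cuda_0", while an already
# valid identifier such as "cpu" is returned unchanged.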
def add_function_test(cls, name, func, devices=None, check_output=True, **kwargs):
if devices is None:
setattr(cls, name, create_test_func(func, None, check_output, **kwargs))
elif isinstance(devices, list):
if not devices:
# No devices to run this test
setattr(cls, name, skip_test_func)
else:
for device in devices:
setattr(
cls,
name + "_" + sanitize_identifier(device),
create_test_func(func, device, check_output, **kwargs),
)
else:
setattr(
cls,
name + "_" + sanitize_identifier(devices),
create_test_func(func, devices, check_output, **kwargs),
)
def add_kernel_test(cls, kernel, dim, name=None, expect=None, inputs=None, devices=None):
def test_func(self, device):
args = []
if inputs:
args.extend(inputs)
if expect:
# allocate outputs to match results
result = wp.array(expect, dtype=int, device=device)
output = wp.zeros_like(result)
args.append(output)
# force load so that we don't generate any log output during launch
kernel.module.load(device)
with CheckOutput(self):
wp.launch(kernel, dim=dim, inputs=args, device=device)
# check output values
if expect:
assert_array_equal(output, result)
if name is None:
name = kernel.key
# device is required for kernel tests, so use all devices if none were given
if devices is None:
devices = get_test_devices()
# register test func with class for the given devices
for d in devices:
# use a function to forward the device to the inner test function
def test_func_wrapper(test, device=d):
test_func(test, device)
setattr(cls, name + "_" + sanitize_identifier(d), test_func_wrapper)
# helper that first calls the test function to generate all kernel permutations
# so that compilation is done in one-shot instead of per-test
def add_function_test_register_kernel(cls, name, func, devices=None, **kwargs):
func(None, None, **kwargs, register_kernels=True)
add_function_test(cls, name, func, devices=devices, **kwargs)
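# Note: functions registered through add_function_test_register_kernel are
# expected to accept a register_kernels keyword argument and, when it is True,
# only declare their kernels without launching anything (test and device are
# passed as None), so that all kernel permutations are compiled in one shot.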
def write_junit_results(
outfile: str,
test_records: list,
tests_run: int,
tests_failed: int,
tests_errored: int,
tests_skipped: int,
test_duration: float,
):
"""Write a JUnit XML from our report data
The report file is needed for GitLab to add test reports in merge requests.
"""
import xml.etree.ElementTree as ET
root = ET.Element(
"testsuite",
name="Warp Tests",
failures=str(tests_failed),
errors=str(tests_errored),
skipped=str(tests_skipped),
tests=str(tests_run),
time=f"{test_duration:.3f}",
)
for test_data in test_records:
test = test_data[0]
test_duration = test_data[1]
test_status = test_data[2]
test_case = ET.SubElement(
root, "testcase", classname=test.__class__.__name__, name=test._testMethodName, time=f"{test_duration:.3f}"
)
if test_status == "FAIL":
failure = ET.SubElement(test_case, "failure", message=str(test_data[3]))
failure.text = str(test_data[4]) # Stacktrace
elif test_status == "ERROR":
error = ET.SubElement(test_case, "error")
error.text = str(test_data[4]) # Stacktrace
elif test_status == "SKIP":
skip = ET.SubElement(test_case, "skipped")
skip.text = str(test_data[3]) # The skip reason
tree = ET.ElementTree(root)
if hasattr(ET, "indent"):
ET.indent(root) # Pretty-printed XML output, Python 3.9 required
tree.write(outfile, encoding="utf-8", xml_declaration=True)
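# Each entry of test_records is expected to match the tuples produced by
# ParallelJunitTestResult._record_test below, i.e. roughly
# (test, duration_in_seconds, status_code, message, details), where status_code
# is one of "OK", "FAIL", "ERROR" or "SKIP".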
class ParallelJunitTestResult(unittest.TextTestResult):
def __init__(self, stream, descriptions, verbosity):
stream = type(stream)(sys.stderr)
self.test_record = []
super().__init__(stream, descriptions, verbosity)
def startTest(self, test):
if self.showAll:
self.stream.writeln(f"{self.getDescription(test)} ...")
self.stream.flush()
self.start_time = time.perf_counter_ns()
super(unittest.TextTestResult, self).startTest(test)
def _add_helper(self, test, dots_message, show_all_message):
if self.showAll:
self.stream.writeln(f"{self.getDescription(test)} ... {show_all_message}")
elif self.dots:
self.stream.write(dots_message)
self.stream.flush()
def _record_test(self, test, code, message=None, details=None):
duration = round((time.perf_counter_ns() - self.start_time) * 1e-9, 3) # [s]
self.test_record.append((test, duration, code, message, details))
def addSuccess(self, test):
super(unittest.TextTestResult, self).addSuccess(test)
self._add_helper(test, ".", "ok")
self._record_test(test, "OK")
def addError(self, test, err):
super(unittest.TextTestResult, self).addError(test, err)
self._add_helper(test, "E", "ERROR")
self._record_test(test, "ERROR", str(err[1]), self._exc_info_to_string(err, test))
def addFailure(self, test, err):
super(unittest.TextTestResult, self).addFailure(test, err)
self._add_helper(test, "F", "FAIL")
self._record_test(test, "FAIL", str(err[1]), self._exc_info_to_string(err, test))
def addSkip(self, test, reason):
super(unittest.TextTestResult, self).addSkip(test, reason)
self._add_helper(test, "s", f"skipped {reason!r}")
self._record_test(test, "SKIP", reason)
def addExpectedFailure(self, test, err):
super(unittest.TextTestResult, self).addExpectedFailure(test, err)
self._add_helper(test, "x", "expected failure")
self._record_test(test, "OK", "expected failure")
def addUnexpectedSuccess(self, test):
super(unittest.TextTestResult, self).addUnexpectedSuccess(test)
self._add_helper(test, "u", "unexpected success")
self._record_test(test, "FAIL", "unexpected success")
def addSubTest(self, test, subtest, err):
super(unittest.TextTestResult, self).addSubTest(test, subtest, err)
if err is not None:
self._add_helper(test, "E", "ERROR")
# err is (class, error, traceback)
self._record_test(test, "FAIL", str(err[1]), self._exc_info_to_string(err, test))
def printErrors(self):
pass
| 14,463 | Python | 32.250575 | 120 | 0.617438 |
NVIDIA/warp/warp/tests/test_closest_point_edge_edge.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
epsilon = 0.00001
@wp.kernel
def closest_point_edge_edge_kernel(
p1: wp.array(dtype=wp.vec3),
q1: wp.array(dtype=wp.vec3),
p2: wp.array(dtype=wp.vec3),
q2: wp.array(dtype=wp.vec3),
epsilon: float,
st0: wp.array(dtype=wp.vec3),
c1: wp.array(dtype=wp.vec3),
c2: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
st = wp.closest_point_edge_edge(p1[tid], q1[tid], p2[tid], q2[tid], epsilon)
s = st[0]
t = st[1]
st0[tid] = st
c1[tid] = p1[tid] + (q1[tid] - p1[tid]) * s
c2[tid] = p2[tid] + (q2[tid] - p2[tid]) * t
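# The builtin returns parametric coordinates (s, t) along the two segments; the
# closest points are then reconstructed as c1 = p1 + s * (q1 - p1) and
# c2 = p2 + t * (q2 - p2), exactly as done in the kernel above.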
def closest_point_edge_edge_launch(p1, q1, p2, q2, epsilon, st0, c1, c2, device):
n = len(p1)
wp.launch(
kernel=closest_point_edge_edge_kernel,
dim=n,
inputs=[p1, q1, p2, q2, epsilon],
outputs=[st0, c1, c2],
device=device,
)
def run_closest_point_edge_edge(p1_h, q1_h, p2_h, q2_h, device):
p1 = wp.array(p1_h, dtype=wp.vec3, device=device)
q1 = wp.array(q1_h, dtype=wp.vec3, device=device)
p2 = wp.array(p2_h, dtype=wp.vec3, device=device)
q2 = wp.array(q2_h, dtype=wp.vec3, device=device)
st0 = wp.empty_like(p1)
c1 = wp.empty_like(p1)
c2 = wp.empty_like(p1)
closest_point_edge_edge_launch(p1, q1, p2, q2, epsilon, st0, c1, c2, device)
wp.synchronize()
view = st0.numpy()
return view
def test_edge_edge_middle_crossing(test, device):
p1_h = np.array([[0, 0, 0]])
q1_h = np.array([[1, 1, 0]])
p2_h = np.array([[0, 1, 0]])
q2_h = np.array([[1, 0, 0]])
res = run_closest_point_edge_edge(p1_h, q1_h, p2_h, q2_h, device)
st0 = res[0]
test.assertAlmostEqual(st0[0], 0.5) # s value
test.assertAlmostEqual(st0[1], 0.5) # t value
def test_edge_edge_parallel_s1_t0(test, device):
p1_h = np.array([[0, 0, 0]])
q1_h = np.array([[1, 1, 0]])
p2_h = np.array([[2, 2, 0]])
q2_h = np.array([[3, 3, 0]])
res = run_closest_point_edge_edge(p1_h, q1_h, p2_h, q2_h, device)
st0 = res[0]
test.assertAlmostEqual(st0[0], 1.0) # s value
test.assertAlmostEqual(st0[1], 0.0) # t value
def test_edge_edge_parallel_s0_t1(test, device):
p1_h = np.array([[0, 0, 0]])
q1_h = np.array([[1, 1, 0]])
p2_h = np.array([[-2, -2, 0]])
q2_h = np.array([[-1, -1, 0]])
res = run_closest_point_edge_edge(p1_h, q1_h, p2_h, q2_h, device)
st0 = res[0]
test.assertAlmostEqual(st0[0], 0.0) # s value
test.assertAlmostEqual(st0[1], 1.0) # t value
def test_edge_edge_both_degenerate_case(test, device):
p1_h = np.array([[0, 0, 0]])
q1_h = np.array([[0, 0, 0]])
p2_h = np.array([[1, 1, 1]])
q2_h = np.array([[1, 1, 1]])
res = run_closest_point_edge_edge(p1_h, q1_h, p2_h, q2_h, device)
st0 = res[0]
test.assertAlmostEqual(st0[0], 0.0) # s value
test.assertAlmostEqual(st0[1], 0.0) # t value
def test_edge_edge_degenerate_first_edge(test, device):
p1_h = np.array([[0, 0, 0]])
q1_h = np.array([[0, 0, 0]])
p2_h = np.array([[0, 1, 0]])
q2_h = np.array([[1, 0, 0]])
res = run_closest_point_edge_edge(p1_h, q1_h, p2_h, q2_h, device)
st0 = res[0]
test.assertAlmostEqual(st0[0], 0.0) # s value
test.assertAlmostEqual(st0[1], 0.5) # t value
def test_edge_edge_degenerate_second_edge(test, device):
p1_h = np.array([[1, 0, 0]])
q1_h = np.array([[0, 1, 0]])
p2_h = np.array([[1, 1, 0]])
q2_h = np.array([[1, 1, 0]])
res = run_closest_point_edge_edge(p1_h, q1_h, p2_h, q2_h, device)
st0 = res[0]
test.assertAlmostEqual(st0[0], 0.5) # s value
test.assertAlmostEqual(st0[1], 0.0) # t value
def test_edge_edge_parallel(test, device):
p1_h = np.array([[0, 0, 0]])
q1_h = np.array([[1, 0, 0]])
p2_h = np.array([[-0.5, 1, 0]])
q2_h = np.array([[0.5, 1, 0]])
res = run_closest_point_edge_edge(p1_h, q1_h, p2_h, q2_h, device)
st0 = res[0]
test.assertAlmostEqual(st0[0], 0.0) # s value
test.assertAlmostEqual(st0[1], 0.5) # t value
def test_edge_edge_perpendicular_s1_t0(test, device):
p1_h = np.array([[0, 0, 0]])
q1_h = np.array([[1, 1, 0]])
p2_h = np.array([[10, 1, 0]])
q2_h = np.array([[11, 0, 0]])
res = run_closest_point_edge_edge(p1_h, q1_h, p2_h, q2_h, device)
st0 = res[0]
test.assertAlmostEqual(st0[0], 1.0) # s value
test.assertAlmostEqual(st0[1], 0.0) # t value
def test_edge_edge_perpendicular_s0_t1(test, device):
p1_h = np.array([[0, 0, 0]])
q1_h = np.array([[1, 1, 0]])
p2_h = np.array([[-11, -1, 0]])
q2_h = np.array([[-5, 0, 0]])
res = run_closest_point_edge_edge(p1_h, q1_h, p2_h, q2_h, device)
st0 = res[0]
test.assertAlmostEqual(st0[0], 0.0) # s value
test.assertAlmostEqual(st0[1], 1.0) # t value
devices = get_test_devices()
class TestClosestPointEdgeEdgeMethods(unittest.TestCase):
pass
add_function_test(
TestClosestPointEdgeEdgeMethods,
"test_edge_edge_middle_crossing",
test_edge_edge_middle_crossing,
devices=devices,
)
add_function_test(
TestClosestPointEdgeEdgeMethods, "test_edge_edge_parallel_s1_t0", test_edge_edge_parallel_s1_t0, devices=devices
)
add_function_test(
TestClosestPointEdgeEdgeMethods, "test_edge_edge_parallel_s0_t1", test_edge_edge_parallel_s0_t1, devices=devices
)
add_function_test(
TestClosestPointEdgeEdgeMethods,
"test_edge_edge_both_degenerate_case",
test_edge_edge_both_degenerate_case,
devices=devices,
)
add_function_test(
TestClosestPointEdgeEdgeMethods,
"test_edge_edge_degenerate_first_edge",
test_edge_edge_degenerate_first_edge,
devices=devices,
)
add_function_test(
TestClosestPointEdgeEdgeMethods,
"test_edge_edge_degenerate_second_edge",
test_edge_edge_degenerate_second_edge,
devices=devices,
)
add_function_test(TestClosestPointEdgeEdgeMethods, "test_edge_edge_parallel", test_edge_edge_parallel, devices=devices)
add_function_test(
TestClosestPointEdgeEdgeMethods,
"test_edge_edge_perpendicular_s1_t0",
test_edge_edge_perpendicular_s1_t0,
devices=devices,
)
add_function_test(
TestClosestPointEdgeEdgeMethods,
"test_edge_edge_perpendicular_s0_t1",
test_edge_edge_perpendicular_s0_t1,
devices=devices,
)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 6,831 | Python | 28.964912 | 119 | 0.624213 |
NVIDIA/warp/warp/tests/test_sim_grad.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
import warp.sim
from warp.tests.unittest_utils import *
@wp.kernel
def evaluate_loss(
joint_q: wp.array(dtype=float),
weighting: float,
target: float,
# output
loss: wp.array(dtype=float),
):
tid = wp.tid()
# wp.atomic_add(loss, 0, weighting * (target - joint_q[tid * 2 + 1]) ** 2.0)
d = wp.abs(target - joint_q[tid * 2 + 1])
wp.atomic_add(loss, 0, weighting * d)
@wp.kernel
def assign_action(action: wp.array(dtype=float), joint_act: wp.array(dtype=float)):
tid = wp.tid()
joint_act[2 * tid] = action[tid]
def gradcheck(func, inputs, device, eps=1e-1, tol=1e-2):
"""
Checks that the gradient of the Warp kernel is correct by comparing it to the
numerical gradient computed using finite differences.
"""
def f(xs):
# call the kernel without taping for finite differences
wp_xs = [wp.array(xs[i], ndim=1, dtype=inputs[i].dtype, device=device) for i in range(len(inputs))]
output = func(*wp_xs)
return output.numpy()[0]
# compute numerical gradient
numerical_grad = []
np_xs = []
for i in range(len(inputs)):
np_xs.append(inputs[i].numpy().flatten().copy())
numerical_grad.append(np.zeros_like(np_xs[-1]))
inputs[i].requires_grad = True
for i in range(len(np_xs)):
for j in range(len(np_xs[i])):
np_xs[i][j] += eps
y1 = f(np_xs)
np_xs[i][j] -= 2 * eps
y2 = f(np_xs)
np_xs[i][j] += eps
numerical_grad[i][j] = (y1 - y2) / (2 * eps)
# compute analytical gradient
tape = wp.Tape()
with tape:
output = func(*inputs)
tape.backward(loss=output)
# compare gradients
for i in range(len(inputs)):
grad = tape.gradients[inputs[i]]
assert_np_equal(grad.numpy(), numerical_grad[i], tol=tol)
# ensure the signs match
assert np.allclose(grad.numpy() * numerical_grad[i] > 0, True)
tape.zero()
def test_box_pushing_on_rails(test, device, joint_type, integrator_type):
# Two boxes on a rail (prismatic or D6 joint), one is pushed, the other is passive.
# The absolute distance to a target is measured and gradients are compared for
# a push that is too far and too close.
num_envs = 2
num_steps = 200
sim_substeps = 2
dt = 1 / 30
target = 5.0
if integrator_type == 0:
contact_ke = 1e5
contact_kd = 1e3
else:
contact_ke = 1e5
contact_kd = 1e1
complete_builder = wp.sim.ModelBuilder()
complete_builder.default_shape_ke = contact_ke
complete_builder.default_shape_kd = contact_kd
for _ in range(num_envs):
builder = wp.sim.ModelBuilder()
builder.default_shape_ke = complete_builder.default_shape_ke
builder.default_shape_kd = complete_builder.default_shape_kd
b0 = builder.add_body(name="pusher")
builder.add_shape_box(b0, density=1000.0)
b1 = builder.add_body(name="passive")
builder.add_shape_box(b1, hx=0.4, hy=0.4, hz=0.4, density=1000.0)
if joint_type == 0:
builder.add_joint_prismatic(-1, b0)
builder.add_joint_prismatic(-1, b1)
else:
builder.add_joint_d6(-1, b0, linear_axes=[wp.sim.JointAxis((1.0, 0.0, 0.0))])
builder.add_joint_d6(-1, b1, linear_axes=[wp.sim.JointAxis((1.0, 0.0, 0.0))])
builder.joint_q[-2:] = [0.0, 1.0]
complete_builder.add_builder(builder)
assert complete_builder.body_count == 2 * num_envs
assert complete_builder.joint_count == 2 * num_envs
assert set(complete_builder.shape_collision_group) == set(range(1, num_envs + 1))
complete_builder.gravity = 0.0
model = complete_builder.finalize(device=device, requires_grad=True)
model.ground = False
model.joint_attach_ke = 32000.0 * 16
model.joint_attach_kd = 500.0 * 4
if integrator_type == 0:
integrator = wp.sim.FeatherstoneIntegrator(model, update_mass_matrix_every=num_steps * sim_substeps)
elif integrator_type == 1:
integrator = wp.sim.SemiImplicitIntegrator()
sim_substeps *= 5
else:
integrator = wp.sim.XPBDIntegrator(iterations=2, rigid_contact_relaxation=1.0)
# renderer = wp.sim.render.SimRenderer(model, "test_sim_grad.usd", scaling=1.0)
renderer = None
render_time = 0.0
def rollout(action: wp.array) -> wp.array:
nonlocal render_time
states = [model.state() for _ in range(num_steps * sim_substeps + 1)]
if not isinstance(integrator, wp.sim.FeatherstoneIntegrator):
# apply initial generalized coordinates
wp.sim.eval_fk(model, model.joint_q, model.joint_qd, None, states[0])
control_active = model.control()
control_nop = model.control()
wp.launch(
assign_action,
dim=num_envs,
inputs=[action],
outputs=[control_active.joint_act],
device=model.device,
)
i = 0
for step in range(num_steps):
wp.sim.collide(model, states[i])
control = control_active if step < 10 else control_nop
if renderer:
renderer.begin_frame(render_time)
renderer.render(states[i])
renderer.end_frame()
render_time += dt
for _ in range(sim_substeps):
integrator.simulate(model, states[i], states[i + 1], dt / sim_substeps, control)
i += 1
if not isinstance(integrator, wp.sim.FeatherstoneIntegrator):
# compute generalized coordinates
wp.sim.eval_ik(model, states[-1], states[-1].joint_q, states[-1].joint_qd)
loss = wp.zeros(1, requires_grad=True, device=device)
wp.launch(
evaluate_loss,
dim=num_envs,
inputs=[states[-1].joint_q, 1.0, target],
outputs=[loss],
device=model.device,
)
if renderer:
renderer.save()
return loss
action_too_far = wp.array(
[5000.0 for _ in range(num_envs)],
device=device,
dtype=wp.float32,
requires_grad=True,
)
tol = 1e-2
if isinstance(integrator, wp.sim.XPBDIntegrator):
# Euler, XPBD do not yield as accurate gradients, but at least the
# signs should match
tol = 0.1
gradcheck(rollout, [action_too_far], device=device, eps=0.2, tol=tol)
action_too_close = wp.array(
[1500.0 for _ in range(num_envs)],
device=device,
dtype=wp.float32,
requires_grad=True,
)
gradcheck(rollout, [action_too_close], device=device, eps=0.2, tol=tol)
devices = get_test_devices()
class TestSimGradients(unittest.TestCase):
pass
for int_type, int_name in enumerate(["featherstone", "semiimplicit"]):
for jt_type, jt_name in enumerate(["prismatic", "d6"]):
test_name = f"test_box_pushing_on_rails_{int_name}_{jt_name}"
def test_fn(self, device, jt_type=jt_type, int_type=int_type):
return test_box_pushing_on_rails(self, device, jt_type, int_type)
add_function_test(TestSimGradients, test_name, test_fn, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=True)
| 7,819 | Python | 31.314049 | 108 | 0.611203 |
NVIDIA/warp/warp/tests/test_bvh.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def bvh_query_aabb(bvh_id: wp.uint64, lower: wp.vec3, upper: wp.vec3, bounds_intersected: wp.array(dtype=int)):
query = wp.bvh_query_aabb(bvh_id, lower, upper)
bounds_nr = int(0)
while wp.bvh_query_next(query, bounds_nr):
bounds_intersected[bounds_nr] = 1
@wp.kernel
def bvh_query_ray(bvh_id: wp.uint64, start: wp.vec3, dir: wp.vec3, bounds_intersected: wp.array(dtype=int)):
query = wp.bvh_query_ray(bvh_id, start, dir)
bounds_nr = int(0)
while wp.bvh_query_next(query, bounds_nr):
bounds_intersected[bounds_nr] = 1
def aabb_overlap(a_lower, a_upper, b_lower, b_upper):
if (
a_lower[0] > b_upper[0]
or a_lower[1] > b_upper[1]
or a_lower[2] > b_upper[2]
or a_upper[0] < b_lower[0]
or a_upper[1] < b_lower[1]
or a_upper[2] < b_lower[2]
):
return 0
else:
return 1
def intersect_ray_aabb(start, dir, lower, upper):
l1 = (lower[0] - start[0]) * dir[0]
l2 = (upper[0] - start[0]) * dir[0]
lmin = min(l1, l2)
lmax = max(l1, l2)
l1 = (lower[1] - start[1]) * dir[1]
l2 = (upper[1] - start[1]) * dir[1]
lmin = max(min(l1, l2), lmin)
lmax = min(max(l1, l2), lmax)
l1 = (lower[2] - start[2]) * dir[2]
l2 = (upper[2] - start[2]) * dir[2]
lmin = max(min(l1, l2), lmin)
lmax = min(max(l1, l2), lmax)
if lmax >= 0.0 and lmax >= lmin:
return 1
else:
return 0
def test_bvh(test, type, device):
rng = np.random.default_rng(123)
num_bounds = 100
lowers = rng.random(size=(num_bounds, 3)) * 5.0
uppers = lowers + rng.random(size=(num_bounds, 3)) * 5.0
device_lowers = wp.array(lowers, dtype=wp.vec3, device=device)
device_uppers = wp.array(uppers, dtype=wp.vec3, device=device)
bvh = wp.Bvh(device_lowers, device_uppers)
bounds_intersected = wp.zeros(shape=(num_bounds), dtype=int, device=device)
query_lower = wp.vec3(2.0, 2.0, 2.0)
query_upper = wp.vec3(8.0, 8.0, 8.0)
query_start = wp.vec3(0.0, 0.0, 0.0)
query_dir = wp.normalize(wp.vec3(1.0, 1.0, 1.0))
for test_case in range(2):
if type == "AABB":
wp.launch(
kernel=bvh_query_aabb,
dim=1,
inputs=[bvh.id, query_lower, query_upper, bounds_intersected],
device=device,
)
else:
wp.launch(
kernel=bvh_query_ray, dim=1, inputs=[bvh.id, query_start, query_dir, bounds_intersected], device=device
)
device_intersected = bounds_intersected.numpy()
for i in range(num_bounds):
lower = lowers[i]
upper = uppers[i]
if type == "AABB":
host_intersected = aabb_overlap(lower, upper, query_lower, query_upper)
else:
host_intersected = intersect_ray_aabb(query_start, query_dir, lower, upper)
test.assertEqual(host_intersected, device_intersected[i])
if test_case == 0:
lowers = rng.random(size=(num_bounds, 3)) * 5.0
uppers = lowers + rng.random(size=(num_bounds, 3)) * 5.0
wp.copy(device_lowers, wp.array(lowers, dtype=wp.vec3, device=device))
wp.copy(device_uppers, wp.array(uppers, dtype=wp.vec3, device=device))
bvh.refit()
bounds_intersected.zero_()
def test_bvh_query_aabb(test, device):
test_bvh(test, "AABB", device)
def test_bvh_query_ray(test, device):
test_bvh(test, "ray", device)
devices = get_test_devices()
class TestBvh(unittest.TestCase):
def test_bvh_codegen_adjoints_with_select(self):
def kernel_fn(bvh: wp.uint64):
v = wp.vec3(0.0, 0.0, 0.0)
bounds_nr = int(0)
if True:
query_1 = wp.bvh_query_aabb(bvh, v, v)
query_2 = wp.bvh_query_ray(bvh, v, v)
wp.bvh_query_next(query_1, bounds_nr)
wp.bvh_query_next(query_2, bounds_nr)
else:
query_1 = wp.bvh_query_aabb(bvh, v, v)
query_2 = wp.bvh_query_ray(bvh, v, v)
wp.bvh_query_next(query_1, bounds_nr)
wp.bvh_query_next(query_2, bounds_nr)
wp.Kernel(func=kernel_fn)
add_function_test(TestBvh, "test_bvh_aabb", test_bvh_query_aabb, devices=devices)
add_function_test(TestBvh, "test_bvh_ray", test_bvh_query_ray, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 5,050 | Python | 29.98773 | 119 | 0.590099 |
NVIDIA/warp/warp/thirdparty/dlpack.py | import ctypes
_c_str_dltensor = b"dltensor"
class DLDeviceType(ctypes.c_int):
"""The enum that encodes the type of the device where
DLTensor memory is allocated.
"""
kDLCPU = 1
kDLCUDA = 2
kDLCUDAHost = 3
kDLOpenCL = 4
kDLVulkan = 7
kDLMetal = 8
kDLVPI = 9
kDLROCM = 10
kDLROCMHost = 11
kDLCUDAManaged = 13
kDLOneAPI = 14
def __str__(self):
return {
self.kDLCPU: "CPU",
self.kDLCUDA: "CUDA",
self.kDLCUDAHost: "CUDAHost",
self.kDLOpenCL: "OpenCL",
self.kDLVulkan: "Vulkan",
self.kDLMetal: "Metal",
self.kDLVPI: "VPI",
self.kDLROCM: "ROCM",
            self.kDLROCMHost: "ROCMHost",
self.kDLCUDAManaged: "CUDAManaged",
self.kDLOneAPI: "oneAPI",
}[self.value]
class DLDevice(ctypes.Structure):
"""Represents the device where DLTensor memory is allocated.
The device is represented by the pair of fields:
device_type: DLDeviceType
device_id: c_int
"""
_fields_ = [
("device_type", DLDeviceType),
("device_id", ctypes.c_int),
]
class DLDataTypeCode(ctypes.c_uint8):
"""An integer that encodes the category of DLTensor elements' data type."""
kDLInt = 0
kDLUInt = 1
kDLFloat = 2
kDLOpaquePointer = 3
kDLBfloat = 4
kDLComplex = 5
def __str__(self):
return {
self.kDLInt: "int",
self.kDLUInt: "uint",
self.kDLFloat: "float",
self.kDLBfloat: "bfloat",
self.kDLComplex: "complex",
self.kDLOpaquePointer: "void_p",
}[self.value]
class DLDataType(ctypes.Structure):
"""Descriptor of data type for elements of DLTensor.
The data type is described by a triple, `DLDataType.type_code`,
`DLDataType.bits`, and `DLDataType.lanes`.
The element is understood as packed `lanes` repetitions of
elements from `type_code` data-category of width `bits`.
"""
_fields_ = [
("type_code", DLDataTypeCode),
("bits", ctypes.c_uint8),
("lanes", ctypes.c_uint16),
]
TYPE_MAP = {
"bool": (DLDataTypeCode.kDLUInt, 1, 1),
"int8": (DLDataTypeCode.kDLInt, 8, 1),
"int16": (DLDataTypeCode.kDLInt, 16, 1),
"int32": (DLDataTypeCode.kDLInt, 32, 1),
"int64": (DLDataTypeCode.kDLInt, 64, 1),
"uint8": (DLDataTypeCode.kDLUInt, 8, 1),
"uint16": (DLDataTypeCode.kDLUInt, 16, 1),
"uint32": (DLDataTypeCode.kDLUInt, 32, 1),
"uint64": (DLDataTypeCode.kDLUInt, 64, 1),
"float16": (DLDataTypeCode.kDLFloat, 16, 1),
"float32": (DLDataTypeCode.kDLFloat, 32, 1),
"float64": (DLDataTypeCode.kDLFloat, 64, 1),
"complex64": (DLDataTypeCode.kDLComplex, 64, 1),
"complex128": (DLDataTypeCode.kDLComplex, 128, 1),
}
class DLTensor(ctypes.Structure):
"""Structure describing strided layout of DLTensor.
Fields are:
data: void pointer
device: DLDevice
ndim: number of indices needed to reference an
element of the tensor
dtype: data type descriptor
shape: tuple with lengths of the corresponding
tensor dimensions
strides: tuple of numbers of array elements to
step in each dimension when traversing
the tensor
byte_offset: data + byte_offset gives the address of
tensor element with index (0,) * ndim
"""
_fields_ = [
("data", ctypes.c_void_p),
("device", DLDevice),
("ndim", ctypes.c_int),
("dtype", DLDataType),
("shape", ctypes.POINTER(ctypes.c_int64)),
("strides", ctypes.POINTER(ctypes.c_int64)),
("byte_offset", ctypes.c_uint64),
]
class DLManagedTensor(ctypes.Structure):
"""Structure storing the pointer to the tensor descriptor,
deleter callable for the tensor descriptor, and pointer to
some additional data. These are stored in fields `dl_tensor`,
`deleter`, and `manager_ctx`."""
_fields_ = [
("dl_tensor", DLTensor),
("manager_ctx", ctypes.c_void_p),
("deleter", ctypes.CFUNCTYPE(None, ctypes.c_void_p)),
]
| 4,273 | Python | 28.680555 | 79 | 0.586707 |
NVIDIA/warp/warp/thirdparty/appdirs.py | # -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
"""Utilities for determining application-specific dirs.
See <https://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version__ = "1.4.4"
__version_info__ = tuple(int(segment) for segment in __version__.split("."))
import sys
import os
PY3 = sys.version_info[0] == 3
if PY3:
unicode = str
if sys.platform.startswith("java"):
import platform
os_name = platform.java_ver()[3][0]
if os_name.startswith("Windows"): # "Windows XP", "Windows 7", etc.
system = "win32"
elif os_name.startswith("Mac"): # "Mac OS X", etc.
system = "darwin"
else: # "Linux", "SunOS", "FreeBSD", etc.
# Setting this to "linux2" is not ideal, but only Windows or Mac
# are actually checked for and the rest of the module expects
# *sys.platform* style strings.
system = "linux2"
else:
system = sys.platform
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: ~/Library/Application Support/<AppName>
Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
That means, by default "~/.local/share/<AppName>".
"""
if system == "win32":
if appauthor is None:
appauthor = appname
const = "CSIDL_APPDATA" if roaming else "CSIDL_LOCAL_APPDATA"
path = os.path.normpath(_get_win_folder(const))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == "darwin":
path = os.path.expanduser("~/Library/Application Support/")
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv("XDG_DATA_HOME", os.path.expanduser("~/.local/share"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
r"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of data dirs should be
returned. By default, the first item from XDG_DATA_DIRS is
returned, or '/usr/local/share/<AppName>',
if XDG_DATA_DIRS is not set
Typical site data directories are:
Mac OS X: /Library/Application Support/<AppName>
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
For Unix, this is using the $XDG_DATA_DIRS[0] default.
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == "darwin":
path = os.path.expanduser("/Library/Application Support")
if appname:
path = os.path.join(path, appname)
else:
# XDG default for $XDG_DATA_DIRS
# only first, if multipath is False
path = os.getenv("XDG_DATA_DIRS", os.pathsep.join(["/usr/local/share", "/usr/share"]))
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
if appname and version:
path = os.path.join(path, version)
return path
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user config directories are:
Mac OS X: ~/Library/Preferences/<AppName>
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
That means, by default "~/.config/<AppName>".
"""
if system == "win32":
path = user_data_dir(appname, appauthor, None, roaming)
elif system == "darwin":
path = os.path.expanduser("~/Library/Preferences/")
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
r"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of config dirs should be
returned. By default, the first item from XDG_CONFIG_DIRS is
returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
Typical site config directories are:
Mac OS X: same as site_data_dir
Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
$XDG_CONFIG_DIRS
Win *: same as site_data_dir
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system == "win32":
path = site_data_dir(appname, appauthor)
if appname and version:
path = os.path.join(path, version)
elif system == "darwin":
path = os.path.expanduser("/Library/Preferences")
if appname:
path = os.path.join(path, appname)
else:
# XDG default for $XDG_CONFIG_DIRS
# only first, if multipath is False
path = os.getenv("XDG_CONFIG_DIRS", "/etc/xdg")
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Cache" to the base app data dir for Windows. See
discussion below.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go in
the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
app data dir (the default returned by `user_data_dir` above). Apps typically
put cache data somewhere *under* the given dir here. Some examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
This can be disabled with the `opinion=False` option.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
if opinion:
path = os.path.join(path, "Cache")
elif system == "darwin":
path = os.path.expanduser("~/Library/Caches")
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific state dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user state directories are:
Mac OS X: same as user_data_dir
Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
to extend the XDG spec and support $XDG_STATE_HOME.
That means, by default "~/.local/state/<AppName>".
"""
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv("XDG_STATE_HOME", os.path.expanduser("~/.local/state"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific log dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Logs" to the base app data dir for Windows, and "log" to the
base cache dir for Unix. See discussion below.
Typical user log directories are:
Mac OS X: ~/Library/Logs/<AppName>
Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
On Windows the only suggestion in the MSDN docs is that local settings
go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
examples of what some windows apps use for a logs dir.)
OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
value for Windows and appends "log" to the user cache dir for Unix.
This can be disabled with the `opinion=False` option.
"""
if system == "darwin":
path = os.path.join(os.path.expanduser("~/Library/Logs"), appname)
elif system == "win32":
path = user_data_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "Logs")
else:
path = user_cache_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "log")
if appname and version:
path = os.path.join(path, version)
return path
class AppDirs(object):
"""Convenience wrapper for getting application dirs."""
def __init__(self, appname=None, appauthor=None, version=None, roaming=False, multipath=False):
self.appname = appname
self.appauthor = appauthor
self.version = version
self.roaming = roaming
self.multipath = multipath
@property
def user_data_dir(self):
return user_data_dir(self.appname, self.appauthor, version=self.version, roaming=self.roaming)
@property
def site_data_dir(self):
return site_data_dir(self.appname, self.appauthor, version=self.version, multipath=self.multipath)
@property
def user_config_dir(self):
return user_config_dir(self.appname, self.appauthor, version=self.version, roaming=self.roaming)
@property
def site_config_dir(self):
return site_config_dir(self.appname, self.appauthor, version=self.version, multipath=self.multipath)
@property
def user_cache_dir(self):
return user_cache_dir(self.appname, self.appauthor, version=self.version)
@property
def user_state_dir(self):
return user_state_dir(self.appname, self.appauthor, version=self.version)
@property
def user_log_dir(self):
return user_log_dir(self.appname, self.appauthor, version=self.version)
# ---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
"""This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
if PY3:
import winreg as _winreg
else:
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
return dir
def _get_win_folder_with_ctypes(csidl_name):
import ctypes
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
def _get_win_folder_with_jna(csidl_name):
import array
from com.sun import jna
from com.sun.jna.platform import win32
buf_size = win32.WinDef.MAX_PATH * 2
buf = array.zeros("c", buf_size)
shell = win32.Shell32.INSTANCE
shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf = array.zeros("c", buf_size)
kernel = win32.Kernel32.INSTANCE
if kernel.GetShortPathName(dir, buf, buf_size):
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
return dir
def _get_win_folder_from_environ(csidl_name):
env_var_name = {
"CSIDL_APPDATA": "APPDATA",
"CSIDL_COMMON_APPDATA": "ALLUSERSPROFILE",
"CSIDL_LOCAL_APPDATA": "LOCALAPPDATA",
}[csidl_name]
return os.environ[env_var_name]
if system == "win32":
try:
from ctypes import windll
except ImportError:
try:
import com.sun.jna
except ImportError:
try:
if PY3:
import winreg as _winreg
else:
import _winreg
except ImportError:
_get_win_folder = _get_win_folder_from_environ
else:
_get_win_folder = _get_win_folder_from_registry
else:
_get_win_folder = _get_win_folder_with_jna
else:
_get_win_folder = _get_win_folder_with_ctypes
# ---- self test code
if __name__ == "__main__":
appname = "MyApp"
appauthor = "MyCompany"
props = (
"user_data_dir",
"user_config_dir",
"user_cache_dir",
"user_state_dir",
"user_log_dir",
"site_data_dir",
"site_config_dir",
)
print("-- app dirs %s --" % __version__)
print("-- app dirs (with optional 'version')")
dirs = AppDirs(appname, appauthor, version="1.0")
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'version')")
dirs = AppDirs(appname, appauthor)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'appauthor')")
dirs = AppDirs(appname)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (with disabled 'appauthor')")
dirs = AppDirs(appname, appauthor=False)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
| 24,253 | Python | 39.490818 | 122 | 0.624005 |
NVIDIA/warp/warp/thirdparty/unittest_parallel.py | # Licensed under the MIT License
# https://github.com/craigahobbs/unittest-parallel/blob/main/LICENSE
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""
unittest-parallel command-line script main module
"""
import argparse
import concurrent.futures # NVIDIA Modification
import multiprocessing
import os
import sys
import tempfile
import time
import unittest
from contextlib import contextmanager
from io import StringIO
import warp.tests.unittest_suites # NVIDIA Modification
from warp.tests.unittest_utils import ( # NVIDIA modification
ParallelJunitTestResult,
write_junit_results,
)
try:
import coverage
COVERAGE_AVAILABLE = True # NVIDIA Modification
except ImportError:
COVERAGE_AVAILABLE = False # NVIDIA Modification
# The following variables are NVIDIA Modifications
START_DIRECTORY = os.path.dirname(__file__) # The directory to start test discovery
def main(argv=None):
"""
unittest-parallel command-line script main entry point
"""
# Command line arguments
parser = argparse.ArgumentParser(
prog="unittest-parallel",
# NVIDIA Modifications follow:
formatter_class=argparse.RawTextHelpFormatter,
epilog="""Example usage:
python -m warp.tests -s autodetect -p 'test_a*.py'
python -m warp.tests -s kit
python -m warp.tests -k 'mgpu' -k 'cuda'
""",
)
# parser.add_argument("-v", "--verbose", action="store_const", const=2, default=1, help="Verbose output")
parser.add_argument("-q", "--quiet", dest="verbose", action="store_const", const=0, default=2, help="Quiet output")
parser.add_argument("-f", "--failfast", action="store_true", default=False, help="Stop on first fail or error")
parser.add_argument(
"-b", "--buffer", action="store_true", default=False, help="Buffer stdout and stderr during tests"
)
parser.add_argument(
"-k",
dest="testNamePatterns",
action="append",
type=_convert_select_pattern,
help="Only run tests which match the given substring",
)
parser.add_argument(
"-p",
"--pattern",
metavar="PATTERN",
default="test*.py",
help="'autodetect' suite only: Pattern to match tests ('test*.py' default)", # NVIDIA Modification
)
parser.add_argument(
"-t",
"--top-level-directory",
metavar="TOP",
help="Top level directory of project (defaults to start directory)",
)
parser.add_argument(
"--junit-report-xml", metavar="FILE", help="Generate JUnit report format XML file"
) # NVIDIA Modification
parser.add_argument(
"-s",
"--suite",
type=str,
default="default",
choices=["autodetect", "default", "kit"],
help="Name of the test suite to run (default is 'default').",
) # NVIDIA Modification
group_parallel = parser.add_argument_group("parallelization options")
group_parallel.add_argument(
"-j",
"--jobs",
metavar="COUNT",
type=int,
default=0,
help="The number of test processes (default is 0, all cores)",
)
group_parallel.add_argument(
"-m",
"--maxjobs",
metavar="MAXCOUNT",
type=int,
default=8,
help="The maximum number of test processes (default is 8)",
) # NVIDIA Modification
group_parallel.add_argument(
"--level",
choices=["module", "class", "test"],
default="class",
help="Set the test parallelism level (default is 'class')",
)
group_parallel.add_argument(
"--disable-process-pooling",
action="store_true",
default=False,
help="Do not reuse processes used to run test suites",
)
group_parallel.add_argument(
"--disable-concurrent-futures",
action="store_true",
default=False,
help="Use multiprocessing instead of concurrent.futures.",
) # NVIDIA Modification
group_parallel.add_argument(
"--serial-fallback",
action="store_true",
default=False,
help="Run in a single-process (no spawning) mode without multiprocessing or concurrent.futures.",
) # NVIDIA Modification
group_coverage = parser.add_argument_group("coverage options")
group_coverage.add_argument("--coverage", action="store_true", help="Run tests with coverage")
group_coverage.add_argument("--coverage-branch", action="store_true", help="Run tests with branch coverage")
group_coverage.add_argument(
"--coverage-html",
metavar="DIR",
help="Generate coverage HTML report",
default=os.path.join(START_DIRECTORY, "..", "..", "htmlcov"),
)
group_coverage.add_argument("--coverage-xml", metavar="FILE", help="Generate coverage XML report")
group_coverage.add_argument(
"--coverage-fail-under", metavar="MIN", type=float, help="Fail if coverage percentage under min"
)
group_warp = parser.add_argument_group("NVIDIA Warp options") # NVIDIA Modification
group_warp.add_argument(
"--no-shared-cache", action="store_true", help="Use a separate kernel cache per test process."
)
args = parser.parse_args(args=argv)
if args.coverage_branch:
args.coverage = args.coverage_branch
if args.coverage and not COVERAGE_AVAILABLE:
parser.exit(
status=2, message="--coverage was used, but coverage was not found. Is it installed?\n"
) # NVIDIA Modification
process_count = max(0, args.jobs)
if process_count == 0:
process_count = multiprocessing.cpu_count()
process_count = min(process_count, args.maxjobs) # NVIDIA Modification
import warp as wp # NVIDIA Modification
# Clear the Warp cache (NVIDIA Modification)
wp.build.clear_kernel_cache()
print("Cleared Warp kernel cache")
# Create the temporary directory (for coverage files)
with tempfile.TemporaryDirectory() as temp_dir:
# Discover tests
with _coverage(args, temp_dir):
test_loader = unittest.TestLoader()
if args.testNamePatterns:
test_loader.testNamePatterns = args.testNamePatterns
auto_discover_suite = warp.tests.unittest_suites.auto_discover_suite(
test_loader, args.pattern
) # NVIDIA Modification
# NVIDIA Modification
if args.suite != "autodetect":
# Print notices for test classes missing from the suite when compared to auto-discovered tests
discover_suite = warp.tests.unittest_suites.compare_unittest_suites(
test_loader, args.suite, auto_discover_suite
)
else:
discover_suite = auto_discover_suite
# Get the parallelizable test suites
if args.level == "test":
test_suites = list(_iter_test_cases(discover_suite))
elif args.level == "class":
test_suites = list(_iter_class_suites(discover_suite))
else: # args.level == 'module'
test_suites = list(_iter_module_suites(discover_suite))
# Don't use more processes than test suites
process_count = max(1, min(len(test_suites), process_count))
if not args.serial_fallback:
# Report test suites and processes
print(
f"Running {len(test_suites)} test suites ({discover_suite.countTestCases()} total tests) across {process_count} processes",
file=sys.stderr,
)
if args.verbose > 1:
print(file=sys.stderr)
# Create the shared index object used in Warp caches (NVIDIA Modification)
manager = multiprocessing.Manager()
shared_index = manager.Value("i", -1)
# Run the tests in parallel
start_time = time.perf_counter()
if args.disable_concurrent_futures:
multiprocessing_context = multiprocessing.get_context(method="spawn")
maxtasksperchild = 1 if args.disable_process_pooling else None
with multiprocessing_context.Pool(
process_count,
maxtasksperchild=maxtasksperchild,
initializer=initialize_test_process,
initargs=(manager.Lock(), shared_index, args, temp_dir),
) as pool:
test_manager = ParallelTestManager(manager, args, temp_dir)
results = pool.map(test_manager.run_tests, test_suites)
else:
# NVIDIA Modification added concurrent.futures
with concurrent.futures.ProcessPoolExecutor(
max_workers=process_count,
mp_context=multiprocessing.get_context(method="spawn"),
initializer=initialize_test_process,
initargs=(manager.Lock(), shared_index, args, temp_dir),
) as executor:
test_manager = ParallelTestManager(manager, args, temp_dir)
results = list(executor.map(test_manager.run_tests, test_suites, timeout=7200))
else:
# This entire path is an NVIDIA Modification
# Report test suites and processes
print(f"Running {discover_suite.countTestCases()} total tests (serial fallback)", file=sys.stderr)
if args.verbose > 1:
print(file=sys.stderr)
# Run the tests in serial
start_time = time.perf_counter()
with multiprocessing.Manager() as manager:
test_manager = ParallelTestManager(manager, args, temp_dir)
results = [test_manager.run_tests(discover_suite)]
stop_time = time.perf_counter()
test_duration = stop_time - start_time
# Aggregate parallel test run results
tests_run = 0
errors = []
failures = []
skipped = 0
expected_failures = 0
unexpected_successes = 0
test_records = [] # NVIDIA Modification
for result in results:
tests_run += result[0]
errors.extend(result[1])
failures.extend(result[2])
skipped += result[3]
expected_failures += result[4]
unexpected_successes += result[5]
test_records += result[6] # NVIDIA Modification
is_success = not (errors or failures or unexpected_successes)
# Compute test info
infos = []
if failures:
infos.append(f"failures={len(failures)}")
if errors:
infos.append(f"errors={len(errors)}")
if skipped:
infos.append(f"skipped={skipped}")
if expected_failures:
infos.append(f"expected failures={expected_failures}")
if unexpected_successes:
infos.append(f"unexpected successes={unexpected_successes}")
# Report test errors
if errors or failures:
print(file=sys.stderr)
for error in errors:
print(error, file=sys.stderr)
for failure in failures:
print(failure, file=sys.stderr)
elif args.verbose > 0:
print(file=sys.stderr)
# Test report
print(unittest.TextTestResult.separator2, file=sys.stderr)
print(f'Ran {tests_run} {"tests" if tests_run > 1 else "test"} in {test_duration:.3f}s', file=sys.stderr)
print(file=sys.stderr)
print(f'{"OK" if is_success else "FAILED"}{" (" + ", ".join(infos) + ")" if infos else ""}', file=sys.stderr)
if test_records and args.junit_report_xml:
# NVIDIA modification to report results in Junit XML format
write_junit_results(
args.junit_report_xml,
test_records,
tests_run,
len(failures) + unexpected_successes,
len(errors),
skipped,
test_duration,
)
# Return an error status on failure
if not is_success:
parser.exit(status=len(errors) + len(failures) + unexpected_successes)
# Coverage?
if args.coverage:
# Combine the coverage files
cov_options = {}
cov_options["config_file"] = True # Grab configuration from pyproject.toml (must install coverage[toml])
cov = coverage.Coverage(**cov_options)
cov.combine(data_paths=[os.path.join(temp_dir, x) for x in os.listdir(temp_dir)])
# Coverage report
print(file=sys.stderr)
percent_covered = cov.report(ignore_errors=True, file=sys.stderr)
print(f"Total coverage is {percent_covered:.2f}%", file=sys.stderr)
# HTML coverage report
if args.coverage_html:
cov.html_report(directory=args.coverage_html, ignore_errors=True)
# XML coverage report
if args.coverage_xml:
cov.xml_report(outfile=args.coverage_xml, ignore_errors=True)
# Fail under
if args.coverage_fail_under and percent_covered < args.coverage_fail_under:
parser.exit(status=2)
def _convert_select_pattern(pattern):
if "*" not in pattern:
return f"*{pattern}*"
return pattern
@contextmanager
def _coverage(args, temp_dir):
# Running tests with coverage?
if args.coverage:
# Generate a random coverage data file name - file is deleted along with containing directory
with tempfile.NamedTemporaryFile(dir=temp_dir, delete=False) as coverage_file:
pass
# Create the coverage object
cov_options = {
"branch": args.coverage_branch,
"data_file": coverage_file.name,
# NVIDIA Modification removed unneeded options
}
cov_options["config_file"] = True # Grab configuration from pyproject.toml (must install coverage[toml])
cov = coverage.Coverage(**cov_options)
try:
# Start measuring code coverage
cov.start()
# Yield for unit test running
yield cov
finally:
# Stop measuring code coverage
cov.stop()
# Save the collected coverage data to the data file
cov.save()
else:
# Not running tests with coverage - yield for unit test running
yield None
# Iterate module-level test suites - all top-level test suites returned from TestLoader.discover
def _iter_module_suites(test_suite):
for module_suite in test_suite:
if module_suite.countTestCases():
yield module_suite
# Iterate class-level test suites - test suites that contains test cases
def _iter_class_suites(test_suite):
has_cases = any(isinstance(suite, unittest.TestCase) for suite in test_suite)
if has_cases:
yield test_suite
else:
for suite in test_suite:
yield from _iter_class_suites(suite)
# Iterate test cases (methods)
def _iter_test_cases(test_suite):
if isinstance(test_suite, unittest.TestCase):
yield test_suite
else:
for suite in test_suite:
yield from _iter_test_cases(suite)
class ParallelTestManager:
def __init__(self, manager, args, temp_dir):
self.args = args
self.temp_dir = temp_dir
self.failfast = manager.Event()
def run_tests(self, test_suite):
# Fail fast?
if self.failfast.is_set():
return [0, [], [], 0, 0, 0, []] # NVIDIA Modification
# NVIDIA Modification for GitLab
import warp.tests.unittest_utils
warp.tests.unittest_utils.coverage_enabled = self.args.coverage
warp.tests.unittest_utils.coverage_temp_dir = self.temp_dir
warp.tests.unittest_utils.coverage_branch = self.args.coverage_branch
if self.args.junit_report_xml:
resultclass = ParallelJunitTestResult
else:
resultclass = ParallelTextTestResult
# Run unit tests
with _coverage(self.args, self.temp_dir):
runner = unittest.TextTestRunner(
stream=StringIO(),
resultclass=resultclass, # NVIDIA Modification
verbosity=self.args.verbose,
failfast=self.args.failfast,
buffer=self.args.buffer,
)
result = runner.run(test_suite)
# Set failfast, if necessary
if result.shouldStop:
self.failfast.set()
        # Return (test_count, errors, failures, skipped_count, expected_failure_count, unexpected_success_count, test_records)
return (
result.testsRun,
[self._format_error(result, error) for error in result.errors],
[self._format_error(result, failure) for failure in result.failures],
len(result.skipped),
len(result.expectedFailures),
len(result.unexpectedSuccesses),
result.test_record, # NVIDIA modification
)
@staticmethod
def _format_error(result, error):
return "\n".join(
[
unittest.TextTestResult.separator1,
result.getDescription(error[0]),
unittest.TextTestResult.separator2,
error[1],
]
)
class ParallelTextTestResult(unittest.TextTestResult):
def __init__(self, stream, descriptions, verbosity):
stream = type(stream)(sys.stderr)
super().__init__(stream, descriptions, verbosity)
self.test_record = [] # NVIDIA modification
def startTest(self, test):
if self.showAll:
self.stream.writeln(f"{self.getDescription(test)} ...")
self.stream.flush()
super(unittest.TextTestResult, self).startTest(test)
def _add_helper(self, test, dots_message, show_all_message):
if self.showAll:
self.stream.writeln(f"{self.getDescription(test)} ... {show_all_message}")
elif self.dots:
self.stream.write(dots_message)
self.stream.flush()
def addSuccess(self, test):
super(unittest.TextTestResult, self).addSuccess(test)
self._add_helper(test, ".", "ok")
def addError(self, test, err):
super(unittest.TextTestResult, self).addError(test, err)
self._add_helper(test, "E", "ERROR")
def addFailure(self, test, err):
super(unittest.TextTestResult, self).addFailure(test, err)
self._add_helper(test, "F", "FAIL")
def addSkip(self, test, reason):
super(unittest.TextTestResult, self).addSkip(test, reason)
self._add_helper(test, "s", f"skipped {reason!r}")
def addExpectedFailure(self, test, err):
super(unittest.TextTestResult, self).addExpectedFailure(test, err)
self._add_helper(test, "x", "expected failure")
def addUnexpectedSuccess(self, test):
super(unittest.TextTestResult, self).addUnexpectedSuccess(test)
self._add_helper(test, "u", "unexpected success")
def printErrors(self):
pass
def initialize_test_process(lock, shared_index, args, temp_dir):
"""Necessary operations to be executed at the start of every test process.
Currently this function can be used to set a separate Warp cache. (NVIDIA modification)
If the environment variable `WARP_CACHE_ROOT` is detected, the cache will be placed in the provided path.
It also ensures that Warp is initialized prior to running any tests.
"""
with lock:
shared_index.value += 1
worker_index = shared_index.value
with _coverage(args, temp_dir):
import warp as wp
if args.no_shared_cache:
from warp.thirdparty import appdirs
if "WARP_CACHE_ROOT" in os.environ:
cache_root_dir = os.path.join(os.getenv("WARP_CACHE_ROOT"), f"{wp.config.version}-{worker_index:03d}")
else:
cache_root_dir = appdirs.user_cache_dir(
appname="warp", appauthor="NVIDIA", version=f"{wp.config.version}-{worker_index:03d}"
)
wp.config.kernel_cache_dir = cache_root_dir
wp.build.clear_kernel_cache()
else:
# Initialize Warp is if hasn't been initialized already
wp.init()
if __name__ == "__main__": # pragma: no cover
main()
| 20,998 | Python | 36.232269 | 139 | 0.611439 |
NVIDIA/warp/docs/faq.rst | FAQ
===
How does Warp relate to other Python projects for GPU programming, e.g.: Numba, Taichi, cuPy, PyTorch, etc.?
------------------------------------------------------------------------------------------------------------
Warp is inspired by many of these projects, and is closely related to
Numba and Taichi which both expose kernel programming to Python. These
frameworks map to traditional GPU programming models, so many of the
high-level concepts are similar, however there are some functionality
and implementation differences.
Compared to Numba, Warp supports a smaller subset of Python, but
offers auto-differentiation of kernel programs, which is useful for
machine learning. Compared to Taichi, Warp uses C++/CUDA as an
intermediate representation, which makes it convenient to implement and
expose low-level routines. In addition, we are building in
data structures to support geometry processing (meshes, sparse volumes,
point clouds, USD data) as first-class citizens that are not exposed in
other runtimes.
Warp does not offer a full tensor-based programming model like PyTorch
and JAX, but is designed to work well with these frameworks through data
sharing mechanisms like ``__cuda_array_interface__``. For computations
that map well to tensors (e.g.: neural-network inference) it makes sense
to use these existing tools. For problems involving, e.g., a lot of sparsity,
conditional logic, or heterogeneous workloads (like the ones we often find in
simulation and graphics), a kernel-based programming model like
the one in Warp is often more convenient since it gives users control over
individual threads.
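
The data sharing mentioned above can be illustrated with a small, hedged sketch: assuming PyTorch with
CUDA support is installed, a GPU tensor can be wrapped as a Warp array via ``wp.from_torch()`` and
updated in place by a Warp kernel without any copies.

.. code:: python

    import torch
    import warp as wp

    @wp.kernel
    def scale(a: wp.array(dtype=float), s: float):
        tid = wp.tid()
        a[tid] = a[tid] * s

    # a PyTorch tensor living on the GPU
    t = torch.ones(16, dtype=torch.float32, device="cuda")

    # zero-copy view of the same memory as a Warp array
    a = wp.from_torch(t)

    wp.launch(scale, dim=a.shape[0], inputs=[a, 2.0], device="cuda")

    # the original tensor now holds the scaled values
    print(t)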
Does Warp support all of the Python language?
---------------------------------------------
No, Warp supports a subset of Python that maps well to the GPU. Our goal
is to not have any performance cliffs so that users can expect
consistently good behavior from kernels that is close to native code.
Examples of unsupported concepts that don’t map well to the GPU are
dynamic types, list comprehensions, exceptions, garbage collection, etc.
When should I call ``wp.synchronize()``?
----------------------------------------
One of the common sources of confusion for new users is when calls to
``wp.synchronize()`` are necessary. The answer is “almost never”!
Synchronization is quite expensive, and should generally be avoided
unless necessary. Warp naturally takes care of synchronization between
operations (e.g.: kernel launches, device memory copies).
For example, the following requires no manual synchronization, as the
conversion to NumPy will automatically synchronize:
.. code:: python
# run some kernels
wp.launch(kernel_1, dim, [array_x, array_y], device="cuda")
wp.launch(kernel_2, dim, [array_y, array_z], device="cuda")
# bring data back to host (and implicitly synchronize)
x = array_z.numpy()
The *only* case where manual synchronization is needed is when copies
are being performed back to CPU asynchronously, e.g.:
.. code:: python
# copy data back to cpu from gpu, all copies will happen asynchronously to Python
wp.copy(cpu_array_1, gpu_array_1)
wp.copy(cpu_array_2, gpu_array_2)
wp.copy(cpu_array_3, gpu_array_3)
# ensure that the copies have finished
wp.synchronize()
# return a numpy wrapper around the cpu arrays, note there is no implicit synchronization here
a1 = cpu_array_1.numpy()
a2 = cpu_array_2.numpy()
a3 = cpu_array_3.numpy()
For more information about asynchronous operations, please refer to the :doc:`concurrency documentation <modules/concurrency>`
and :ref:`synchronization guidance <synchronization_guidance>`.
What happens when you differentiate a function like ``wp.abs(x)``?
------------------------------------------------------------------
Non-smooth functions such as ``y=|x|`` do not have a single unique
gradient at ``x=0``; rather, they have what is known as a
``subgradient``, which is formally the convex hull of directional
derivatives at that point. The way that Warp (and most
auto-differentiation frameworks) handles these points is to pick an
arbitrary gradient from this set, e.g.: for ``wp.abs()``, it will
arbitrarily choose the gradient to be 1.0 at the origin. You can find
the implementation for these functions in ``warp/native/builtin.h``.
Most optimizers (particularly ones that exploit stochasticity), are not
sensitive to the choice of which gradient to use from the subgradient,
although there are exceptions.
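
As a concrete sketch of this behavior (assuming a default device and using illustrative array values),
recording a kernel that applies ``wp.abs()`` on a tape and propagating unit output gradients shows the
arbitrary subgradient choice at the origin:

.. code:: python

    import warp as wp

    @wp.kernel
    def abs_kernel(x: wp.array(dtype=float), y: wp.array(dtype=float)):
        tid = wp.tid()
        y[tid] = wp.abs(x[tid])

    x = wp.array([-2.0, 0.0, 3.0], dtype=float, requires_grad=True)
    y = wp.zeros(3, dtype=float, requires_grad=True)

    tape = wp.Tape()
    with tape:
        wp.launch(abs_kernel, dim=3, inputs=[x, y])

    # propagate unit gradients from y back to x
    tape.backward(grads={y: wp.full(3, 1.0, dtype=float)})

    # prints approximately [-1. 1. 1.]; the middle entry is the arbitrary
    # subgradient chosen at x=0 (1.0 in the current implementation)
    print(x.grad.numpy())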
Does Warp support multi-GPU programming?
----------------------------------------
Yes! Since version ``0.4.0`` we support allocating, launching, and
copying between multiple GPUs in a single process. We follow the naming
conventions of PyTorch and use aliases such as ``cuda:0``, ``cuda:1``,
``cpu`` to identify individual devices.
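
As a brief sketch (assuming at least two CUDA devices are available), arrays can be allocated and
kernels launched on specific GPUs by passing these aliases explicitly, and ``wp.copy()`` can transfer
data between them:

.. code:: python

    import warp as wp

    @wp.kernel
    def inc(a: wp.array(dtype=float)):
        tid = wp.tid()
        a[tid] = a[tid] + 1.0

    a0 = wp.zeros(1024, dtype=float, device="cuda:0")
    a1 = wp.zeros(1024, dtype=float, device="cuda:1")

    # launch the same kernel independently on each device
    wp.launch(inc, dim=a0.shape[0], inputs=[a0], device="cuda:0")
    wp.launch(inc, dim=a1.shape[0], inputs=[a1], device="cuda:1")

    # copy the contents of a1 (cuda:1) into a0 (cuda:0)
    wp.copy(a0, a1)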
Should I switch to Warp over IsaacGym/PhysX?
----------------------------------------------
Warp is not a replacement for IsaacGym, IsaacSim, or PhysX. While Warp
does offer some physical simulation capabilities, it is primarily aimed
at developers who need differentiable physics rather than a fully
featured physics engine. Warp is also integrated with IsaacGym and is
great for performing auxiliary tasks such as reward and observation
computations for reinforcement learning.
Why aren't assignments to Warp arrays supported outside of kernels?
------------------------------------------------------------------------
For best performance, reading and writing data that is living on the GPU can
only be performed inside Warp CUDA kernels. Otherwise individual element accesses
such as ``array[i] = 1.0`` in Python scope would require prohibitively slow device
synchronization and copies.
We recommend either initializing Warp arrays from other native arrays
(e.g.: Python list, NumPy array, ...) or launching a kernel to set their values.
For the common use case of wanting to fill an array with a given value, we
also support the following forms:
- ``wp.full(8, 1.23, dtype=float)``: initializes a new array of 8 float values set
to ``1.23``.
- ``arr.fill_(1.23)``: sets the content of an existing float array to ``1.23``.
- ``arr[:4].fill_(1.23)``: sets the first four values of an existing float array to ``1.23``.
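For example, a minimal sketch of these patterns (assuming NumPy is imported as ``np``; sizes and values are arbitrary):

.. code:: python

    # initialize from an existing native array
    arr = wp.array(np.linspace(0.0, 1.0, 8), dtype=float, device="cuda")

    # create a new array pre-filled with a value
    ones = wp.full(8, 1.23, dtype=float)

    # overwrite an existing array, or a slice of it, in place
    arr.fill_(1.23)
    arr[:4].fill_(0.0)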
| 6,254 | reStructuredText | 45.333333 | 126 | 0.7181 |
NVIDIA/warp/docs/installation.rst | Installation
============
Python version 3.9 or newer is recommended. Warp can run on x86-64 and ARMv8 CPUs on Windows, Linux, and macOS. GPU support requires a CUDA-capable NVIDIA GPU and driver (minimum GeForce GTX 9xx).
The easiest way to install Warp is from `PyPI <https://pypi.org/project/warp-lang>`_:
.. code-block:: sh
$ pip install warp-lang
Pre-built binary packages are also available on the `Releases <https://github.com/NVIDIA/warp/releases>`_ page.
To install in your local Python environment extract the archive and run the following command from the root directory:
.. code-block:: sh
$ pip install .
Dependencies
------------
Warp supports Python versions 3.7 onwards, with 3.9 or newer recommended for full functionality. Note that :ref:`some optional dependencies may not support the latest version of Python<conda>`.
`NumPy <https://numpy.org>`_ must be installed.
The following optional dependencies are required to support certain features:
* `usd-core <https://pypi.org/project/usd-core>`_: Required for some Warp examples, ``warp.sim.parse_usd()``, and ``warp.render.UsdRenderer``.
* `JAX <https://jax.readthedocs.io/en/latest/installation.html>`_: Required for JAX interoperability (see :ref:`jax-interop`).
* `PyTorch <https://pytorch.org/get-started/locally/>`_: Required for PyTorch interoperability (see :ref:`pytorch-interop`).
* `NVTX for Python <https://github.com/NVIDIA/NVTX#python>`_: Required to use :class:`wp.ScopedTimer(use_nvtx=True) <warp.ScopedTimer>`.
Building the Warp documentation requires:
* `Sphinx <https://www.sphinx-doc.org>`_
* `Furo <https://github.com/pradyunsg/furo>`_
* `Sphinx-copybutton <https://sphinx-copybutton.readthedocs.io/en/latest/index.html>`_
Building from source
--------------------
For developers who want to build the library themselves the following tools are required:
* Microsoft Visual Studio (Windows), minimum version 2019
* GCC (Linux), minimum version 9.4
* `CUDA Toolkit <https://developer.nvidia.com/cuda-toolkit>`_, minimum version 11.5
* `Git Large File Storage <https://git-lfs.com>`_
After cloning the repository, users should run:
.. code-block:: console
$ python build_lib.py
This will generate the ``warp.dll`` (Windows) / ``warp.so`` (Linux) core library. It
will search for the CUDA Toolkit in the default install directory. This path can
be overridden by setting the ``CUDA_PATH`` environment variable. Alternatively,
the path to the CUDA Toolkit can be passed to the build command as
``--cuda_path="..."``. After building, the Warp package should be installed using:
.. code-block:: console
$ pip install -e .
This ensures that subsequent modifications to the library will be
reflected in the Python package.
.. _conda:
Conda environments
------------------
Some modules, such as ``usd-core``, don't support the latest Python version.
To manage running Warp and other projects on different Python versions one can
make use of an environment management system such as
`Conda <https://docs.conda.io/>`__.
**WARNING:** When building and running Warp in a different environment, make sure
the build environment has the same C++ runtime library version, or an older
one, than the execution environment. Otherwise Warp's shared libraries may end
up looking for a newer runtime library version than the one available in the
execution environment. For example on Linux this error could occur:
``OSError: <...>/libstdc++.so.6: version `GLIBCXX_3.4.30' not found (required by <...>/warp/warp/bin/warp.so)``
This can be solved by installing a newer C++ runtime version in the runtime
conda environment using ``conda install -c conda-forge libstdcxx-ng=12.1`` or
newer. Or, the build environment's C++ toolchain can be downgraded using
``conda install -c conda-forge libstdcxx-ng=8.5``. Or, one can ``activate`` or
``deactivate`` conda environments as needed for building vs. running Warp.
| 3,915 | reStructuredText | 41.565217 | 196 | 0.739208 |
NVIDIA/warp/docs/basics.rst | Basics
======
.. currentmodule:: warp
Initialization
--------------
When calling a Warp function like ``wp.launch()`` for the first time,
Warp will initialize itself and, as a result, will print some startup information
about the compute devices available, driver versions, and the location for any
generated kernel code, e.g.:
.. code:: bat
Warp 1.0.0 initialized:
CUDA Toolkit: 11.8, Driver: 12.1
Devices:
"cpu" | AMD64 Family 25 Model 33 Stepping 0, AuthenticAMD
"cuda:0" | NVIDIA GeForce RTX 4080 (sm_89)
Kernel cache: C:\Users\mmacklin\AppData\Local\NVIDIA\warp\Cache\1.0.0
It's also possible to explicitly initialize Warp with the ``wp.init()`` method as such::
import warp as wp
wp.init()
Kernels
-------
In Warp, compute kernels are defined as Python functions and annotated with the ``@wp.kernel`` decorator, as follows::
@wp.kernel
def simple_kernel(a: wp.array(dtype=wp.vec3),
b: wp.array(dtype=wp.vec3),
c: wp.array(dtype=float)):
# get thread index
tid = wp.tid()
# load two vec3s
x = a[tid]
y = b[tid]
# compute the dot product between vectors
r = wp.dot(x, y)
# write result back to memory
c[tid] = r
Because Warp kernels are compiled to native C++/CUDA code, all the function input arguments must be statically typed. This allows
Warp to generate fast code that executes at essentially native speeds. Because kernels may be run on either the CPU
or GPU, they cannot access arbitrary global state from the Python environment. Instead they must read and write data
through their input parameters such as arrays.
Warp kernel functions have a one-to-one correspondence with CUDA kernels.
To launch a kernel with 1024 threads, we use :func:`wp.launch() <warp.launch>`
as follows::
wp.launch(kernel=simple_kernel, # kernel to launch
dim=1024, # number of threads
inputs=[a, b, c], # parameters
device="cuda") # execution device
Inside the kernel, we retrieve the *thread index* of each thread using the :func:`wp.tid() <tid>` built-in function::
# get thread index
i = wp.tid()
Kernels can be launched with 1D, 2D, 3D, or 4D grids of threads.
To launch a 2D grid of threads to process a 1024x1024 image, we could write::
wp.launch(kernel=compute_image, dim=(1024, 1024), inputs=[img], device="cuda")
We retrieve a 2D thread index inside the kernel as follows:
.. code-block:: python
@wp.kernel
def compute_image(pixel_data: wp.array2d(dtype=wp.vec3)):
# get thread index
i, j = wp.tid()
Arrays
------
Memory allocations are exposed via the ``wp.array`` type. Arrays wrap an underlying memory allocation that may live in
either host (CPU) or device (GPU) memory. Arrays are strongly typed and store a linear sequence of built-in values
(``float``, ``int``, ``vec3``, ``matrix33``, etc).
Arrays can be allocated similar to PyTorch::
# allocate an uninitialized array of vec3s
v = wp.empty(shape=n, dtype=wp.vec3, device="cuda")
# allocate a zero-initialized array of quaternions
q = wp.zeros(shape=n, dtype=wp.quat, device="cuda")
# allocate and initialize an array from a NumPy array
# will be automatically transferred to the specified device
a = np.ones((10, 3), dtype=np.float32)
v = wp.from_numpy(a, dtype=wp.vec3, device="cuda")
By default, Warp arrays that are initialized from external data (e.g.: NumPy arrays, lists, tuples) will copy the data to new memory for the
device specified. However, it is possible for arrays to alias external memory using the ``copy=False`` parameter to the
array constructor provided the input is contiguous and on the same device. See the :doc:`/modules/interoperability`
section for more details on sharing memory with external frameworks.
To read GPU array data back to CPU memory we can use :func:`array.numpy`::
# bring data from device back to host
view = device_array.numpy()
This will automatically synchronize with the GPU to ensure that any outstanding work has finished, and will
copy the array back to CPU memory where it is passed to NumPy.
Calling :func:`array.numpy` on a CPU array will return a zero-copy NumPy view
onto the Warp data.
Please see the :ref:`Arrays Reference <Arrays>` for more details.
User Functions
--------------
Users can write their own functions using the ``@wp.func`` decorator, for example::
@wp.func
def square(x: float):
return x*x
Kernels can call user functions defined in the same module or defined in a different module.
As the example shows, return type hints for user functions are **optional**.
Anything that can be done in a Warp kernel can also be done in a user function **with the exception**
of :func:`wp.tid() <tid>`. The thread index can be passed in through the arguments of a user function if it is required.
Functions can accept arrays and structs as inputs:
.. code-block:: python
@wp.func
def lookup(foos: wp.array(dtype=wp.uint32), index: int):
return foos[index]
Functions may also return multiple values:
.. code-block:: python
@wp.func
def multi_valued_func(a: wp.float32, b: wp.float32):
return a + b, a - b, a * b, a / b
@wp.kernel
def test_multi_valued_kernel(test_data1: wp.array(dtype=wp.float32), test_data2: wp.array(dtype=wp.float32)):
tid = wp.tid()
d1, d2 = test_data1[tid], test_data2[tid]
a, b, c, d = multi_valued_func(d1, d2)
User functions may also be overloaded by defining multiple function signatures with the same function name:
.. code-block:: python
@wp.func
def custom(x: int):
return x + 1
@wp.func
def custom(x: float):
return x + 1.0
@wp.func
def custom(x: wp.vec3):
return x + wp.vec3(1.0, 0.0, 0.0)
See :ref:`Generic Functions` for details on using ``typing.Any`` in user function signatures.
See :doc:`modules/differentiability` for details on how to define custom gradient functions,
custom replay functions, and custom native functions.
User Structs
--------------
Users can define their own structures using the ``@wp.struct`` decorator, for example::
@wp.struct
class MyStruct:
pos: wp.vec3
vel: wp.vec3
active: int
indices: wp.array(dtype=int)
Structs may be used as a ``dtype`` for ``wp.arrays``, and may be passed to kernels directly as arguments.
Please see the :ref:`Structs Reference <Structs>` for more details.
.. note::
As with kernel parameters, all attributes of a struct must have valid type hints at class definition time.
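For example, a minimal sketch of constructing a struct on the host and passing it to a kernel (the kernel body and values are purely illustrative)::

    @wp.kernel
    def update(args: MyStruct):
        tid = wp.tid()
        if args.active == 1:
            args.indices[tid] = tid

    s = MyStruct()
    s.pos = wp.vec3(0.0, 1.0, 0.0)
    s.vel = wp.vec3(0.1, 0.0, 0.0)
    s.active = 1
    s.indices = wp.zeros(16, dtype=int)

    wp.launch(update, dim=16, inputs=[s])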
Compilation Model
-----------------
Warp uses a Python->C++/CUDA compilation model that generates kernel code from Python function definitions.
All kernels belonging to a Python module are compiled at runtime into dynamic libraries and PTX.
The result is then cached between application restarts for fast startup times.
Note that compilation is triggered on the first kernel launch for that module.
Any kernels registered in the module with ``@wp.kernel`` will be included in the shared library.
.. image:: ./img/compiler_pipeline.svg
Language Details
----------------
To support GPU computation and differentiability, there are some differences from the CPython runtime.
Built-in Types
^^^^^^^^^^^^^^
Warp supports a number of built-in math types similar to high-level shading languages,
e.g. ``vec2, vec3, vec4, mat22, mat33, mat44, quat, array``.
All built-in types have value semantics so that expressions such as ``a = b``
generate a copy of the variable ``b`` rather than a reference.
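For instance, a small sketch of this copy behavior inside a kernel::

    @wp.kernel
    def value_semantics_demo():
        a = wp.vec3(1.0, 2.0, 3.0)
        b = a          # b receives a copy of a, not a reference
        b[0] = 10.0    # modifying b leaves a unchanged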
Strong Typing
^^^^^^^^^^^^^
Unlike Python, in Warp all variables must be typed.
Types are inferred from source expressions and function signatures using the Python typing extensions.
All kernel parameters must be annotated with the appropriate type, for example::
@wp.kernel
def simple_kernel(a: wp.array(dtype=vec3),
b: wp.array(dtype=vec3),
c: float):
Tuple initialization is not supported, instead variables should be explicitly typed::
# invalid
a = (1.0, 2.0, 3.0)
# valid
a = wp.vec3(1.0, 2.0, 3.0)
Limitations and Unsupported Features
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
See :doc:`limitations` for a list of Warp limitations and unsupported features.
| 8,483 | reStructuredText | 32.011673 | 142 | 0.685489 |
NVIDIA/warp/docs/limitations.rst | Limitations
===========
.. currentmodule:: warp
This section summarizes various limitations and currently unsupported features in Warp.
Problems, questions, and feature requests can be opened on `GitHub Issues <https://github.com/NVIDIA/warp/issues>`_.
Unsupported Features
--------------------
To achieve good performance on GPUs some dynamic language features are not supported:
* Lambda functions
* List comprehensions
* Exceptions
* Recursion
* Runtime evaluation of expressions, e.g.: eval()
* Dynamic structures such as lists, sets, dictionaries, etc.
Kernels and User Functions
--------------------------
* Strings cannot be passed into kernels.
* Short-circuit evaluation is not supported.
* :func:`wp.atomic_add() <atomic_add>` does not support ``wp.int64``.
* :func:`wp.tid() <tid>` cannot be called from user functions.
* Modifying the value of a :class:`wp.constant() <constant>` during runtime will not trigger
recompilation of the affected kernels if the modules have already been loaded
(e.g. through a :func:`wp.launch() <launch>` or a ``wp.load_module()``).
* A :class:`wp.constant() <constant>` can suffer precision loss if used with ``wp.float64``
as it is initially assigned to a ``wp.float32`` variable in the generated code.
A limitation of Warp is that each dimension of the grid used to launch a kernel must be representable as a 32-bit
signed integer. Therefore, no single dimension of a grid should exceed :math:`2^{31}-1`.
Warp also currently uses a fixed block size of 256 (CUDA) threads per block.
By default, Warp will try to process one element from the Warp grid in one CUDA thread.
This is not always possible for kernels launched with multi-dimensional grid bounds, as there are
`hardware limitations <https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications-technical-specifications-per-compute-capability>`_
on CUDA block dimensions.
Warp will automatically fall back to using
`grid-stride loops <https://developer.nvidia.com/blog/cuda-pro-tip-write-flexible-kernels-grid-stride-loops/>`_ when
it is not possible for a CUDA thread to process only one element from the Warp grid.
When this happens, some CUDA threads may process more than one element from the Warp grid.
Users can also set the ``max_blocks`` parameter to fine-tune the grid-striding behavior of kernels, even for kernels that are otherwise
able to process one Warp-grid element per CUDA thread.
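As a minimal sketch (``my_kernel``, ``a``, and ``n`` are placeholders), the block count can be capped so that the launch falls back to grid-stride loops:

.. code-block:: python

    wp.launch(my_kernel, dim=n, inputs=[a], device="cuda:0", max_blocks=256)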
Arrays
------
* Arrays can have a maximum of four dimensions.
* Each dimension of a Warp array cannot be greater than the maximum value representable by a 32-bit signed integer,
:math:`2^{31}-1`.
* There are currently no data types that support complex numbers.
Structs
-------
* Structs cannot have generic members, i.e. of type ``typing.Any``.
Volumes
-------
* The sparse-volume *topology* cannot be changed after the tiles for the :class:`Volume` have been allocated.
Multiple Processes
------------------
* A CUDA context created in the parent process cannot be used in a *forked* child process.
Use the spawn start method instead, or avoid creating CUDA contexts in the parent process.
* There can be issues with using same user kernel cache directory when running with multiple processes.
A workaround is to use a separate cache directory for every process.
See the :ref:`Configuration` section for how the cache directory may be changed.
Scalar Math Functions
---------------------
This section details some limitations and differences from CPython semantics for scalar math functions.
Modulus Operator
""""""""""""""""
Deviation from Python behavior can occur when the modulus operator (``%``) is used with a negative dividend or divisor
(also see :func:`wp.mod() <mod>`).
The behavior of the modulus operator in a Warp kernel follows that of C++11: The sign of the result follows the sign of
*dividend*. In Python, the sign of the result follows the sign of the *divisor*:
.. code-block:: python
@wp.kernel
def modulus_test():
# Kernel-scope behavior:
a = -3 % 2 # a is -1
b = 3 % -2 # b is 1
c = 3 % 0 # Undefined behavior
# Python-scope behavior:
a = -3 % 2 # a is 1
b = 3 % -2 # b is -1
c = 3 % 0 # ZeroDivisionError
Power Operator
""""""""""""""
The power operator (``**``) in Warp kernels only works on floating-point numbers (also see :func:`wp.pow <pow>`).
In Python, the power operator can also be used on integers.
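For example, a small sketch of the difference (the commented-out line shows the unsupported integer case):

.. code-block:: python

    @wp.kernel
    def power_test():
        # Kernel-scope behavior:
        a = 2.0 ** 3.0        # a is 8.0
        b = wp.pow(2.0, 3.0)  # equivalent built-in, b is 8.0
        # c = 2 ** 3          # integer operands are not supported in kernels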
Inverse Sine and Cosine
"""""""""""""""""""""""
:func:`wp.asin() <asin>` and :func:`wp.acos() <acos>` automatically clamp the input to fall in the range [-1, 1].
In Python, using :external+python:py:func:`math.asin` or :external+python:py:func:`math.acos`
with an input outside [-1, 1] raises a ``ValueError`` exception.
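A small sketch of the clamping behavior:

.. code-block:: python

    @wp.kernel
    def clamped_trig_test():
        # Kernel-scope behavior:
        a = wp.asin(1.5)   # input clamped to 1.0, a is pi/2
        b = wp.acos(-2.0)  # input clamped to -1.0, b is pi
        # In Python scope, math.asin(1.5) would raise a ValueError instead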
Rounding
""""""""
:func:`wp.round() <round>` rounds halfway cases away from zero, but Python's
:external+python:py:func:`round` rounds halfway cases to the nearest even
choice (Banker's rounding). Use :func:`wp.rint() <rint>` when Banker's rounding is
desired. Unlike Python, the return type in Warp of both of these rounding
functions is the same type as the input:
.. code-block:: python
@wp.kernel
def halfway_rounding_test():
# Kernel-scope behavior:
a = wp.round(0.5) # a is 1.0
b = wp.rint(0.5) # b is 0.0
c = wp.round(1.5) # c is 2.0
d = wp.rint(1.5) # d is 2.0
# Python-scope behavior:
a = round(0.5) # a is 0
c = round(1.5) # c is 2
| 5,507 | reStructuredText | 38.625899 | 176 | 0.706374 |
NVIDIA/warp/docs/profiling.rst | Profiling
=========
ScopedTimer
-----------
``wp.ScopedTimer`` objects can be used to gain some basic insight into the performance of Warp applications:
.. code:: python
@wp.kernel
def inc_loop(a: wp.array(dtype=float), num_iters: int):
i = wp.tid()
for j in range(num_iters):
a[i] += 1.0
n = 10_000_000
devices = wp.get_cuda_devices()
# pre-allocate host arrays for readback
host_arrays = [
wp.empty(n, dtype=float, device="cpu", pinned=True) for _ in devices
]
# code for profiling
with wp.ScopedTimer("Demo"):
for i, device in enumerate(devices):
a = wp.zeros(n, dtype=float, device=device)
wp.launch(inc_loop, dim=n, inputs=[a, 500], device=device)
wp.launch(inc_loop, dim=n, inputs=[a, 1000], device=device)
wp.launch(inc_loop, dim=n, inputs=[a, 1500], device=device)
wp.copy(host_arrays[i], a)
The only required argument for the ``ScopedTimer`` constructor is a string label, which can be used to distinguish multiple timed code sections when reading the output. The snippet above will print a message like this:
.. code:: console
Demo took 0.52 ms
By default, ``ScopedTimer`` measures the elapsed time on the CPU and does not introduce any CUDA synchronization. Since most CUDA operations are asynchronous, the result does not include the time spent executing kernels and memory transfers on the CUDA device. It's still a useful measurement, because it shows how long it took to schedule the CUDA operations on the CPU.
To get the total amount of time including the device executions time, create the ``ScopedTimer`` with the ``synchronize=True`` flag. This is equivalent to calling ``wp.synchronize()`` before and after the timed section of code. Synchronizing at the beginning ensures that all prior CUDA work has completed prior to starting the timer. Synchronizing at the end ensures that all timed work finishes before stopping the timer. With the example above, the result might look like this:
.. code:: console
Demo took 4.91 ms
The timing values will vary slightly from run to run and will depend on the system hardware and current load. The sample results presented here were obtained on a system with one RTX 4090 GPU, one RTX 3090 GPU, and an AMD Ryzen Threadripper Pro 5965WX CPU. For each GPU, the code allocates and initializes an array with 10 million floating point elements. It then launches the ``inc_loop`` kernel three times on the array. The kernel increments each array element a given number of times - 500, 1000, and 1500. Finally, the code copies the array contents to the CPU.
Profiling complex programs with many asynchronous and concurrent operations can be tricky. Profiling tools like `NVIDIA Nsight Systems <https://developer.nvidia.com/nsight-systems>`_ can present the results in a visual way and capture a plethora of timing information for deeper study. For profiling tools capable of visualizing NVTX ranges, ``ScopedTimer`` can be created with the ``use_nvtx=True`` argument. This will mark the CPU execution range on the timeline for easier visual inspection. The color can be customized using the ``color`` argument, as shown below:
.. code:: python
with wp.ScopedTimer("Demo", use_nvtx=True, color="yellow"):
...
To use NVTX integration, you will need to install the `NVIDIA NVTX Python package <https://github.com/NVIDIA/NVTX/tree/release-v3/python>`_.
.. code::
pip install nvtx
The package allows you to insert custom NVTX ranges into your code (``nvtx.annotate``) and customize the `colors <https://github.com/NVIDIA/NVTX/blob/release-v3/python/nvtx/colors.py>`_.
Here is what the demo code looks like in Nsight Systems (click to enlarge the image):
.. image:: ./img/profiling_nosync.png
:width: 95%
:align: center
There are a few noteworthy observations we can make from this capture. Scheduling and launching the work on the CPU takes about half a millisecond, as shown in the `NVTX / Start & End` row. This time also includes the allocation of arrays on both CUDA devices. We can see that the execution on each device is asynchronous with respect to the host, since CUDA operations start running before the yellow `Demo` NVTX range finishes. We can also see that the operations on different CUDA devices execute concurrently, including kernels and memory transfers. The kernels run faster on the first CUDA device (RTX 4090) than the second device (RTX 3090). Memory transfers take about the same time on each device. Using pinned CPU arrays for the transfer destinations allows the transfers to run asynchronously without involving the CPU.
Check out the :doc:`concurrency documentation <modules/concurrency>` for more information about asynchronous operations.
Note that synchronization was not enabled in this run, so the NVTX range only spans the CPU operations used to schedule the CUDA work. When synchronization is enabled, the timer will wait for all CUDA work to complete, so the NVTX range will span the synchronization of both devices:
.. code:: python
with wp.ScopedTimer("Demo", use_nvtx=True, color="yellow", synchronize=True):
...
.. image:: ./img/profiling_sync.png
:width: 95%
:align: center
CUDA Activity Profiling
-----------------------
``ScopedTimer`` supports timing individual CUDA activities like kernels and memory operations. This is done by measuring the time taken between :ref:`CUDA events <cuda_events>` on the device. To get information about CUDA activities, pass the ``cuda_filter`` argument to the ``ScopedTimer`` constructor. The ``cuda_filter`` can be a bitwise combination of the following values:
.. list-table:: CUDA profiling flags
:widths: 25 50
:header-rows: 0
   * - ``wp.TIMING_KERNEL``
- Warp kernels (this includes all kernels written in Python as ``@wp.kernel``)
   * - ``wp.TIMING_KERNEL_BUILTIN``
- Builtin kernels (this includes kernels used by the Warp library under the hood)
* - ``wp.TIMING_MEMCPY``
- CUDA memory transfers (host-to-device, device-to-host, device-to-device, and peer-to-peer)
* - ``wp.TIMING_MEMSET``
- CUDA memset operations (e.g., zeroing out memory in ``wp.zeros()``)
* - ``wp.TIMING_GRAPH``
- CUDA graph launches
* - ``wp.TIMING_ALL``
- Combines all of the above for convenience.
When a non-zero ``cuda_filter`` is specified, Warp will inject CUDA events for timing purposes and report the results when the ``ScopedTimer`` finishes. This adds some overhead to the code, so it should be used only during profiling.
CUDA event timing resolution is about 0.5 microseconds. The reported execution time of short operations will likely be longer than the operations actually took on the device. This is due to the timing resolution and the overhead of added instrumentation code. For more precise analysis of short operations, a tool like Nsight Systems can report more accurate data.
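For instance, a minimal sketch combining two of the flags above to time only Warp kernels and memory transfers:

.. code:: python

    with wp.ScopedTimer("Demo", cuda_filter=wp.TIMING_KERNEL | wp.TIMING_MEMCPY):
        ...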
Enabling CUDA profiling with the demo code can be done like this:
.. code:: python
with wp.ScopedTimer("Demo", cuda_filter=wp.TIMING_ALL):
...
This adds additional information to the output:
.. code::
CUDA timeline:
----------------+---------+------------------------
Time | Device | Activity
----------------+---------+------------------------
0.021504 ms | cuda:0 | memset
0.163840 ms | cuda:0 | forward kernel inc_loop
0.306176 ms | cuda:0 | forward kernel inc_loop
0.451584 ms | cuda:0 | forward kernel inc_loop
2.455520 ms | cuda:0 | memcpy DtoH
0.051200 ms | cuda:1 | memset
0.374784 ms | cuda:1 | forward kernel inc_loop
0.707584 ms | cuda:1 | forward kernel inc_loop
1.042432 ms | cuda:1 | forward kernel inc_loop
2.136096 ms | cuda:1 | memcpy DtoH
CUDA activity summary:
----------------+---------+------------------------
Total time | Count | Activity
----------------+---------+------------------------
0.072704 ms | 2 | memset
3.046400 ms | 6 | forward kernel inc_loop
4.591616 ms | 2 | memcpy DtoH
CUDA device summary:
----------------+---------+------------------------
Total time | Count | Device
----------------+---------+------------------------
3.398624 ms | 5 | cuda:0
4.312096 ms | 5 | cuda:1
Demo took 0.92 ms
The first section is the `CUDA timeline`, which lists all captured activities in issue order. We see a `memset` on device ``cuda:0``, which corresponds to clearing the memory in ``wp.zeros()``. This is followed by three launches of the ``inc_loop`` kernel on ``cuda:0`` and a memory transfer from device to host issued by ``wp.copy()``. The remaining entries repeat similar operations on device ``cuda:1``.
The next section is the `CUDA activity summary`, which reports the cumulative time taken by each activity type. Here, the `memsets`, kernel launches, and memory transfer operations are grouped together. This is a good way to see where time is being spent overall. The `memsets` are quite fast. The ``inc_loop`` kernel launches took about three milliseconds of combined GPU time. The memory transfers took the longest, over four milliseconds.
The `CUDA device summary` shows the total time taken per device. We see that device ``cuda:0`` took about 3.4 ms to complete the tasks and device ``cuda:1`` took about 4.3 ms. This summary can be used to assess the workload distribution in multi-GPU applications.
The final line shows the time taken by the CPU, as with the default ``ScopedTimer`` options (without synchronization in this case).
Customizing the output
~~~~~~~~~~~~~~~~~~~~~~
It is possible to customize how the activity timing results are reported. The function :func:`warp.timing_print` is used by default. To use a different reporting function, pass it as the ``report_func`` argument to ``ScopedTimer``. The custom report function should take a list of :class:`warp.TimingResult` objects as the first argument. Each result in the list corresponds to a single activity and the list represents the complete recorded timeline. By manually traversing the list, you can customize the formatting of the output, apply custom sorting rules, and aggregate the results as desired. The second argument is a string indent that should be printed at the beginning of each line. This is for compatibility with ``ScopedTimer`` indenting rules used with nested timers.
Here is an example of a custom reporting function, which aggregates the total time spend in forward and backward kernels:
.. code:: python
def print_custom_report(results, indent=""):
forward_time = 0
backward_time = 0
for r in results:
# aggregate all forward kernels
if r.name.startswith("forward kernel"):
forward_time += r.elapsed
# aggregate all backward kernels
elif r.name.startswith("backward kernel"):
backward_time += r.elapsed
print(f"{indent}Forward kernels : {forward_time:.6f} ms")
print(f"{indent}Backward kernels : {backward_time:.6f} ms")
Let's apply it to one of the Warp examples:
.. code:: python
from warp.examples.optim.example_cloth_throw import Example
example = Example(None)
example.use_graph = False # disable graphs so we get timings for individual kernels
with wp.ScopedTimer("Example", cuda_filter=wp.TIMING_KERNEL, report_func=print_custom_report):
for iteration in range(5):
example.step()
This produces a report like this:
.. code::
Forward kernels : 187.098367 ms
Backward kernels : 245.070177 ms
Using the activity timing functions directly
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It is also possible to capture activity timings without using the ``ScopedTimer`` at all. Simply call :func:`warp.timing_begin` to start recording activity timings and :func:`warp.timing_end` to stop and get a list of recorded activities. You can use :func:`warp.timing_print` to print the default activity report or generate your own report from the list of results.
.. code:: python
wp.timing_begin(cuda_filter=wp.TIMING_ALL)
...
results = wp.timing_end()
wp.timing_print(results)
Limitations
~~~~~~~~~~~
Currently, detailed activity timing is only available for CUDA devices, but support for CPU timing may be added in the future.
The activity profiling only records activities initiated using the Warp API. It does not capture CUDA activity initiated by other frameworks. A profiling tool like Nsight Systems can be used to examine whole program activities.
Using CUDA Events
-----------------
CUDA events can be used for timing purposes outside of the ``ScopedTimer``. Here is an example:
.. code:: python
with wp.ScopedDevice("cuda:0") as device:
# ensure the module is loaded
wp.load_module(device=device)
# create events with enabled timing
e1 = wp.Event(enable_timing=True)
e2 = wp.Event(enable_timing=True)
n = 10000000
# start timing...
wp.record_event(e1)
a = wp.zeros(n, dtype=float)
wp.launch(inc, dim=n, inputs=[a])
# ...end timing
wp.record_event(e2)
# get elapsed time between the two events
elapsed = wp.get_event_elapsed_time(e1, e2)
print(elapsed)
The events must be created with the flag ``enable_timing=True``. The first event is recorded at the start of the timed code and the second event is recorded at the end. The function :func:`warp.get_event_elapsed_time()` is used to compute the time difference between the two events. We must ensure that both events have completed on the device before calling :func:`warp.get_event_elapsed_time()`. By default, this function will synchronize on the second event using :func:`warp.synchronize_event()`. If that is not desired, the user may pass the ``synchronize=False`` flag and must use some other means of ensuring that both events have completed prior to calling the function.
Note that timing very short operations may yield inflated results, due to the timing resolution of CUDA events and the overhead of the profiling code. In most cases, CUDA activity profiling with ``ScopedTimer`` will have less overhead and better precision. For the most accurate results, a profiling tool such as Nsight Systems should be used. The main benefit of using the manual event timing API is that it allows timing arbitrary sections of code rather than individual activities.
Profiling API Reference
-----------------------
.. autoclass:: warp.ScopedTimer
.. autofunction:: warp.get_event_elapsed_time
.. autofunction:: warp.synchronize_event
.. autoclass:: warp.TimingResult
.. autofunction:: warp.timing_begin
.. autofunction:: warp.timing_end
.. autofunction:: warp.timing_print
| 15,040 | reStructuredText | 53.104316 | 836 | 0.697074 |
NVIDIA/warp/docs/debugging.rst | Debugging
=========
Printing Values
---------------
Often one of the best debugging methods is to simply print values from kernels. Warp supports printing all built-in
types using the ``print()`` function, e.g.::
v = wp.vec3(1.0, 2.0, 3.0)
print(v)
In addition, formatted C-style printing is available through the ``wp.printf()`` function, e.g.::
x = 1.0
i = 2
wp.printf("A float value %f, an int value: %d", x, i)
.. note:: Formatted printing is only available for scalar types (e.g.: ``int`` and ``float``) not vector types.
Printing Launches
-----------------
For complex applications it can be difficult to understand the order-of-operations that lead to a bug. To help diagnose
these issues Warp supports a simple option to print out all launches and arguments to the console::
wp.config.print_launches = True
Step-Through Debugging
----------------------
It is possible to attach IDE debuggers such as Visual Studio to Warp processes to step through generated kernel code.
Users should first compile the kernels in debug mode by setting::
wp.config.mode = "debug"
This setting ensures that line numbers and debug symbols are generated correctly. After launching the Python process,
the debugger should be attached, and a breakpoint inserted into the generated code.
.. note:: Generated kernel code is not a 1:1 correspondence with the original Python code, but individual operations can still be replayed and variables inspected.
Also see :github:`warp/tests/walkthrough_debug.py` for an example of how to debug Warp kernel code running on the CPU.
Generated Code
--------------
Occasionally it can be useful to inspect the generated code for debugging or profiling.
The generated code for kernels is stored in a central cache location in the user's home directory, the cache location
is printed at startup when ``wp.init()`` is called, for example:
.. code-block:: console
Warp 0.8.1 initialized:
CUDA Toolkit: 11.8, Driver: 11.8
Devices:
"cpu" | AMD64 Family 25 Model 33 Stepping 0, AuthenticAMD
"cuda:0" | NVIDIA GeForce RTX 3090 (sm_86)
"cuda:1" | NVIDIA GeForce RTX 2080 Ti (sm_75)
Kernel cache: C:\Users\LukasW\AppData\Local\NVIDIA Corporation\warp\Cache\0.8.1
The kernel cache has folders beginning with ``wp_`` that contain the generated C++/CUDA code and the compiled binaries
for each module that was compiled at runtime.
The name of each folder ends with a hexadecimal hash constructed from the module contents to avoid potential
conflicts when using multiple processes and to support the caching of runtime-defined kernels.
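The resolved cache location can also be queried programmatically, e.g. (a minimal sketch)::

    import warp as wp

    wp.init()
    # wp.config.kernel_cache_dir is updated to reflect the directory actually in use
    print(wp.config.kernel_cache_dir)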
Bounds Checking
---------------
Warp will perform bounds checking in debug build configurations to ensure that all array accesses lie within the defined
shape.
CUDA Verification
-----------------
It is possible to generate out-of-bounds memory access violations through poorly formed kernel code or inputs. In this
case the CUDA runtime will detect the violation and put the CUDA context into an error state. Subsequent kernel launches
may silently fail, which can lead to hard-to-diagnose issues.
If a CUDA error is suspected a simple verification method is to enable::
wp.config.verify_cuda = True
This setting will check the CUDA context after every operation to ensure that it is still valid. If an error is
encountered it will raise an exception that often helps to narrow down the problematic kernel.
.. note:: Verifying CUDA state at each launch requires synchronizing CPU and GPU which has a significant overhead. Users should ensure this setting is only used during debugging.
| 3,652 | reStructuredText | 39.588888 | 178 | 0.736857 |
NVIDIA/warp/docs/index.rst | NVIDIA Warp Documentation
=========================
Warp is a Python framework for writing high-performance simulation and graphics code. Warp takes
regular Python functions and JIT compiles them to efficient kernel code that can run on the CPU or GPU.
Warp is designed for spatial computing and comes with a rich set of primitives that make it easy to write
programs for physics simulation, perception, robotics, and geometry processing. In addition, Warp kernels
are differentiable and can be used as part of machine-learning pipelines with frameworks such as PyTorch and JAX.
Below are some examples of simulations implemented using Warp:
.. image:: ./img/header.jpg
Quickstart
----------
Python version 3.9 or newer is recommended. Warp can run on x86-64 and ARMv8 CPUs on Windows, Linux, and macOS. GPU support requires a CUDA-capable NVIDIA GPU and driver (minimum GeForce GTX 9xx).
The easiest way to install Warp is from `PyPI <https://pypi.org/project/warp-lang>`_:
.. code-block:: sh
$ pip install warp-lang
Pre-built binary packages are also available on the `Releases <https://github.com/NVIDIA/warp/releases>`_ page.
To install in your local Python environment extract the archive and run the following command from the root directory:
.. code-block:: sh
$ pip install .
Basic Example
-------------
A first example program that computes the lengths of random 3D vectors is given below::
import warp as wp
import numpy as np
num_points = 1024
@wp.kernel
def length(points: wp.array(dtype=wp.vec3),
lengths: wp.array(dtype=float)):
# thread index
tid = wp.tid()
# compute distance of each point from origin
lengths[tid] = wp.length(points[tid])
# allocate an array of 3d points
points = wp.array(np.random.rand(num_points, 3), dtype=wp.vec3)
lengths = wp.zeros(num_points, dtype=float)
# launch kernel
wp.launch(kernel=length,
dim=len(points),
inputs=[points, lengths])
print(lengths)
Additional Examples
-------------------
The `examples <https://github.com/NVIDIA/warp/tree/main/warp/examples>`_ directory in
the GitHub repository contains a number of scripts that show how to
implement different simulation methods using the Warp API. Most examples
will generate USD files containing time-sampled animations in the
same directory as the example. Before running examples users should
ensure that the ``usd-core`` package is installed using::
pip install usd-core
Examples can be run from the command-line as follows::
python -m warp.examples.<example_subdir>.<example>
Most examples can be run on either the CPU or a CUDA-capable device, but a handful require a CUDA-capable device. These are marked at the top of the example script.
USD files can be viewed or rendered inside NVIDIA
`Omniverse <https://developer.nvidia.com/omniverse>`_,
Pixar's UsdView, and Blender. Note that Preview in macOS is not
recommended as it has limited support for time-sampled animations.
Built-in unit tests can be run from the command-line as follows::
python -m warp.tests
examples/core
^^^^^^^^^^^^^
.. list-table::
:class: gallery
* - .. image:: ./img/examples/core_dem.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/core/example_dem.py
- .. image:: ./img/examples/core_fluid.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/core/example_fluid.py
- .. image:: ./img/examples/core_graph_capture.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/core/example_graph_capture.py
- .. image:: ./img/examples/core_marching_cubes.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/core/example_marching_cubes.py
* - dem
- fluid
- graph capture
- marching cubes
* - .. image:: ./img/examples/core_mesh.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/core/example_mesh.py
- .. image:: ./img/examples/core_nvdb.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/core/example_nvdb.py
- .. image:: ./img/examples/core_raycast.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/core/example_raycast.py
- .. image:: ./img/examples/core_raymarch.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/core/example_raymarch.py
* - mesh
- nvdb
- raycast
- raymarch
* - .. image:: ./img/examples/core_sph.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/core/example_sph.py
- .. image:: ./img/examples/core_torch.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/core/example_torch.py
- .. image:: ./img/examples/core_wave.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/core/example_wave.py
-
* - sph
- torch
- wave
-
examples/fem
^^^^^^^^^^^^
.. list-table::
:class: gallery
* - .. image:: ./img/examples/fem_apic_fluid.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/fem/example_apic_fluid.py
- .. image:: ./img/examples/fem_convection_diffusion.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/fem/example_convection_diffusion.py
- .. image:: ./img/examples/fem_diffusion_3d.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/fem/example_diffusion_3d.py
- .. image:: ./img/examples/fem_diffusion.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/fem/example_diffusion.py
* - apic fluid
- convection diffusion
- diffusion 3d
- diffusion
* - .. image:: ./img/examples/fem_mixed_elasticity.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/fem/example_mixed_elasticity.py
- .. image:: ./img/examples/fem_navier_stokes.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/fem/example_navier_stokes.py
- .. image:: ./img/examples/fem_stokes_transfer.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/fem/example_stokes_transfer.py
- .. image:: ./img/examples/fem_stokes.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/fem/example_stokes.py
* - mixed elasticity
- navier stokes
- stokes transfer
- stokes
examples/optim
^^^^^^^^^^^^^^
.. list-table::
:class: gallery
* - .. image:: ./img/examples/optim_bounce.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/optim/example_bounce.py
- .. image:: ./img/examples/optim_cloth_throw.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/optim/example_cloth_throw.py
- .. image:: ./img/examples/optim_diffray.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/optim/example_diffray.py
- .. image:: ./img/examples/optim_drone.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/optim/example_drone.py
* - bounce
- cloth throw
- diffray
- drone
* - .. image:: ./img/examples/optim_inverse_kinematics.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/optim/example_inverse_kinematics.py
- .. image:: ./img/examples/optim_spring_cage.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/optim/example_spring_cage.py
- .. image:: ./img/examples/optim_trajectory.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/optim/example_trajectory.py
- .. image:: ./img/examples/optim_walker.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/optim/example_walker.py
* - inverse kinematics
- spring cage
- trajectory
- walker
examples/sim
^^^^^^^^^^^^
.. list-table::
:class: gallery
* - .. image:: ./img/examples/sim_cartpole.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/sim/example_cartpole.py
- .. image:: ./img/examples/sim_cloth.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/sim/example_cloth.py
- .. image:: ./img/examples/sim_granular.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/sim/example_granular.py
- .. image:: ./img/examples/sim_granular_collision_sdf.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/sim/example_granular_collision_sdf.py
* - cartpole
- cloth
- granular
- granular collision sdf
* - .. image:: ./img/examples/sim_jacobian_ik.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/sim/example_jacobian_ik.py
- .. image:: ./img/examples/sim_quadruped.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/sim/example_quadruped.py
- .. image:: ./img/examples/sim_rigid_chain.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/sim/example_rigid_chain.py
- .. image:: ./img/examples/sim_rigid_contact.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/sim/example_rigid_contact.py
* - jacobian ik
- quadruped
- rigid chain
- rigid contact
* - .. image:: ./img/examples/sim_rigid_force.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/sim/example_rigid_force.py
- .. image:: ./img/examples/sim_rigid_gyroscopic.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/sim/example_rigid_gyroscopic.py
- .. image:: ./img/examples/sim_rigid_soft_contact.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/sim/example_rigid_soft_contact.py
- .. image:: ./img/examples/sim_soft_body.png
:target: https://github.com/NVIDIA/warp/tree/main/warp/examples/sim/example_soft_body.py
* - rigid force
- rigid gyroscopic
- rigid soft contact
- soft body
Omniverse
---------
Omniverse extensions for Warp are available in the extension registry inside
Omniverse Kit or USD Composer.
The ``omni.warp.core`` extension installs Warp into the Omniverse Application's
Python environment, which allows users to import the module in their scripts and nodes.
The ``omni.warp`` extension provides a collection of OmniGraph nodes and sample
scenes demonstrating uses of Warp in OmniGraph.
Enabling the ``omni.warp`` extension automatically enables the ``omni.warp.core`` extension.
Please see the
`Omniverse Warp Documentation <https://docs.omniverse.nvidia.com/extensions/latest/ext_warp.html>`_
for more details on how to use Warp in Omniverse.
Learn More
----------
Please see the following resources for additional background on Warp:
- `Product Page <https://developer.nvidia.com/warp-python>`_
- `GTC 2022
Presentation <https://www.nvidia.com/en-us/on-demand/session/gtcspring22-s41599>`_
- `GTC 2021
Presentation <https://www.nvidia.com/en-us/on-demand/session/gtcspring21-s31838>`_
- `SIGGRAPH Asia 2021 Differentiable Simulation
Course <https://dl.acm.org/doi/abs/10.1145/3476117.3483433>`_
- `GTC 2024 Presentation <https://www.nvidia.com/en-us/on-demand/session/gtc24-s63345>`_
The underlying technology in Warp has been used in a number of research
projects at NVIDIA including the following publications:
- Accelerated Policy Learning with Parallel Differentiable Simulation -
Xu, J., Makoviychuk, V., Narang, Y., Ramos, F., Matusik, W., Garg,
A., & Macklin, M.
`(2022) <https://short-horizon-actor-critic.github.io>`__
- DiSECt: Differentiable Simulator for Robotic Cutting - Heiden, E.,
Macklin, M., Narang, Y., Fox, D., Garg, A., & Ramos, F
`(2021) <https://github.com/NVlabs/DiSECt>`__
- gradSim: Differentiable Simulation for System Identification and
Visuomotor Control - Murthy, J. Krishna, Miles Macklin, Florian
Golemo, Vikram Voleti, Linda Petrini, Martin Weiss, Breandan
Considine et
al. `(2021) <https://gradsim.github.io>`__
Support
-------
Problems, questions, and feature requests can be opened on
`GitHub Issues <https://github.com/NVIDIA/warp/issues>`_.
The Warp team also monitors the **#warp** channel on the public
`Omniverse Discord <https://discord.com/invite/nvidiaomniverse>`_ server, come chat with us!
Versioning
----------
Versions take the format X.Y.Z, similar to `Python itself <https://devguide.python.org/developer-workflow/development-cycle/#devcycle>`_:
* Increments in X are reserved for major reworks of the project causing disruptive incompatibility (or reaching the 1.0 milestone).
* Increments in Y are for regular releases with a new set of features.
* Increments in Z are for bug fixes. In principle there are no new features. Can be omitted if 0 or not relevant.
This is similar to `Semantic Versioning <https://semver.org/>`_ but less strict around backward compatibility.
Like with Python, some breaking changes can be present between minor versions if well documented and gradually introduced.
Note that prior to 0.11.0 this schema was not strictly adhered to.
License
-------
Warp is provided under the NVIDIA Software License, please see
`LICENSE.md <https://github.com/NVIDIA/warp/blob/main/LICENSE.md>`_ for the full license text.
Contributing
------------
Contributions and pull requests from the community are welcome and are taken under the
terms described in the **9. Feedback** section of the `license <https://github.com/NVIDIA/warp/blob/main/LICENSE.md>`_.
`CONTRIBUTING.md <https://github.com/NVIDIA/warp/blob/main/CONTRIBUTING.md>`_ provides additional information on
how to open a pull request for Warp.
Citing
------
If you use Warp in your research please use the following citation:
.. code:: bibtex
@misc{warp2022,
title= {Warp: A High-performance Python Framework for GPU Simulation and Graphics},
author = {Miles Macklin},
month = {March},
year = {2022},
note= {NVIDIA GPU Technology Conference (GTC)},
howpublished = {\url{https://github.com/nvidia/warp}}
}
Full Table of Contents
----------------------
.. toctree::
:maxdepth: 2
:caption: User's Guide
installation
basics
modules/devices
modules/differentiability
modules/generics
modules/interoperability
configuration
debugging
limitations
faq
.. toctree::
:maxdepth: 2
:caption: Advanced Topics
modules/allocators
modules/concurrency
profiling
.. toctree::
:maxdepth: 2
:caption: Core Reference
modules/runtime
modules/functions
.. toctree::
:maxdepth: 2
:caption: Simulation Reference
modules/sim
modules/sparse
modules/fem
modules/render
.. toctree::
:hidden:
:caption: Project Links
GitHub <https://github.com/NVIDIA/warp>
PyPI <https://pypi.org/project/warp-lang>
Discord <https://discord.com/channels/827959428476174346/953756751977648148>
:ref:`Full Index <genindex>`
| 15,243 | reStructuredText | 38.187661 | 196 | 0.685954 |
NVIDIA/warp/docs/configuration.rst | .. _Configuration:
Configuration
=============
Warp has settings at the global, module, and kernel level that can be used to fine-tune the compilation and verbosity
of Warp programs. In cases in which a setting can be changed at multiple levels (e.g.: ``enable_backward``),
the setting at the more-specific scope takes precedence.
Global Settings
---------------
To change a setting, prepend ``wp.config.`` to the name of the variable and assign a value to it.
Some settings may be changed on the fly, while others need to be set prior to calling ``wp.init()`` to take effect.
For example, the location of the user kernel cache can be changed with:
.. code-block:: python
import os
import warp as wp
example_dir = os.path.dirname(os.path.realpath(__file__))
# set default cache directory before wp.init()
wp.config.kernel_cache_dir = os.path.join(example_dir, "tmp", "warpcache1")
wp.init()
Basic Global Settings
^^^^^^^^^^^^^^^^^^^^^
+------------------------------------------------+---------+-------------+--------------------------------------------------------------------------+
| Field | Type |Default Value| Description |
+================================================+=========+=============+==========================================================================+
|``verify_fp`` | Boolean | ``False`` | If ``True``, Warp will check that inputs and outputs are finite before |
| | | | and/or after various operations. **Has performance implications.** |
+------------------------------------------------+---------+-------------+--------------------------------------------------------------------------+
|``verify_cuda`` | Boolean | ``False`` | If ``True``, Warp will check for CUDA errors after every launch and |
| | | | memory operation. CUDA error verification cannot be used during graph |
| | | | capture. **Has performance implications.** |
+------------------------------------------------+---------+-------------+--------------------------------------------------------------------------+
|``print_launches`` | Boolean | ``False`` | If ``True``, Warp will print details of every kernel launch to standard |
| | | | out (e.g. launch dimensions, inputs, outputs, device, etc.). |
| | | | **Has performance implications.** |
+------------------------------------------------+---------+-------------+--------------------------------------------------------------------------+
|``mode`` | String |``"release"``| Controls whether to compile Warp kernels in debug or release mode. |
| | | | Valid choices are ``"release"`` or ``"debug"``. |
| | | | **Has performance implications.** |
+------------------------------------------------+---------+-------------+--------------------------------------------------------------------------+
|``max_unroll``                                  | Integer | 16          | The maximum fixed-size loop to unroll. Note that ``max_unroll`` does not |
|                                                |         |             | consider the total number of iterations in nested loops. This can result |
| | | | in a large amount of automatically generated code if each nested loop is |
| | | | below the ``max_unroll`` threshold. |
+------------------------------------------------+---------+-------------+--------------------------------------------------------------------------+
|``verbose`` | Boolean | ``False`` | If ``True``, additional information will be printed to standard out |
| | | | during code generation, compilation, etc. |
+------------------------------------------------+---------+-------------+--------------------------------------------------------------------------+
|``verbose_warnings`` | Boolean | ``False`` | If ``True``, Warp warnings will include extra information such as |
| | | | the source file and line number. |
+------------------------------------------------+---------+-------------+--------------------------------------------------------------------------+
|``quiet`` | Boolean | ``False`` | If ``True``, Warp module initialization messages will be disabled. |
| | | | This setting does not affect error messages and warnings. |
+------------------------------------------------+---------+-------------+--------------------------------------------------------------------------+
|``kernel_cache_dir`` | String | ``None`` | The path to the directory used for the user kernel cache. Subdirectories |
| | | | beginning with ``wp_`` will be created in this directory. If ``None``, |
| | | | a directory will be automatically determined using the value of the |
| | | | environment variable ``WARP_CACHE_PATH`` or the |
| | | | `appdirs.user_cache_directory <https://github.com/ActiveState/appdirs>`_ |
| | | | if ``WARP_CACHE_PATH`` is also not set. ``kernel_cache_dir`` will be |
| | | | updated to reflect the location of the cache directory used. |
+------------------------------------------------+---------+-------------+--------------------------------------------------------------------------+
|``enable_backward`` | Boolean | ``True`` | If ``True``, backward passes of kernels will be compiled by default. |
| | | | Disabling this setting can reduce kernel compilation times. |
+------------------------------------------------+---------+-------------+--------------------------------------------------------------------------+
|``enable_graph_capture_module_load_by_default`` | Boolean | ``True`` | If ``True``, ``wp.capture_begin()`` will call ``wp.force_load()`` to |
| | | | compile and load Warp kernels from all imported modules before graph |
| | | | capture if the ``force_module_load`` argument is not explicitly provided |
| | | | to ``wp.capture_begin()``. This setting is ignored if the CUDA driver |
| | | | supports CUDA 12.3 or newer. |
+------------------------------------------------+---------+-------------+--------------------------------------------------------------------------+
|``enable_mempools_at_init`` | Boolean | ``False`` | If ``True``, ``wp.init()`` will enable pooled allocators on all CUDA |
| | | | devices that support memory pools. |
| | | | Pooled allocators are generally faster and can be used during CUDA graph |
| | | | capture. For the caveats, see CUDA Pooled Allocators documentation. |
+------------------------------------------------+---------+-------------+--------------------------------------------------------------------------+
Advanced Global Settings
^^^^^^^^^^^^^^^^^^^^^^^^
+--------------------+---------+-------------+--------------------------------------------------------------------------+
| Field | Type |Default Value| Description |
+====================+=========+=============+==========================================================================+
|``cache_kernels`` | Boolean | ``True`` | If ``True``, kernels that have already been compiled from previous |
| | | | application launches will not be recompiled. |
+--------------------+---------+-------------+--------------------------------------------------------------------------+
|``cuda_output`` | String | ``None`` | The preferred CUDA output format for kernels. Valid choices are ``None``,|
| | | | ``"ptx"``, and ``"cubin"``. If ``None``, a format will be determined |
| | | | automatically. |
+--------------------+---------+-------------+--------------------------------------------------------------------------+
|``ptx_target_arch`` | Integer | 70 | The target architecture for PTX generation. |
+--------------------+---------+-------------+--------------------------------------------------------------------------+
|``llvm_cuda`` | Boolean | ``False`` | If ``True``, Clang/LLVM will be used to compile CUDA code instead of |
|                    |         |             | NVRTC.                                                                   |
+--------------------+---------+-------------+--------------------------------------------------------------------------+
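These advanced fields also live in ``wp.config``; for example, a development workflow that always recompiles kernels and prefers PTX output might use the following sketch (illustrative values only):

.. code:: python

    import warp as wp

    wp.config.cache_kernels = False  # ignore previously cached kernels
    wp.config.cuda_output = "ptx"    # skip automatic format selection

    wp.init()
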
Module Settings
---------------
Module-level settings to control runtime compilation and code generation may be changed by passing a dictionary of
option pairs to ``wp.set_module_options()``.
For example, compilation of backward passes for all kernels in a module can be disabled with:
.. code:: python
wp.set_module_options({"enable_backward": False})
The options for a module can also be queried using ``wp.get_module_options()``.
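As a quick illustration, the returned dictionary contains the fields listed in the table below (a sketch, assuming the options of the calling module):

.. code:: python

    options = wp.get_module_options()
    print(options["enable_backward"], options["max_unroll"])
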
+--------------------+---------+-------------+--------------------------------------------------------------------------+
| Field | Type |Default Value| Description |
+====================+=========+=============+==========================================================================+
|``mode`` | String | Global | Controls whether to compile the module's kernels in debug or release |
| | | setting | mode by default. Valid choices are ``"release"`` or ``"debug"``. |
+--------------------+---------+-------------+--------------------------------------------------------------------------+
|``max_unroll`` | Integer | Global | The maximum fixed-size loop to unroll. Note that ``max_unroll`` does not |
| | | setting | consider the total number of iterations in nested loops. This can result |
| | | | in a large amount of automatically generated code if each nested loop is |
| | | | below the ``max_unroll`` threshold. |
+--------------------+---------+-------------+--------------------------------------------------------------------------+
|``enable_backward`` | Boolean | Global | If ``True``, backward passes of kernels will be compiled by default. |
|                    |         | setting     | The module-level setting takes precedence over the global setting.      |
+--------------------+---------+-------------+--------------------------------------------------------------------------+
|``fast_math`` | Boolean | ``False`` | If ``True``, CUDA kernels will be compiled with the ``--use_fast_math`` |
| | | | compiler option, which enables some fast math operations that are faster |
| | | | but less accurate. |
+--------------------+---------+-------------+--------------------------------------------------------------------------+
|``cuda_output`` | String | ``None`` | The preferred CUDA output format for kernels. Valid choices are ``None``,|
| | | | ``"ptx"``, and ``"cubin"``. If ``None``, a format will be determined |
| | | | automatically. The module-level setting takes precedence over the global |
| | | | setting. |
+--------------------+---------+-------------+--------------------------------------------------------------------------+
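Several options can be combined in a single call; for example, a module whose kernels should be easy to debug might use the following sketch (values are illustrative):

.. code:: python

    wp.set_module_options({"mode": "debug", "max_unroll": 4, "fast_math": False})
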
Kernel Settings
---------------
``enable_backward`` is currently the only setting that can also be configured on a per-kernel level.
Backward-pass compilation can be disabled by passing an argument into the ``@wp.kernel`` decorator
as in the following example:
.. code-block:: python
@wp.kernel(enable_backward=False)
def scale_2(
    x: wp.array(dtype=float),
    y: wp.array(dtype=float),
):
    y[0] = x[0] ** 2.0
| 14,673 | reStructuredText | 88.475609 | 163 | 0.321679 |
NVIDIA/warp/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from datetime import date
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import warp as wp # noqa: E402
# -- Project information -----------------------------------------------------
project = "Warp"
copyright = f"2022-{date.today().year}, NVIDIA"
author = "NVIDIA"
version = wp.__version__
release = version
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon", # Convert docstrings to reStructuredText
"sphinx.ext.intersphinx",
"sphinx.ext.autosummary",
"sphinx.ext.extlinks", # Markup to shorten external links
"sphinx.ext.githubpages",
# Third-party extensions:
"sphinx_copybutton",
# 'sphinx_tabs.tabs',
# 'autodocsumm'
]
# put type hints inside the description instead of the signature (easier to read)
autodoc_typehints = "description"
# document class *and* __init__ methods
autoclass_content = "both"
autodoc_member_order = "bysource"
# autodoc_typehints_format
# add_module_names = False
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"numpy": ("https://numpy.org/doc/stable", None),
"jax": ("https://jax.readthedocs.io/en/latest", None),
"pytorch": ("https://pytorch.org/docs/stable", None),
}
extlinks = {
"github": ("https://github.com/NVIDIA/warp/blob/main/%s", "%s"),
}
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# sphinx_copybutton settings
copybutton_prompt_text = r">>> |\.\.\. |\$ "
copybutton_prompt_is_regexp = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "furo"
html_title = f"Warp {version}"
html_show_sphinx = False
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = ["custom.css"]
html_theme_options = {
"top_of_page_button": None,
"light_css_variables": {
"admonition-title-font-size": "100%",
"admonition-font-size": "100%",
"color-api-pre-name": "#4e9a06", # "#76b900",
"color-api-name": "#4e9a06", # "#76b900",
"color-admonition-title--seealso": "#ffffff",
"color-admonition-title-background--seealso": "#448aff",
"color-admonition-title-background--note": "#76b900",
"color-admonition-title--note": "#ffffff",
},
"dark_css_variables": {
"color-admonition-title-background--note": "#76b900",
"color-admonition-title--note": "#ffffff",
},
"light_logo": "logo-light-mode.png",
"dark_logo": "logo-dark-mode.png",
"footer_icons": [
{
"name": "GitHub",
"url": "https://github.com/NVIDIA/warp",
"html": """
<svg stroke="currentColor" fill="currentColor" stroke-width="0" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z"></path>
</svg>
""",
"class": "",
},
],
}
| 4,711 | Python | 37 | 624 | 0.615156 |
NVIDIA/warp/docs/modules/generics.rst | Generics
========
Warp supports writing generic kernels and functions, which act as templates that can be instantiated with different concrete types.
This allows you to write code once and reuse it with multiple data types.
The concepts discussed on this page also apply to :ref:`Runtime Kernel Creation`.
Generic Kernels
---------------
Generic kernel definition syntax is the same as regular kernels, but you can use ``typing.Any`` in place of concrete types:
.. code:: python
from typing import Any
# generic kernel definition using Any as a placeholder for concrete types
@wp.kernel
def scale(x: wp.array(dtype=Any), s: Any):
    i = wp.tid()
    x[i] = s * x[i]
data = [1, 2, 3, 4, 5, 6, 7, 8, 9]
n = len(data)
x16 = wp.array(data, dtype=wp.float16)
x32 = wp.array(data, dtype=wp.float32)
x64 = wp.array(data, dtype=wp.float64)
# run the generic kernel with different data types
wp.launch(scale, dim=n, inputs=[x16, wp.float16(3)])
wp.launch(scale, dim=n, inputs=[x32, wp.float32(3)])
wp.launch(scale, dim=n, inputs=[x64, wp.float64(3)])
print(x16)
print(x32)
print(x64)
Under the hood, Warp will automatically generate new instances of the generic kernel to match the given argument types.
Type Inference
~~~~~~~~~~~~~~
When a generic kernel is being launched, Warp infers the concrete types from the arguments. ``wp.launch()`` handles generic kernels without any special syntax, but we should be mindful of the data types passed as arguments to make sure that the correct types are inferred.
- Scalars can be passed as regular Python numeric values (e.g., ``42`` or ``0.5``). Python integers are interpreted as ``wp.int32`` and Python floating point values are interpreted as ``wp.float32``. To specify a different data type and to avoid ambiguity, Warp data types should be used instead (e.g., ``wp.int64(42)`` or ``wp.float16(0.5)``).
- Vectors and matrices should be passed as Warp types rather than tuples or lists (e.g., ``wp.vec3f(1.0, 2.0, 3.0)`` or ``wp.mat22h([[1.0, 0.0], [0.0, 1.0]])``); see the sketch after this list.
- Warp arrays and structs can be passed normally.
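For example, a hypothetical generic kernel with a vector parameter should be launched with an explicit Warp vector value so that the intended overload can be inferred:

.. code:: python

    @wp.kernel
    def translate(points: wp.array(dtype=Any), offset: Any):
        i = wp.tid()
        points[i] = points[i] + offset

    points = wp.zeros(16, dtype=wp.vec3f)

    # pass a typed Warp vector rather than a tuple such as (1.0, 2.0, 3.0)
    wp.launch(translate, dim=16, inputs=[points, wp.vec3f(1.0, 2.0, 3.0)])
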
.. _implicit_instantiation:
Implicit Instantiation
~~~~~~~~~~~~~~~~~~~~~~
When you launch a generic kernel with a new set of data types, Warp automatically creates a new instance of this kernel with the given types. This is convenient, but there are some downsides to this implicit instantiation.
Consider these three generic kernel launches:
.. code:: python
wp.launch(scale, dim=n, inputs=[x16, wp.float16(3)])
wp.launch(scale, dim=n, inputs=[x32, wp.float32(3)])
wp.launch(scale, dim=n, inputs=[x64, wp.float64(3)])
During each one of these launches, a new kernel instance is being generated, which forces the module to be reloaded. You might see something like this in the output:
.. code:: text
Module __main__ load on device 'cuda:0' took 170.37 ms
Module __main__ load on device 'cuda:0' took 171.43 ms
Module __main__ load on device 'cuda:0' took 179.49 ms
This leads to a couple of potential problems:
- The overhead of repeatedly rebuilding the modules can impact the overall performance of the program.
- Module reloading during graph capture is not allowed on older CUDA drivers, which will cause captures to fail.
Explicit instantiation can be used to overcome these issues.
.. _explicit_instantiation:
Explicit Instantiation
~~~~~~~~~~~~~~~~~~~~~~
Warp allows explicitly declaring instances of generic kernels with different types. One way is to use the ``@wp.overload`` decorator:
.. code:: python
@wp.overload
def scale(x: wp.array(dtype=wp.float16), s: wp.float16):
    ...

@wp.overload
def scale(x: wp.array(dtype=wp.float32), s: wp.float32):
    ...

@wp.overload
def scale(x: wp.array(dtype=wp.float64), s: wp.float64):
    ...
wp.launch(scale, dim=n, inputs=[x16, wp.float16(3)])
wp.launch(scale, dim=n, inputs=[x32, wp.float32(3)])
wp.launch(scale, dim=n, inputs=[x64, wp.float64(3)])
The ``@wp.overload`` decorator allows re-declaring generic kernels without repeating the kernel code. The kernel body is just replaced with the ellipsis (``...``). Warp keeps track of known overloads for each kernel, so if an overload exists it will not be instantiated again. If all the overloads are declared prior to kernel launches, the module will only load once with all the kernel instances in place.
We can also use ``wp.overload()`` as a function for a slightly more concise syntax. We just need to specify the generic kernel and a list of concrete argument types:
.. code:: python
wp.overload(scale, [wp.array(dtype=wp.float16), wp.float16])
wp.overload(scale, [wp.array(dtype=wp.float32), wp.float32])
wp.overload(scale, [wp.array(dtype=wp.float64), wp.float64])
Instead of an argument list, a dictionary can also be provided:
.. code:: python
wp.overload(scale, {"x": wp.array(dtype=wp.float16), "s": wp.float16})
wp.overload(scale, {"x": wp.array(dtype=wp.float32), "s": wp.float32})
wp.overload(scale, {"x": wp.array(dtype=wp.float64), "s": wp.float64})
A dictionary might be preferred for readability. With dictionaries, only generic arguments need to be specified, which can be even more concise when overloading kernels where some of the arguments are not generic.
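For instance, with a hypothetical kernel that mixes generic and concrete parameters, only the generic ones appear in the dictionary:

.. code:: python

    @wp.kernel
    def fill(x: wp.array(dtype=Any), value: Any, count: int):
        i = wp.tid()
        if i < count:
            x[i] = value

    # "count" is already concrete, so it can be omitted from the overload
    wp.overload(fill, {"x": wp.array(dtype=wp.float16), "value": wp.float16})
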
We can easily create overloads in a single loop, like this:
.. code:: python
for T in [wp.float16, wp.float32, wp.float64]:
    wp.overload(scale, [wp.array(dtype=T), T])
Finally, the ``wp.overload()`` function returns the concrete kernel instance, which can be saved in a variable:
.. code:: python
scale_f16 = wp.overload(scale, [wp.array(dtype=wp.float16), wp.float16])
scale_f32 = wp.overload(scale, [wp.array(dtype=wp.float32), wp.float32])
scale_f64 = wp.overload(scale, [wp.array(dtype=wp.float64), wp.float64])
These instances are treated as regular kernels, not generic. This means that launches should be faster, because Warp doesn't need to infer data types from the arguments like it does when launching generic kernels. The typing requirements for kernel arguments are also more relaxed than with generic kernels, because Warp can convert scalars, vectors, and matrices to the known required types.
.. code:: python
# launch concrete kernel instances
wp.launch(scale_f16, dim=n, inputs=[x16, 3])
wp.launch(scale_f32, dim=n, inputs=[x32, 3])
wp.launch(scale_f64, dim=n, inputs=[x64, 3])
.. _Generic Functions:
Generic Functions
-----------------
Like Warp kernels, we can also define generic Warp functions:
.. code:: python
# generic function
@wp.func
def f(x: Any):
    return x * x

# use generic function in a regular kernel
@wp.kernel
def square_float(a: wp.array(dtype=float)):
    i = wp.tid()
    a[i] = f(a[i])

# use generic function in a generic kernel
@wp.kernel
def square_any(a: wp.array(dtype=Any)):
    i = wp.tid()
    a[i] = f(a[i])
data = [1, 2, 3, 4, 5, 6, 7, 8, 9]
n = len(data)
af = wp.array(data, dtype=float)
ai = wp.array(data, dtype=int)
# launch regular kernel
wp.launch(square_float, dim=n, inputs=[af])
# launch generic kernel
wp.launch(square_any, dim=n, inputs=[af])
wp.launch(square_any, dim=n, inputs=[ai])
A generic function can be used in regular and generic kernels. It's not necessary to explicitly overload generic functions. All required function overloads are generated automatically when those functions are used in kernels.
type() Operator
---------------
Consider the following generic function:
.. code:: python
@wp.func
def triple(x: Any):
    return 3 * x
Using numeric literals like ``3`` is problematic in generic expressions due to Warp's strict typing rules. Operands in arithmetic expressions must have the same data types, but integer literals are always treated as ``wp.int32``. This function will fail to compile if ``x`` has a data type other than ``wp.int32``, which means that it's not generic at all.
The ``type()`` operator comes to the rescue here. The ``type()`` operator returns the type of its argument, which is handy in generic functions or kernels where the data types are not known in advance. We can rewrite the function like this to make it work with a wider range of types:
.. code:: python
@wp.func
def triple(x: Any):
    return type(x)(3) * x
The ``type()`` operator is useful for type conversions in Warp kernels and functions. For example, here is a simple generic ``arange()`` kernel:
.. code:: python
@wp.kernel
def arange(a: wp.array(dtype=Any)):
    i = wp.tid()
    a[i] = type(a[0])(i)
n = 10
ai = wp.empty(n, dtype=wp.int32)
af = wp.empty(n, dtype=wp.float32)
wp.launch(arange, dim=n, inputs=[ai])
wp.launch(arange, dim=n, inputs=[af])
``wp.tid()`` returns an integer, but the value gets converted to the array's data type before storing it in the array. Alternatively, we could write our ``arange()`` kernel like this:
.. code:: python
@wp.kernel
def arange(a: wp.array(dtype=Any)):
    i = wp.tid()
    a[i] = a.dtype(i)
This variant uses the ``array.dtype()`` operator, which returns the type of the array's contents.
Limitations and Rough Edges
---------------------------
Warp generics are still in development and there are some limitations.
Module Reloading Behavior
~~~~~~~~~~~~~~~~~~~~~~~~~
As mentioned in the :ref:`implicit instantiation <implicit_instantiation>` section, launching new kernel overloads triggers the recompilation of the kernel module. This adds overhead and doesn't play well with Warp's current kernel caching strategy. Kernel caching relies on hashing the contents of the module, which includes all the concrete kernels and functions encountered in the Python program so far. Whenever a new kernel or a new instance of a generic kernel is added, the module needs to be reloaded. Re-running the Python program leads to the same sequence of kernels being added to the module, which means that implicit instantiation of generic kernels will trigger the same module reloading on every run. This is clearly not ideal, and we intend to improve this behavior in the future.
Using :ref:`explicit instantiation <explicit_instantiation>` is usually a good workaround for this, as long as the overloads are added in the same order before any kernel launches.
Note that this issue is not specific to generic kernels. Adding new regular kernels to a module can also trigger repetitive module reloading if the kernel definitions are intermixed with kernel launches. For example:
.. code:: python
@wp.kernel
def foo(x: float):
    wp.print(x)

wp.launch(foo, dim=1, inputs=[17])

@wp.kernel
def bar(x: float):
    wp.print(x)

wp.launch(bar, dim=1, inputs=[42])
This code will also trigger module reloading during each kernel launch, even though it doesn't use generics at all:
.. code:: text
Module __main__ load on device 'cuda:0' took 155.73 ms
17
Module __main__ load on device 'cuda:0' took 164.83 ms
42
Graph Capture
~~~~~~~~~~~~~
Module reloading is not allowed during graph capture in CUDA 12.2 or older. Kernel instantiation can trigger module reloading, which will cause graph capture to fail on drivers that don't support newer versions of CUDA. The workaround, again, is to explicitly declare the required overloads before capture begins.
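A minimal sketch of this pattern, reusing the ``scale`` kernel and arrays from the earlier examples:

.. code:: python

    # declare the overload and load the module before capture starts,
    # so that no module reload happens while the graph is being recorded
    wp.overload(scale, [wp.array(dtype=wp.float32), wp.float32])
    wp.force_load()

    wp.capture_begin()
    try:
        wp.launch(scale, dim=n, inputs=[x32, wp.float32(3.0)])
    finally:
        graph = wp.capture_end()

    wp.capture_launch(graph)
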
Type Variables
~~~~~~~~~~~~~~
Warp's ``type()`` operator is similar in principle to Python's ``type()`` function, but it's currently not possible to use types as variables in Warp kernels and functions. For example, the following is currently `not` allowed:
.. code:: python
@wp.func
def triple(x: Any):
    # TODO:
    T = type(x)
    return T(3) * x
Kernel Overloading Restrictions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It's currently not possible to define multiple kernels with the same name but different argument counts, but this restriction may be lifted in the future.
| 12,174 | reStructuredText | 38.65798 | 802 | 0.695499 |
NVIDIA/warp/docs/modules/sparse.rst | warp.sparse
===============================
.. currentmodule:: warp.sparse
..
.. toctree::
:maxdepth: 2
Warp includes a sparse linear algebra module ``warp.sparse`` that implements some common sparse matrix operations that arise in simulation.
Sparse Matrices
-------------------------
Currently, ``warp.sparse`` supports Block Sparse Row (BSR) matrices; the BSR format can also be used to represent Compressed Sparse Row (CSR) matrices as a special case with a 1x1 block size.
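As a minimal sketch, a CSR-style matrix (1x1 blocks) can be assembled from COO triplets and applied to a vector as shown below; the exact signatures are documented in the API reference that follows:

.. code:: python

    import warp as wp
    import warp.sparse as sparse

    rows = wp.array([0, 0, 1, 2, 3], dtype=int)
    cols = wp.array([0, 1, 1, 2, 3], dtype=int)
    vals = wp.array([2.0, -1.0, 2.0, 2.0, 2.0], dtype=wp.float32)

    # 4x4 matrix with scalar (1x1) blocks, filled from the triplets above
    A = sparse.bsr_zeros(4, 4, block_type=wp.float32)
    sparse.bsr_set_from_triplets(A, rows, cols, vals)

    # sparse matrix-vector product y = A x
    x = wp.ones(4, dtype=wp.float32)
    y = sparse.bsr_mv(A, x)
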
.. automodule:: warp.sparse
:members:
Iterative Linear Solvers
------------------------
.. currentmodule:: warp.optim.linear
Warp provides a few common iterative linear solvers (:func:`cg`, :func:`cr`, :func:`bicgstab`, :func:`gmres`) with optional preconditioning.
.. note:: While primarily intended to work with sparse matrices, those solvers also accept dense linear operators provided as 2D Warp arrays.
It is also possible to provide custom operators, see :class:`LinearOperator`.
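As an illustration, solving ``A x = b`` with the conjugate gradient solver might look like the sketch below; ``x`` holds the initial guess and is updated in place, and the keyword arguments shown are merely illustrative:

.. code:: python

    from warp.optim.linear import cg

    x = wp.zeros_like(b)
    cg(A, b, x, tol=1.0e-6, maxiter=100)
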
.. automodule:: warp.optim.linear
:members:
| 1,037 | reStructuredText | 30.454545 | 188 | 0.690453 |
NVIDIA/warp/docs/modules/sim.rst | warp.sim
====================
.. currentmodule:: warp.sim
..
.. toctree::
:maxdepth: 2
Warp includes a simulation module ``warp.sim`` that includes many common physical simulation models, and integrators for explicit and implicit time-stepping.
.. note:: The simulation model is under construction and should be expected to change rapidly, please treat this section as work in progress.
Model
-----
.. autoclass:: ModelBuilder
:members:
.. autoclass:: Model
:members:
.. autoclass:: ModelShapeMaterials
:members:
.. autoclass:: ModelShapeGeometry
:members:
.. autoclass:: JointAxis
:members:
.. autoclass:: Mesh
:members:
.. autoclass:: SDF
:members:
.. _Joint types:
Joint types
^^^^^^^^^^^^^^
.. data:: JOINT_PRISMATIC
Prismatic (slider) joint
.. data:: JOINT_REVOLUTE
Revolute (hinge) joint
.. data:: JOINT_BALL
Ball (spherical) joint with quaternion state representation
.. data:: JOINT_FIXED
Fixed (static) joint
.. data:: JOINT_FREE
Free (floating) joint
.. data:: JOINT_COMPOUND
Compound joint with 3 rotational degrees of freedom
.. data:: JOINT_UNIVERSAL
Universal joint with 2 rotational degrees of freedom
.. data:: JOINT_DISTANCE
Distance joint that keeps two bodies at a distance within its joint limits (only supported in :class:`XPBDIntegrator` at the moment)
.. data:: JOINT_D6
Generic D6 joint with up to 3 translational and 3 rotational degrees of freedom
.. _Joint modes:
Joint control modes
^^^^^^^^^^^^^^^^^^^
Joint modes control how the joint control input :attr:`Control.joint_act` affects the torque applied at a given joint axis.
By default, it behaves as a direct force application via :data:`JOINT_MODE_FORCE`. Other modes can be used to implement joint position or velocity drives:
.. data:: JOINT_MODE_FORCE
This is the default control mode where the control input is the torque :math:`\tau` applied at the joint axis.
.. data:: JOINT_MODE_TARGET_POSITION
The control input is the target position :math:`\mathbf{q}_{\text{target}}` which is achieved via PD control of torque :math:`\tau` where the proportional and derivative gains are set by :attr:`Model.joint_target_ke` and :attr:`Model.joint_target_kd`:
.. math::
\tau = k_e (\mathbf{q}_{\text{target}} - \mathbf{q}) - k_d \mathbf{\dot{q}}
.. data:: JOINT_MODE_TARGET_VELOCITY
The control input is the target velocity :math:`\mathbf{\dot{q}}_{\text{target}}` which is achieved via a controller of torque :math:`\tau` that brings the velocity at the joint axis to the target through proportional gain :attr:`Model.joint_target_ke`:
.. math::
\tau = k_e (\mathbf{\dot{q}}_{\text{target}} - \mathbf{\dot{q}})
State
--------------
.. autoclass:: State
:members:
Control
--------------
.. autoclass:: Control
:members:
.. _FK-IK:
Forward / Inverse Kinematics
----------------------------
Articulated rigid-body mechanisms are kinematically described by the joints that connect the bodies, as well as the relative transforms from the parent and child bodies to their respective joint anchor frames:
.. image:: /img/joint_transforms.png
:width: 400
:align: center
.. list-table:: Variable names in the kernels from articulation.py
:widths: 10 90
:header-rows: 1
* - Symbol
- Description
* - x_wp
- World transform of the parent body (stored at :attr:`State.body_q`)
* - x_wc
- World transform of the child body (stored at :attr:`State.body_q`)
* - x_pj
- Transform from the parent body to the joint parent anchor frame (defined by :attr:`Model.joint_X_p`)
* - x_cj
- Transform from the child body to the joint child anchor frame (defined by :attr:`Model.joint_X_c`)
* - x_j
- Joint transform from the joint parent anchor frame to the joint child anchor frame
In the forward kinematics, the joint transform is determined by the joint coordinates (generalized joint positions :attr:`State.joint_q` and velocities :attr:`State.joint_qd`).
Given the parent body's world transform :math:`x_{wp}` and the joint transform :math:`x_{j}`, the child body's world transform :math:`x_{wc}` is computed as:
.. math::
x_{wc} = x_{wp} \cdot x_{pj} \cdot x_{j} \cdot x_{cj}^{-1}.
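:func:`eval_fk` applies this relationship over the whole articulation. A typical call, assuming ``model`` is a finalized :class:`Model`, recomputes the body transforms of a :class:`State` from its joint coordinates:

.. code:: python

    state = model.state()

    # evaluate forward kinematics for all articulations (no mask)
    wp.sim.eval_fk(model, state.joint_q, state.joint_qd, None, state)
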
.. autofunction:: eval_fk
.. autofunction:: eval_ik
Integrators
--------------
.. autoclass:: Integrator
:members:
.. autoclass:: SemiImplicitIntegrator
:members:
.. autoclass:: XPBDIntegrator
:members:
.. autoclass:: FeatherstoneIntegrator
:members:
Importers
--------------
Warp sim supports the loading of simulation models from URDF, MuJoCo (MJCF), and USD Physics files.
.. autofunction:: parse_urdf
.. autofunction:: parse_mjcf
.. autofunction:: parse_usd
.. autofunction:: resolve_usd_from_url
Utility functions
------------------
Common utility functions used in simulators.
.. autofunction:: velocity_at_point
.. autofunction:: quat_to_euler
.. autofunction:: quat_from_euler
.. autofunction:: load_mesh
| 5,086 | reStructuredText | 24.691919 | 258 | 0.679709 |
NVIDIA/warp/docs/modules/functions.rst | ..
Autogenerated File - Do not edit. Run build_docs.py to generate.
.. functions:
.. currentmodule:: warp
Kernel Reference
================
Scalar Types
------------
.. class:: int8
.. class:: uint8
.. class:: int16
.. class:: uint16
.. class:: int32
.. class:: uint32
.. class:: int64
.. class:: uint64
.. class:: float16
.. class:: float32
.. class:: float64
.. class:: bool
Vector Types
------------
.. class:: vec2b
.. class:: vec2ub
.. class:: vec2s
.. class:: vec2us
.. class:: vec2i
.. class:: vec2ui
.. class:: vec2l
.. class:: vec2ul
.. class:: vec2h
.. class:: vec2f
.. class:: vec2d
.. class:: vec3b
.. class:: vec3ub
.. class:: vec3s
.. class:: vec3us
.. class:: vec3i
.. class:: vec3ui
.. class:: vec3l
.. class:: vec3ul
.. class:: vec3h
.. class:: vec3f
.. class:: vec3d
.. class:: vec4b
.. class:: vec4ub
.. class:: vec4s
.. class:: vec4us
.. class:: vec4i
.. class:: vec4ui
.. class:: vec4l
.. class:: vec4ul
.. class:: vec4h
.. class:: vec4f
.. class:: vec4d
.. class:: mat22h
.. class:: mat22f
.. class:: mat22d
.. class:: mat33h
.. class:: mat33f
.. class:: mat33d
.. class:: mat44h
.. class:: mat44f
.. class:: mat44d
.. class:: quath
.. class:: quatf
.. class:: quatd
.. class:: transformh
.. class:: transformf
.. class:: transformd
.. class:: spatial_vectorh
.. class:: spatial_vectorf
.. class:: spatial_vectord
.. class:: spatial_matrixh
.. class:: spatial_matrixf
.. class:: spatial_matrixd
Generic Types
-------------
.. class:: Int
.. class:: Float
.. class:: Scalar
.. class:: Vector
.. class:: Matrix
.. class:: Quaternion
.. class:: Transformation
.. class:: Array
Query Types
-------------
.. autoclass:: bvh_query_t
.. autoclass:: hash_grid_query_t
.. autoclass:: mesh_query_aabb_t
.. autoclass:: mesh_query_point_t
.. autoclass:: mesh_query_ray_t
Scalar Math
---------------
.. py:function:: min(x: Scalar, y: Scalar) -> Scalar
Return the minimum of two scalars.
.. py:function:: min(x: Vector[Any,Scalar], y: Vector[Any,Scalar]) -> Vector[Any,Scalar]
:noindex:
:nocontentsentry:
Return the element-wise minimum of two vectors.
.. py:function:: min(v: Vector[Any,Scalar]) -> Scalar
:noindex:
:nocontentsentry:
Return the minimum element of a vector ``v``.
.. py:function:: max(x: Scalar, y: Scalar) -> Scalar
Return the maximum of two scalars.
.. py:function:: max(x: Vector[Any,Scalar], y: Vector[Any,Scalar]) -> Vector[Any,Scalar]
:noindex:
:nocontentsentry:
Return the element-wise maximum of two vectors.
.. py:function:: max(v: Vector[Any,Scalar]) -> Scalar
:noindex:
:nocontentsentry:
Return the maximum element of a vector ``v``.
.. py:function:: clamp(x: Scalar, a: Scalar, b: Scalar) -> Scalar
Clamp the value of ``x`` to the range [a, b].
.. py:function:: abs(x: Scalar) -> Scalar
Return the absolute value of ``x``.
.. py:function:: sign(x: Scalar) -> Scalar
Return -1 if ``x`` < 0, return 1 otherwise.
.. py:function:: step(x: Scalar) -> Scalar
Return 1.0 if ``x`` < 0.0, return 0.0 otherwise.
.. py:function:: nonzero(x: Scalar) -> Scalar
Return 1.0 if ``x`` is not equal to zero, return 0.0 otherwise.
.. py:function:: sin(x: Float) -> Float
Return the sine of ``x`` in radians.
.. py:function:: cos(x: Float) -> Float
Return the cosine of ``x`` in radians.
.. py:function:: acos(x: Float) -> Float
Return arccos of ``x`` in radians. Inputs are automatically clamped to [-1.0, 1.0].
.. py:function:: asin(x: Float) -> Float
Return arcsin of ``x`` in radians. Inputs are automatically clamped to [-1.0, 1.0].
.. py:function:: sqrt(x: Float) -> Float
Return the square root of ``x``, where ``x`` is positive.
.. py:function:: cbrt(x: Float) -> Float
Return the cube root of ``x``.
.. py:function:: tan(x: Float) -> Float
Return the tangent of ``x`` in radians.
.. py:function:: atan(x: Float) -> Float
Return the arctangent of ``x`` in radians.
.. py:function:: atan2(y: Float, x: Float) -> Float
Return the 2-argument arctangent, atan2, of the point ``(x, y)`` in radians.
.. py:function:: sinh(x: Float) -> Float
Return the sinh of ``x``.
.. py:function:: cosh(x: Float) -> Float
Return the cosh of ``x``.
.. py:function:: tanh(x: Float) -> Float
Return the tanh of ``x``.
.. py:function:: degrees(x: Float) -> Float
Convert ``x`` from radians into degrees.
.. py:function:: radians(x: Float) -> Float
Convert ``x`` from degrees into radians.
.. py:function:: log(x: Float) -> Float
Return the natural logarithm (base-e) of ``x``, where ``x`` is positive.
.. py:function:: log2(x: Float) -> Float
Return the binary logarithm (base-2) of ``x``, where ``x`` is positive.
.. py:function:: log10(x: Float) -> Float
Return the common logarithm (base-10) of ``x``, where ``x`` is positive.
.. py:function:: exp(x: Float) -> Float
Return the value of the exponential function :math:`e^x`.
.. py:function:: pow(x: Float, y: Float) -> Float
Return the result of ``x`` raised to power of ``y``.
.. py:function:: round(x: Float) -> Float
Return the nearest integer value to ``x``, rounding halfway cases away from zero.
This is the most intuitive form of rounding in the colloquial sense, but can be slower than other options like :func:`warp.rint()`.
Differs from :func:`numpy.round()`, which behaves the same way as :func:`numpy.rint()`.
.. py:function:: rint(x: Float) -> Float
Return the nearest integer value to ``x``, rounding halfway cases to nearest even integer.
It is generally faster than :func:`warp.round()`. Equivalent to :func:`numpy.rint()`.
.. py:function:: trunc(x: Float) -> Float
Return the nearest integer that is closer to zero than ``x``.
In other words, it discards the fractional part of ``x``.
It is similar to casting ``float(int(x))``, but preserves the negative sign when x is in the range [-0.0, -1.0).
Equivalent to :func:`numpy.trunc()` and :func:`numpy.fix()`.
.. py:function:: floor(x: Float) -> Float
Return the largest integer that is less than or equal to ``x``.
.. py:function:: ceil(x: Float) -> Float
Return the smallest integer that is greater than or equal to ``x``.
.. py:function:: frac(x: Float) -> Float
Retrieve the fractional part of x.
In other words, it discards the integer part of x and is equivalent to ``x - trunc(x)``.
.. py:function:: isfinite(x: Scalar) -> bool
Return ``True`` if x is a finite number, otherwise return ``False``.
.. py:function:: isfinite(x: Vector[Any,Scalar]) -> bool
:noindex:
:nocontentsentry:
Return ``True`` if all elements of the vector ``x`` are finite, otherwise return ``False``.
.. py:function:: isfinite(x: Quaternion[Scalar]) -> bool
:noindex:
:nocontentsentry:
Return ``True`` if all elements of the quaternion ``x`` are finite, otherwise return ``False``.
.. py:function:: isfinite(m: Matrix[Any,Any,Scalar]) -> bool
:noindex:
:nocontentsentry:
Return ``True`` if all elements of the matrix ``m`` are finite, otherwise return ``False``.
.. py:function:: isnan(x: Scalar) -> bool
Return ``True`` if ``x`` is NaN, otherwise return ``False``.
.. py:function:: isnan(x: Vector[Any,Scalar]) -> bool
:noindex:
:nocontentsentry:
Return ``True`` if any element of the vector ``x`` is NaN, otherwise return ``False``.
.. py:function:: isnan(x: Quaternion[Scalar]) -> bool
:noindex:
:nocontentsentry:
Return ``True`` if any element of the quaternion ``x`` is NaN, otherwise return ``False``.
.. py:function:: isnan(m: Matrix[Any,Any,Scalar]) -> bool
:noindex:
:nocontentsentry:
Return ``True`` if any element of the matrix ``m`` is NaN, otherwise return ``False``.
.. py:function:: isinf(x: Scalar) -> bool
Return ``True`` if x is positive or negative infinity, otherwise return ``False``.
.. py:function:: isinf(x: Vector[Any,Scalar]) -> bool
:noindex:
:nocontentsentry:
Return ``True`` if any element of the vector ``x`` is positive or negative infinity, otherwise return ``False``.
.. py:function:: isinf(x: Quaternion[Scalar]) -> bool
:noindex:
:nocontentsentry:
Return ``True`` if any element of the quaternion ``x`` is positive or negative infinity, otherwise return ``False``.
.. py:function:: isinf(m: Matrix[Any,Any,Scalar]) -> bool
:noindex:
:nocontentsentry:
Return ``True`` if any element of the matrix ``m`` is positive or negative infinity, otherwise return ``False``.
Vector Math
---------------
.. py:function:: dot(x: Vector[Any,Scalar], y: Vector[Any,Scalar]) -> Scalar
Compute the dot product between two vectors.
.. py:function:: dot(x: Quaternion[Float], y: Quaternion[Float]) -> Scalar
:noindex:
:nocontentsentry:
Compute the dot product between two quaternions.
.. py:function:: ddot(x: Matrix[Any,Any,Scalar], y: Matrix[Any,Any,Scalar]) -> Scalar
Compute the double dot product between two matrices.
.. py:function:: argmin(v: Vector[Any,Scalar]) -> uint32
Return the index of the minimum element of a vector ``v``. [1]_
.. py:function:: argmax(v: Vector[Any,Scalar]) -> uint32
Return the index of the maximum element of a vector ``v``. [1]_
.. py:function:: outer(x: Vector[Any,Scalar], y: Vector[Any,Scalar]) -> Matrix[Any,Any,Scalar]
Compute the outer product ``x*y^T`` for two vectors.
.. py:function:: cross(x: Vector[3,Scalar], y: Vector[3,Scalar]) -> Vector[3,Scalar]
Compute the cross product of two 3D vectors.
.. py:function:: skew(x: Vector[3,Scalar])
Compute the skew-symmetric 3x3 matrix for a 3D vector ``x``.
.. py:function:: length(x: Vector[Any,Float]) -> Scalar
Compute the length of a floating-point vector ``x``.
.. py:function:: length(x: Quaternion[Float]) -> Scalar
:noindex:
:nocontentsentry:
Compute the length of a quaternion ``x``.
.. py:function:: length_sq(x: Vector[Any,Scalar]) -> Scalar
Compute the squared length of a vector ``x``.
.. py:function:: length_sq(x: Quaternion[Scalar]) -> Scalar
:noindex:
:nocontentsentry:
Compute the squared length of a quaternion ``x``.
.. py:function:: normalize(x: Vector[Any,Float]) -> Vector[Any,Scalar]
Compute the normalized value of ``x``. If ``length(x)`` is 0 then the zero vector is returned.
.. py:function:: normalize(x: Quaternion[Float]) -> Quaternion[Scalar]
:noindex:
:nocontentsentry:
Compute the normalized value of ``x``. If ``length(x)`` is 0, then the zero quaternion is returned.
.. py:function:: transpose(m: Matrix[Any,Any,Scalar])
Return the transpose of the matrix ``m``.
.. py:function:: inverse(m: Matrix[2,2,Float]) -> Matrix[Any,Any,Float]
Return the inverse of a 2x2 matrix ``m``.
.. py:function:: inverse(m: Matrix[3,3,Float]) -> Matrix[Any,Any,Float]
:noindex:
:nocontentsentry:
Return the inverse of a 3x3 matrix ``m``.
.. py:function:: inverse(m: Matrix[4,4,Float]) -> Matrix[Any,Any,Float]
:noindex:
:nocontentsentry:
Return the inverse of a 4x4 matrix ``m``.
.. py:function:: determinant(m: Matrix[2,2,Float]) -> Scalar
Return the determinant of a 2x2 matrix ``m``.
.. py:function:: determinant(m: Matrix[3,3,Float]) -> Scalar
:noindex:
:nocontentsentry:
Return the determinant of a 3x3 matrix ``m``.
.. py:function:: determinant(m: Matrix[4,4,Float]) -> Scalar
:noindex:
:nocontentsentry:
Return the determinant of a 4x4 matrix ``m``.
.. py:function:: trace(m: Matrix[Any,Any,Scalar]) -> Scalar
Return the trace of the matrix ``m``.
.. py:function:: diag(d: Vector[Any,Scalar]) -> Matrix[Any,Any,Scalar]
Returns a matrix with the components of the vector ``d`` on the diagonal.
.. py:function:: get_diag(m: Matrix[Any,Any,Scalar]) -> Vector[Any,Scalar]
Returns a vector containing the diagonal elements of the square matrix ``m``.
.. py:function:: cw_mul(x: Vector[Any,Scalar], y: Vector[Any,Scalar]) -> Vector[Any,Scalar]
Component-wise multiplication of two vectors.
.. py:function:: cw_mul(x: Matrix[Any,Any,Scalar], y: Matrix[Any,Any,Scalar]) -> Matrix[Any,Any,Scalar]
:noindex:
:nocontentsentry:
Component-wise multiplication of two matrices.
.. py:function:: cw_div(x: Vector[Any,Scalar], y: Vector[Any,Scalar]) -> Vector[Any,Scalar]
Component-wise division of two vectors.
.. py:function:: cw_div(x: Matrix[Any,Any,Scalar], y: Matrix[Any,Any,Scalar]) -> Matrix[Any,Any,Scalar]
:noindex:
:nocontentsentry:
Component-wise division of two matrices.
.. py:function:: vector(w: Vector[3,Float], v: Vector[3,Float])
Construct a 6D screw vector from two 3D vectors.
.. py:function:: vector(*arg_types: Scalar, length: int32, dtype: Scalar) -> Vector[Any,Scalar]
:noindex:
:nocontentsentry:
Construct a vector with the given length and dtype.
.. py:function:: matrix(pos: Vector[3,Float], rot: Quaternion[Float], scale: Vector[3,Float]) -> Matrix[4,4,Float]
Construct a 4x4 transformation matrix that applies the transformations as
Translation(pos)*Rotation(rot)*Scale(scale) when applied to column vectors, i.e.: y = (TRS)*x
.. py:function:: matrix(*arg_types: Scalar, shape: Tuple[int, int], dtype: Scalar) -> Matrix[Any,Any,Scalar]
:noindex:
:nocontentsentry:
Construct a matrix. If the positional ``arg_types`` are not given, then the matrix will be zero-initialized.
.. py:function:: identity(n: int32, dtype: Scalar) -> Matrix[Any,Any,Scalar]
Create an identity matrix with shape=(n,n) with the type given by ``dtype``.
.. py:function:: svd3(A: Matrix[3,3,Float], U: Matrix[3,3,Float], sigma: Vector[3,Float], V: Matrix[3,3,Scalar]) -> None
Compute the SVD of a 3x3 matrix ``A``. The singular values are returned in ``sigma``,
while the left and right basis vectors are returned in ``U`` and ``V``.
.. py:function:: qr3(A: Matrix[3,3,Float], Q: Matrix[3,3,Float], R: Matrix[3,3,Float]) -> None
Compute the QR decomposition of a 3x3 matrix ``A``. The orthogonal matrix is returned in ``Q``,
while the upper triangular matrix is returned in ``R``.
.. py:function:: eig3(A: Matrix[3,3,Float], Q: Matrix[3,3,Float], d: Vector[3,Float]) -> None
Compute the eigendecomposition of a 3x3 matrix ``A``. The eigenvectors are returned as the columns of ``Q``,
while the corresponding eigenvalues are returned in ``d``.
Quaternion Math
---------------
.. py:function:: quaternion() -> Quaternion[Float]
Construct a zero-initialized quaternion. Quaternions are laid out as
[ix, iy, iz, r], where ix, iy, iz are the imaginary part, and r the real part.
.. py:function:: quaternion(x: Float, y: Float, z: Float, w: Float) -> Quaternion[Float]
:noindex:
:nocontentsentry:
Create a quaternion using the supplied components (type inferred from component type).
.. py:function:: quaternion(i: Vector[3,Float], r: Float) -> Quaternion[Float]
:noindex:
:nocontentsentry:
Create a quaternion using the supplied vector/scalar (type inferred from scalar type).
.. py:function:: quaternion(q: Quaternion[Float])
:noindex:
:nocontentsentry:
Construct a quaternion of type dtype from another quaternion of a different dtype.
.. py:function:: quat_identity() -> quatf
Construct an identity quaternion with zero imaginary part and real part of 1.0
.. py:function:: quat_from_axis_angle(axis: Vector[3,Float], angle: Float) -> Quaternion[Scalar]
Construct a quaternion representing a rotation of angle radians around the given axis.
.. py:function:: quat_to_axis_angle(q: Quaternion[Float], axis: Vector[3,Float], angle: Float) -> None
Extract the rotation axis and angle in radians that a quaternion represents.
.. py:function:: quat_from_matrix(m: Matrix[3,3,Float]) -> Quaternion[Scalar]
Construct a quaternion from a 3x3 matrix.
.. py:function:: quat_rpy(roll: Float, pitch: Float, yaw: Float) -> Quaternion[Scalar]
Construct a quaternion representing combined roll (z), pitch (x), and yaw (y) rotations in radians.
.. py:function:: quat_inverse(q: Quaternion[Float]) -> Quaternion[Scalar]
Compute quaternion conjugate.
.. py:function:: quat_rotate(q: Quaternion[Float], p: Vector[3,Float]) -> Vector[3,Scalar]
Rotate a vector by a quaternion.
.. py:function:: quat_rotate_inv(q: Quaternion[Float], p: Vector[3,Float]) -> Vector[3,Scalar]
Rotate a vector by the inverse of a quaternion.
.. py:function:: quat_slerp(q0: Quaternion[Float], q1: Quaternion[Float], t: Float) -> Quaternion[Scalar]
Spherically interpolate between two quaternions using the interpolation parameter ``t``.
.. py:function:: quat_to_matrix(q: Quaternion[Float]) -> Matrix[3,3,Scalar]
Convert a quaternion to a 3x3 rotation matrix.
Transformations
---------------
.. py:function:: transformation(p: Vector[3,Float], q: Quaternion[Float]) -> Transformation[Scalar]
Construct a rigid-body transformation with translation part ``p`` and rotation ``q``.
.. py:function:: transform_identity() -> transformf
Construct an identity transform with zero translation and identity rotation.
.. py:function:: transform_get_translation(t: Transformation[Float]) -> Vector[3,Scalar]
Return the translational part of a transform ``t``.
.. py:function:: transform_get_rotation(t: Transformation[Float]) -> Quaternion[Scalar]
Return the rotational part of a transform ``t``.
.. py:function:: transform_multiply(a: Transformation[Float], b: Transformation[Float]) -> Transformation[Scalar]
Multiply two rigid body transformations together.
.. py:function:: transform_point(t: Transformation[Scalar], p: Vector[3,Scalar]) -> Vector[3,Scalar]
Apply the transform to a point ``p`` treating the homogeneous coordinate as w=1 (translation and rotation).
.. py:function:: transform_point(m: Matrix[4,4,Scalar], p: Vector[3,Scalar]) -> Vector[3,Scalar]
:noindex:
:nocontentsentry:
Apply the transform to a point ``p`` treating the homogeneous coordinate as w=1.
The transformation is applied treating ``p`` as a column vector, e.g.: ``y = M*p``.
Note this is in contrast to some libraries, notably USD, which applies transforms to row vectors, ``y^T = p^T*M^T``.
If the transform is coming from a library that uses row-vectors, then users should transpose the transformation
matrix before calling this method.
.. py:function:: transform_vector(t: Transformation[Scalar], v: Vector[3,Scalar]) -> Vector[3,Scalar]
Apply the transform to a vector ``v`` treating the homogeneous coordinate as w=0 (rotation only).
.. py:function:: transform_vector(m: Matrix[4,4,Scalar], v: Vector[3,Scalar]) -> Vector[3,Scalar]
:noindex:
:nocontentsentry:
Apply the transform to a vector ``v`` treating the homogeneous coordinate as w=0.
The transformation is applied treating ``v`` as a column vector, e.g.: ``y = M*v``.
Note this is in contrast to some libraries, notably USD, which applies transforms to row vectors, ``y^T = v^T*M^T``.
If the transform is coming from a library that uses row-vectors, then users should transpose the transformation
matrix before calling this method.
.. py:function:: transform_inverse(t: Transformation[Float]) -> Transformation[Float]
Compute the inverse of the transformation ``t``.
Spatial Math
---------------
.. py:function:: spatial_adjoint(r: Matrix[3,3,Float], s: Matrix[3,3,Float]) -> Matrix[6,6,Scalar]
Construct a 6x6 spatial inertial matrix from two 3x3 diagonal blocks.
.. py:function:: spatial_dot(a: Vector[6,Float], b: Vector[6,Float]) -> Scalar
Compute the dot product of two 6D screw vectors.
.. py:function:: spatial_cross(a: Vector[6,Float], b: Vector[6,Float]) -> Vector[6,Float]
Compute the cross product of two 6D screw vectors.
.. py:function:: spatial_cross_dual(a: Vector[6,Float], b: Vector[6,Float]) -> Vector[6,Float]
Compute the dual cross product of two 6D screw vectors.
.. py:function:: spatial_top(a: Vector[6,Float])
Return the top (first) part of a 6D screw vector.
.. py:function:: spatial_bottom(a: Vector[6,Float])
Return the bottom (second) part of a 6D screw vector.
.. py:function:: spatial_jacobian(S: Array[Vector[6,Float]], joint_parents: Array[int32], joint_qd_start: Array[int32], joint_start: int32, joint_count: int32, J_start: int32, J_out: Array[Float]) -> None
.. py:function:: spatial_mass(I_s: Array[Matrix[6,6,Float]], joint_start: int32, joint_count: int32, M_start: int32, M: Array[Float]) -> None
Utility
---------------
.. py:function:: mlp(weights: Array[float32], bias: Array[float32], activation: Callable, index: int32, x: Array[float32], out: Array[float32]) -> None
Evaluate a multi-layer perceptron (MLP) layer in the form: ``out = act(weights*x + bias)``.
:param weights: A layer's network weights with dimensions ``(m, n)``.
:param bias: An array with dimensions ``(n)``.
:param activation: A ``wp.func`` function that takes a single scalar float as input and returns a scalar float as output
:param index: The batch item to process. Typically each thread processes one item in the batch, in which case ``index`` should be ``wp.tid()``.
:param x: The feature matrix with dimensions ``(n, b)``
:param out: The network output with dimensions ``(m, b)``
:note: Feature and output matrices are transposed compared to some other frameworks such as PyTorch.
All matrices are assumed to be stored in flattened row-major memory layout (NumPy default).
.. py:function:: printf() -> None
Allows printing formatted strings using C-style format specifiers.
.. py:function:: print(value: Any) -> None
Print variable to stdout
.. py:function:: breakpoint() -> None
Debugger breakpoint
.. py:function:: tid() -> int
Return the current thread index for a 1D kernel launch.
Note that this is the *global* index of the thread in the range [0, dim)
where dim is the parameter passed to kernel launch.
This function may not be called from user-defined Warp functions.
.. py:function:: tid() -> Tuple[int, int]
:noindex:
:nocontentsentry:
Return the current thread indices for a 2D kernel launch.
Use ``i,j = wp.tid()`` syntax to retrieve the coordinates inside the kernel thread grid.
This function may not be called from user-defined Warp functions.
.. py:function:: tid() -> Tuple[int, int, int]
:noindex:
:nocontentsentry:
Return the current thread indices for a 3D kernel launch.
Use ``i,j,k = wp.tid()`` syntax to retrieve the coordinates inside the kernel thread grid.
This function may not be called from user-defined Warp functions.
.. py:function:: tid() -> Tuple[int, int, int, int]
:noindex:
:nocontentsentry:
Return the current thread indices for a 4D kernel launch.
Use ``i,j,k,l = wp.tid()`` syntax to retrieve the coordinates inside the kernel thread grid.
This function may not be called from user-defined Warp functions.
.. py:function:: select(cond: bool, arg1: Any, arg2: Any)
Select between two arguments, if ``cond`` is ``False`` then return ``arg1``, otherwise return ``arg2``
.. py:function:: select(cond: int8, arg1: Any, arg2: Any)
:noindex:
:nocontentsentry:
Select between two arguments, if ``cond`` is ``False`` then return ``arg1``, otherwise return ``arg2``
.. py:function:: select(cond: uint8, arg1: Any, arg2: Any)
:noindex:
:nocontentsentry:
Select between two arguments, if ``cond`` is ``False`` then return ``arg1``, otherwise return ``arg2``
.. py:function:: select(cond: int16, arg1: Any, arg2: Any)
:noindex:
:nocontentsentry:
Select between two arguments, if ``cond`` is ``False`` then return ``arg1``, otherwise return ``arg2``
.. py:function:: select(cond: uint16, arg1: Any, arg2: Any)
:noindex:
:nocontentsentry:
Select between two arguments, if ``cond`` is ``False`` then return ``arg1``, otherwise return ``arg2``
.. py:function:: select(cond: int32, arg1: Any, arg2: Any)
:noindex:
:nocontentsentry:
Select between two arguments, if ``cond`` is ``False`` then return ``arg1``, otherwise return ``arg2``
.. py:function:: select(cond: uint32, arg1: Any, arg2: Any)
:noindex:
:nocontentsentry:
Select between two arguments, if ``cond`` is ``False`` then return ``arg1``, otherwise return ``arg2``
.. py:function:: select(cond: int64, arg1: Any, arg2: Any)
:noindex:
:nocontentsentry:
Select between two arguments, if ``cond`` is ``False`` then return ``arg1``, otherwise return ``arg2``
.. py:function:: select(cond: uint64, arg1: Any, arg2: Any)
:noindex:
:nocontentsentry:
Select between two arguments, if ``cond`` is ``False`` then return ``arg1``, otherwise return ``arg2``
.. py:function:: select(arr: Array[Any], arg1: Any, arg2: Any)
:noindex:
:nocontentsentry:
Select between two arguments, if ``arr`` is null then return ``arg1``, otherwise return ``arg2``
.. py:function:: atomic_add(a: Array[Any], i: int32, value: Any)
Atomically add ``value`` onto ``a[i]``.
.. py:function:: atomic_add(a: Array[Any], i: int32, j: int32, value: Any)
:noindex:
:nocontentsentry:
Atomically add ``value`` onto ``a[i,j]``.
.. py:function:: atomic_add(a: Array[Any], i: int32, j: int32, k: int32, value: Any)
:noindex:
:nocontentsentry:
Atomically add ``value`` onto ``a[i,j,k]``.
.. py:function:: atomic_add(a: Array[Any], i: int32, j: int32, k: int32, l: int32, value: Any)
:noindex:
:nocontentsentry:
Atomically add ``value`` onto ``a[i,j,k,l]``.
.. py:function:: atomic_add(a: FabricArray[Any], i: int32, value: Any)
:noindex:
:nocontentsentry:
Atomically add ``value`` onto ``a[i]``.
.. py:function:: atomic_add(a: FabricArray[Any], i: int32, j: int32, value: Any)
:noindex:
:nocontentsentry:
Atomically add ``value`` onto ``a[i,j]``.
.. py:function:: atomic_add(a: FabricArray[Any], i: int32, j: int32, k: int32, value: Any)
:noindex:
:nocontentsentry:
Atomically add ``value`` onto ``a[i,j,k]``.
.. py:function:: atomic_add(a: FabricArray[Any], i: int32, j: int32, k: int32, l: int32, value: Any)
:noindex:
:nocontentsentry:
Atomically add ``value`` onto ``a[i,j,k,l]``.
.. py:function:: atomic_add(a: IndexedFabricArray[Any], i: int32, value: Any)
:noindex:
:nocontentsentry:
Atomically add ``value`` onto ``a[i]``.
.. py:function:: atomic_add(a: IndexedFabricArray[Any], i: int32, j: int32, value: Any)
:noindex:
:nocontentsentry:
Atomically add ``value`` onto ``a[i,j]``.
.. py:function:: atomic_add(a: IndexedFabricArray[Any], i: int32, j: int32, k: int32, value: Any)
:noindex:
:nocontentsentry:
Atomically add ``value`` onto ``a[i,j,k]``.
.. py:function:: atomic_add(a: IndexedFabricArray[Any], i: int32, j: int32, k: int32, l: int32, value: Any)
:noindex:
:nocontentsentry:
Atomically add ``value`` onto ``a[i,j,k,l]``.
.. py:function:: atomic_sub(a: Array[Any], i: int32, value: Any)
Atomically subtract ``value`` from ``a[i]``.
.. py:function:: atomic_sub(a: Array[Any], i: int32, j: int32, value: Any)
:noindex:
:nocontentsentry:
Atomically subtract ``value`` from ``a[i,j]``.
.. py:function:: atomic_sub(a: Array[Any], i: int32, j: int32, k: int32, value: Any)
:noindex:
:nocontentsentry:
Atomically subtract ``value`` from ``a[i,j,k]``.
.. py:function:: atomic_sub(a: Array[Any], i: int32, j: int32, k: int32, l: int32, value: Any)
:noindex:
:nocontentsentry:
Atomically subtract ``value`` from ``a[i,j,k,l]``.
.. py:function:: atomic_sub(a: FabricArray[Any], i: int32, value: Any)
:noindex:
:nocontentsentry:
Atomically subtract ``value`` from ``a[i]``.
.. py:function:: atomic_sub(a: FabricArray[Any], i: int32, j: int32, value: Any)
:noindex:
:nocontentsentry:
Atomically subtract ``value`` from ``a[i,j]``.
.. py:function:: atomic_sub(a: FabricArray[Any], i: int32, j: int32, k: int32, value: Any)
:noindex:
:nocontentsentry:
Atomically subtract ``value`` from ``a[i,j,k]``.
.. py:function:: atomic_sub(a: FabricArray[Any], i: int32, j: int32, k: int32, l: int32, value: Any)
:noindex:
:nocontentsentry:
Atomically subtract ``value`` from ``a[i,j,k,l]``.
.. py:function:: atomic_sub(a: IndexedFabricArray[Any], i: int32, value: Any)
:noindex:
:nocontentsentry:
Atomically subtract ``value`` from ``a[i]``.
.. py:function:: atomic_sub(a: IndexedFabricArray[Any], i: int32, j: int32, value: Any)
:noindex:
:nocontentsentry:
Atomically subtract ``value`` from ``a[i,j]``.
.. py:function:: atomic_sub(a: IndexedFabricArray[Any], i: int32, j: int32, k: int32, value: Any)
:noindex:
:nocontentsentry:
Atomically subtract ``value`` from ``a[i,j,k]``.
.. py:function:: atomic_sub(a: IndexedFabricArray[Any], i: int32, j: int32, k: int32, l: int32, value: Any)
:noindex:
:nocontentsentry:
Atomically subtract ``value`` from ``a[i,j,k,l]``.
.. py:function:: atomic_min(a: Array[Any], i: int32, value: Any)
Compute the minimum of ``value`` and ``a[i]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
.. py:function:: atomic_min(a: Array[Any], i: int32, j: int32, value: Any)
:noindex:
:nocontentsentry:
Compute the minimum of ``value`` and ``a[i,j]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
.. py:function:: atomic_min(a: Array[Any], i: int32, j: int32, k: int32, value: Any)
:noindex:
:nocontentsentry:
Compute the minimum of ``value`` and ``a[i,j,k]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
.. py:function:: atomic_min(a: Array[Any], i: int32, j: int32, k: int32, l: int32, value: Any)
:noindex:
:nocontentsentry:
Compute the minimum of ``value`` and ``a[i,j,k,l]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
.. py:function:: atomic_min(a: FabricArray[Any], i: int32, value: Any)
:noindex:
:nocontentsentry:
Compute the minimum of ``value`` and ``a[i]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
.. py:function:: atomic_min(a: FabricArray[Any], i: int32, j: int32, value: Any)
:noindex:
:nocontentsentry:
Compute the minimum of ``value`` and ``a[i,j]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
.. py:function:: atomic_min(a: FabricArray[Any], i: int32, j: int32, k: int32, value: Any)
:noindex:
:nocontentsentry:
Compute the minimum of ``value`` and ``a[i,j,k]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
.. py:function:: atomic_min(a: FabricArray[Any], i: int32, j: int32, k: int32, l: int32, value: Any)
:noindex:
:nocontentsentry:
Compute the minimum of ``value`` and ``a[i,j,k,l]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
.. py:function:: atomic_min(a: IndexedFabricArray[Any], i: int32, value: Any)
:noindex:
:nocontentsentry:
Compute the minimum of ``value`` and ``a[i]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
.. py:function:: atomic_min(a: IndexedFabricArray[Any], i: int32, j: int32, value: Any)
:noindex:
:nocontentsentry:
Compute the minimum of ``value`` and ``a[i,j]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
.. py:function:: atomic_min(a: IndexedFabricArray[Any], i: int32, j: int32, k: int32, value: Any)
:noindex:
:nocontentsentry:
Compute the minimum of ``value`` and ``a[i,j,k]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
.. py:function:: atomic_min(a: IndexedFabricArray[Any], i: int32, j: int32, k: int32, l: int32, value: Any)
:noindex:
:nocontentsentry:
Compute the minimum of ``value`` and ``a[i,j,k,l]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
.. py:function:: atomic_max(a: Array[Any], i: int32, value: Any)
Compute the maximum of ``value`` and ``a[i]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
.. py:function:: atomic_max(a: Array[Any], i: int32, j: int32, value: Any)
:noindex:
:nocontentsentry:
Compute the maximum of ``value`` and ``a[i,j]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
.. py:function:: atomic_max(a: Array[Any], i: int32, j: int32, k: int32, value: Any)
:noindex:
:nocontentsentry:
Compute the maximum of ``value`` and ``a[i,j,k]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
.. py:function:: atomic_max(a: Array[Any], i: int32, j: int32, k: int32, l: int32, value: Any)
:noindex:
:nocontentsentry:
Compute the maximum of ``value`` and ``a[i,j,k,l]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
.. py:function:: atomic_max(a: FabricArray[Any], i: int32, value: Any)
:noindex:
:nocontentsentry:
Compute the maximum of ``value`` and ``a[i]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
.. py:function:: atomic_max(a: FabricArray[Any], i: int32, j: int32, value: Any)
:noindex:
:nocontentsentry:
Compute the maximum of ``value`` and ``a[i,j]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
.. py:function:: atomic_max(a: FabricArray[Any], i: int32, j: int32, k: int32, value: Any)
:noindex:
:nocontentsentry:
Compute the maximum of ``value`` and ``a[i,j,k]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
.. py:function:: atomic_max(a: FabricArray[Any], i: int32, j: int32, k: int32, l: int32, value: Any)
:noindex:
:nocontentsentry:
Compute the maximum of ``value`` and ``a[i,j,k,l]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
.. py:function:: atomic_max(a: IndexedFabricArray[Any], i: int32, value: Any)
:noindex:
:nocontentsentry:
Compute the maximum of ``value`` and ``a[i]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
.. py:function:: atomic_max(a: IndexedFabricArray[Any], i: int32, j: int32, value: Any)
:noindex:
:nocontentsentry:
Compute the maximum of ``value`` and ``a[i,j]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
.. py:function:: atomic_max(a: IndexedFabricArray[Any], i: int32, j: int32, k: int32, value: Any)
:noindex:
:nocontentsentry:
Compute the maximum of ``value`` and ``a[i,j,k]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
.. py:function:: atomic_max(a: IndexedFabricArray[Any], i: int32, j: int32, k: int32, l: int32, value: Any)
:noindex:
:nocontentsentry:
Compute the maximum of ``value`` and ``a[i,j,k,l]`` and atomically update the array.
.. note:: The operation is only atomic on a per-component basis for vectors and matrices.
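For example, a kernel can combine these built-ins to track the global bounds of an array in a single pass. The sketch below is illustrative only; it assumes ``bounds_min`` and ``bounds_max`` are single-element arrays pre-initialized to suitably large/small sentinel values:

.. code:: python

    @wp.kernel
    def reduce_bounds(values: wp.array(dtype=float),
                      bounds_min: wp.array(dtype=float),
                      bounds_max: wp.array(dtype=float)):
        tid = wp.tid()
        v = values[tid]

        # atomically track the global minimum and maximum of the input values
        wp.atomic_min(bounds_min, 0, v)
        wp.atomic_max(bounds_max, 0, v)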
.. py:function:: lerp(a: Float, b: Float, t: Float) -> Float
Linearly interpolate two values ``a`` and ``b`` using factor ``t``, computed as ``a*(1-t) + b*t``
.. py:function:: lerp(a: Vector[Any,Float], b: Vector[Any,Float], t: Float) -> Vector[Any,Float]
:noindex:
:nocontentsentry:
Linearly interpolate two values ``a`` and ``b`` using factor ``t``, computed as ``a*(1-t) + b*t``
.. py:function:: lerp(a: Matrix[Any,Any,Float], b: Matrix[Any,Any,Float], t: Float) -> Matrix[Any,Any,Float]
:noindex:
:nocontentsentry:
Linearly interpolate two values ``a`` and ``b`` using factor ``t``, computed as ``a*(1-t) + b*t``
.. py:function:: lerp(a: Quaternion[Float], b: Quaternion[Float], t: Float) -> Quaternion[Float]
:noindex:
:nocontentsentry:
Linearly interpolate two values ``a`` and ``b`` using factor ``t``, computed as ``a*(1-t) + b*t``
.. py:function:: lerp(a: Transformation[Float], b: Transformation[Float], t: Float) -> Transformation[Float]
:noindex:
:nocontentsentry:
Linearly interpolate two values ``a`` and ``b`` using factor ``t``, computed as ``a*(1-t) + b*t``
.. py:function:: smoothstep(edge0: Float, edge1: Float, x: Float) -> Float
Smoothly interpolate between two values ``edge0`` and ``edge1`` using a factor ``x``,
and return a result between 0 and 1 computed with cubic Hermite interpolation after clamping ``x`` to the range [``edge0``, ``edge1``].
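As an illustrative sketch (the kernel and array names here are hypothetical), an interpolation factor can be remapped through ``smoothstep`` before blending two vector arrays with ``lerp``:

.. code:: python

    @wp.kernel
    def blend(a: wp.array(dtype=wp.vec3),
              b: wp.array(dtype=wp.vec3),
              out: wp.array(dtype=wp.vec3),
              t: float):
        tid = wp.tid()

        # remap t through a smooth ramp, then interpolate component-wise
        s = wp.smoothstep(0.0, 1.0, t)
        out[tid] = wp.lerp(a[tid], b[tid], s)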
.. py:function:: expect_near(arg1: Float, arg2: Float, tolerance: Float) -> None
Prints an error to stdout if ``arg1`` and ``arg2`` are not closer than tolerance in magnitude
.. py:function:: expect_near(arg1: vec3f, arg2: vec3f, tolerance: float32) -> None
:noindex:
:nocontentsentry:
Prints an error to stdout if any element of ``arg1`` and ``arg2`` are not closer than tolerance in magnitude
Geometry
---------------
.. py:function:: bvh_query_aabb(id: uint64, lower: vec3f, upper: vec3f) -> bvh_query_t
Construct an axis-aligned bounding box query against a BVH object.
This query can be used to iterate over all bounds inside a BVH.
:param id: The BVH identifier
:param lower: The lower bound of the bounding box in BVH space
:param upper: The upper bound of the bounding box in BVH space
.. py:function:: bvh_query_ray(id: uint64, start: vec3f, dir: vec3f) -> bvh_query_t
Construct a ray query against a BVH object.
This query can be used to iterate over all bounds that intersect the ray.
:param id: The BVH identifier
:param start: The start of the ray in BVH space
:param dir: The direction of the ray in BVH space
.. py:function:: bvh_query_next(query: bvh_query_t, index: int32) -> bool
Move to the next bound returned by the query.
The index of the current bound is stored in ``index``, returns ``False`` if there are no more overlapping bounds.
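A minimal sketch of the query loop is shown below; the BVH identifier and query bounds are assumed to be provided by the caller:

.. code:: python

    @wp.kernel
    def count_overlaps(bvh: wp.uint64,
                       lower: wp.vec3,
                       upper: wp.vec3,
                       count: wp.array(dtype=int)):
        # construct an AABB query and visit every overlapping bound
        query = wp.bvh_query_aabb(bvh, lower, upper)
        index = int(0)

        while wp.bvh_query_next(query, index):
            wp.atomic_add(count, 0, 1)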
.. py:function:: mesh_query_point(id: uint64, point: vec3f, max_dist: float32) -> mesh_query_point_t
Computes the closest point on the :class:`Mesh` with identifier ``id`` to the given ``point`` in space.
Identifies the sign of the distance using additional ray-casts to determine if the point is inside or outside.
This method is relatively robust, but does increase computational cost.
See below for additional sign determination methods.
:param id: The mesh identifier
:param point: The point in space to query
:param max_dist: Mesh faces above this distance will not be considered by the query
.. py:function:: mesh_query_point_no_sign(id: uint64, point: vec3f, max_dist: float32) -> mesh_query_point_t
Computes the closest point on the :class:`Mesh` with identifier ``id`` to the given ``point`` in space.
This method does not compute the sign of the point (inside/outside) which makes it faster than other point query methods.
:param id: The mesh identifier
:param point: The point in space to query
:param max_dist: Mesh faces above this distance will not be considered by the query
.. py:function:: mesh_query_furthest_point_no_sign(id: uint64, point: vec3f, min_dist: float32) -> mesh_query_point_t
Computes the furthest point on the :class:`Mesh` with identifier ``id`` from the given ``point`` in space.
This method does not compute the sign of the point (inside/outside).
:param id: The mesh identifier
:param point: The point in space to query
:param min_dist: Mesh faces below this distance will not be considered by the query
.. py:function:: mesh_query_point_sign_normal(id: uint64, point: vec3f, max_dist: float32, epsilon: float32) -> mesh_query_point_t
Computes the closest point on the :class:`Mesh` with identifier ``id`` to the given ``point`` in space.
Identifies the sign of the distance (inside/outside) using the angle-weighted pseudo normal.
This approach to sign determination is robust for well conditioned meshes that are watertight and non-self intersecting.
It is also comparatively fast to compute.
:param id: The mesh identifier
:param point: The point in space to query
:param max_dist: Mesh faces above this distance will not be considered by the query
:param epsilon: Tolerance for treating distance values as equal when locating the minimum-distance vertex/face/edge, as a
fraction of the average edge length; also used for treating the closest point as lying on an edge/vertex (default 1e-3)
.. py:function:: mesh_query_point_sign_winding_number(id: uint64, point: vec3f, max_dist: float32, accuracy: float32, threshold: float32) -> mesh_query_point_t
Computes the closest point on the :class:`Mesh` with identifier ``id`` to the given point in space.
Identifies the sign using the winding number of the mesh relative to the query point. This method of sign determination is robust for poorly conditioned meshes
and provides a smooth approximation to sign even when the mesh is not watertight. This method is the most robust and accurate of the sign determination methods
but also the most expensive.
.. note:: The :class:`Mesh` object must be constructed with ``support_winding_number=True`` for this method to return correct results.
:param id: The mesh identifier
:param point: The point in space to query
:param max_dist: Mesh faces above this distance will not be considered by the query
:param accuracy: Accuracy for computing the winding number with fast winding number method utilizing second-order dipole approximation, default 2.0
:param threshold: The threshold of the winding number to be considered inside, default 0.5
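As an illustrative sketch of the closest-point queries above, the following kernel projects points onto a mesh; it assumes the returned ``mesh_query_point_t`` exposes ``result``, ``face``, ``u``, and ``v`` members:

.. code:: python

    @wp.kernel
    def project_points(mesh: wp.uint64,
                       points: wp.array(dtype=wp.vec3),
                       projected: wp.array(dtype=wp.vec3)):
        tid = wp.tid()
        p = points[tid]

        # find the closest point on the mesh within a large search radius
        query = wp.mesh_query_point_no_sign(mesh, p, 1.0e6)

        if query.result:
            # reconstruct the closest position from the face index and barycentric coordinates
            projected[tid] = wp.mesh_eval_position(mesh, query.face, query.u, query.v)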
.. py:function:: mesh_query_ray(id: uint64, start: vec3f, dir: vec3f, max_t: float32) -> mesh_query_ray_t
Computes the closest ray hit on the :class:`Mesh` with identifier ``id``.
:param id: The mesh identifier
:param start: The start point of the ray
:param dir: The ray direction (should be normalized)
:param max_t: The maximum distance along the ray to check for intersections
.. py:function:: mesh_query_aabb(id: uint64, lower: vec3f, upper: vec3f) -> mesh_query_aabb_t
Construct an axis-aligned bounding box query against a :class:`Mesh`.
This query can be used to iterate over all triangles inside a volume.
:param id: The mesh identifier
:param lower: The lower bound of the bounding box in mesh space
:param upper: The upper bound of the bounding box in mesh space
.. py:function:: mesh_query_aabb_next(query: mesh_query_aabb_t, index: int32) -> bool
Move to the next triangle overlapping the query bounding box.
The index of the current face is stored in ``index``, returns ``False`` if there are no more overlapping triangles.
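A minimal sketch of the AABB traversal loop, counting the triangles whose bounds overlap a query box (names are illustrative):

.. code:: python

    @wp.kernel
    def count_tris_in_box(mesh: wp.uint64,
                          lower: wp.vec3,
                          upper: wp.vec3,
                          count: wp.array(dtype=int)):
        query = wp.mesh_query_aabb(mesh, lower, upper)
        face = int(0)

        # visit every triangle whose bounds overlap the box
        while wp.mesh_query_aabb_next(query, face):
            wp.atomic_add(count, 0, 1)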
.. py:function:: mesh_eval_position(id: uint64, face: int32, bary_u: float32, bary_v: float32) -> vec3f
Evaluates the position on the :class:`Mesh` given a face index and barycentric coordinates.
.. py:function:: mesh_eval_velocity(id: uint64, face: int32, bary_u: float32, bary_v: float32) -> vec3f
Evaluates the velocity on the :class:`Mesh` given a face index and barycentric coordinates.
.. py:function:: hash_grid_query(id: uint64, point: vec3f, max_dist: float32) -> hash_grid_query_t
Construct a point query against a :class:`HashGrid`.
This query can be used to iterate over all neighboring points within a fixed radius from the query point.
.. py:function:: hash_grid_query_next(query: hash_grid_query_t, index: int32) -> bool
Move to the next point in the hash grid query.
The index of the current neighbor is stored in ``index``, returns ``False`` if there are no more neighbors.
.. py:function:: hash_grid_point_id(id: uint64, index: int32) -> int
Return the index of a point in the :class:`HashGrid`.
This can be used to reorder threads such that grid traversal occurs in a spatially coherent order.
Returns -1 if the :class:`HashGrid` has not been reserved.
.. py:function:: intersect_tri_tri(v0: vec3f, v1: vec3f, v2: vec3f, u0: vec3f, u1: vec3f, u2: vec3f) -> int
Tests for intersection between two triangles (v0, v1, v2) and (u0, u1, u2) using Möller's method.
Returns > 0 if triangles intersect.
.. py:function:: mesh_get(id: uint64) -> Mesh
Retrieves the mesh given its index. [1]_
.. py:function:: mesh_eval_face_normal(id: uint64, face: int32) -> vec3f
Evaluates the face normal of the mesh given a face index.
.. py:function:: mesh_get_point(id: uint64, index: int32) -> vec3f
Returns the point of the mesh given an index.
.. py:function:: mesh_get_velocity(id: uint64, index: int32) -> vec3f
Returns the velocity of the mesh given an index.
.. py:function:: mesh_get_index(id: uint64, index: int32) -> int
Returns the point-index of the mesh given a face-vertex index.
.. py:function:: closest_point_edge_edge(p1: vec3f, q1: vec3f, p2: vec3f, q2: vec3f, epsilon: float32) -> vec3f
Finds the closest points between two edges.
Returns barycentric weights to the points on each edge, as well as the closest distance between the edges.
:param p1: First point of first edge
:param q1: Second point of first edge
:param p2: First point of second edge
:param q2: Second point of second edge
:param epsilon: Zero tolerance for determining if points in an edge are degenerate.
:returns: A ``vec3`` containing ``(s, t, d)``, where ``s`` in [0,1] is the barycentric weight for the first edge, ``t`` is the barycentric weight for the second edge, and ``d`` is the distance between the two edges at these two closest points.
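A small illustrative kernel that uses the returned weights to reconstruct the closest points themselves (names are hypothetical):

.. code:: python

    @wp.kernel
    def edge_closest_points(p1: wp.vec3, q1: wp.vec3,
                            p2: wp.vec3, q2: wp.vec3,
                            out: wp.array(dtype=wp.vec3)):
        # st = (s, t, d): weights along each edge and the closest distance
        st = wp.closest_point_edge_edge(p1, q1, p2, q2, 1.0e-6)

        # reconstruct the closest point on each edge from the barycentric weights
        out[0] = p1 + (q1 - p1) * st[0]
        out[1] = p2 + (q2 - p2) * st[1]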
Volumes
---------------
.. py:function:: volume_sample(id: uint64, uvw: vec3f, sampling_mode: int32, dtype: Any)
Sample the volume of type ``dtype`` given by ``id`` at the volume local-space point ``uvw``.
Interpolation should be :attr:`warp.Volume.CLOSEST` or :attr:`wp.Volume.LINEAR`.
.. py:function:: volume_sample_grad(id: uint64, uvw: vec3f, sampling_mode: int32, grad: Any, dtype: Any)
Sample the volume given by ``id`` and its gradient at the volume local-space point ``uvw``.
Interpolation should be :attr:`warp.Volume.CLOSEST` or :attr:`wp.Volume.LINEAR`.
.. py:function:: volume_lookup(id: uint64, i: int32, j: int32, k: int32, dtype: Any)
Returns the value of voxel with coordinates ``i``, ``j``, ``k`` for a volume of type ``dtype``.
If the voxel at this index does not exist, this function returns the background value.
.. py:function:: volume_store(id: uint64, i: int32, j: int32, k: int32, value: Any)
Store ``value`` at the voxel with coordinates ``i``, ``j``, ``k``.
.. py:function:: volume_sample_f(id: uint64, uvw: vec3f, sampling_mode: int32) -> float
Sample the volume given by ``id`` at the volume local-space point ``uvw``.
Interpolation should be :attr:`warp.Volume.CLOSEST` or :attr:`wp.Volume.LINEAR`.
.. py:function:: volume_sample_grad_f(id: uint64, uvw: vec3f, sampling_mode: int32, grad: vec3f) -> float
Sample the volume and its gradient given by ``id`` at the volume local-space point ``uvw``.
Interpolation should be :attr:`warp.Volume.CLOSEST` or :attr:`wp.Volume.LINEAR`.
.. py:function:: volume_lookup_f(id: uint64, i: int32, j: int32, k: int32) -> float
Returns the value of voxel with coordinates ``i``, ``j``, ``k``.
If the voxel at this index does not exist, this function returns the background value
.. py:function:: volume_store_f(id: uint64, i: int32, j: int32, k: int32, value: float32) -> None
Store ``value`` at the voxel with coordinates ``i``, ``j``, ``k``.
.. py:function:: volume_sample_v(id: uint64, uvw: vec3f, sampling_mode: int32) -> vec3f
Sample the vector volume given by ``id`` at the volume local-space point ``uvw``.
Interpolation should be :attr:`warp.Volume.CLOSEST` or :attr:`wp.Volume.LINEAR`.
.. py:function:: volume_lookup_v(id: uint64, i: int32, j: int32, k: int32) -> vec3f
Returns the vector value of voxel with coordinates ``i``, ``j``, ``k``.
If the voxel at this index does not exist, this function returns the background value.
.. py:function:: volume_store_v(id: uint64, i: int32, j: int32, k: int32, value: vec3f) -> None
Store ``value`` at the voxel with coordinates ``i``, ``j``, ``k``.
.. py:function:: volume_sample_i(id: uint64, uvw: vec3f) -> int
Sample the :class:`int32` volume given by ``id`` at the volume local-space point ``uvw``.
.. py:function:: volume_lookup_i(id: uint64, i: int32, j: int32, k: int32) -> int
Returns the :class:`int32` value of voxel with coordinates ``i``, ``j``, ``k``.
If the voxel at this index does not exist, this function returns the background value.
.. py:function:: volume_store_i(id: uint64, i: int32, j: int32, k: int32, value: int32) -> None
Store ``value`` at the voxel with coordinates ``i``, ``j``, ``k``.
.. py:function:: volume_sample_index(id: uint64, uvw: vec3f, sampling_mode: int32, voxel_data: Array[Any], background: Any)
Sample the volume given by ``id`` at the volume local-space point ``uvw``.
Values for allocated voxels are read from the ``voxel_data`` array, and ``background`` is used as the value of non-existing voxels.
Interpolation should be :attr:`warp.Volume.CLOSEST` or :attr:`wp.Volume.LINEAR`.
This function is available for both index grids and classical volumes.
.. py:function:: volume_sample_grad_index(id: uint64, uvw: vec3f, sampling_mode: int32, voxel_data: Array[Any], background: Any, grad: Any)
Sample the volume given by ``id`` and its gradient at the volume local-space point ``uvw``.
Values for allocated voxels are read from the ``voxel_data`` array, and ``background`` is used as the value of non-existing voxels.
Interpolation should be :attr:`warp.Volume.CLOSEST` or :attr:`wp.Volume.LINEAR`.
This function is available for both index grids and classical volumes.
.. py:function:: volume_lookup_index(id: uint64, i: int32, j: int32, k: int32) -> int32
Returns the index associated with the voxel at coordinates ``i``, ``j``, ``k``.
If the voxel at this index does not exist, this function returns -1.
This function is available for both index grids and classical volumes.
.. py:function:: volume_index_to_world(id: uint64, uvw: vec3f) -> vec3f
Transform a point ``uvw`` defined in volume index space to world space given the volume's intrinsic affine transformation.
.. py:function:: volume_world_to_index(id: uint64, xyz: vec3f) -> vec3f
Transform a point ``xyz`` defined in volume world space to the volume's index space given the volume's intrinsic affine transformation.
.. py:function:: volume_index_to_world_dir(id: uint64, uvw: vec3f) -> vec3f
Transform a direction ``uvw`` defined in volume index space to world space given the volume's intrinsic affine transformation.
.. py:function:: volume_world_to_index_dir(id: uint64, xyz: vec3f) -> vec3f
Transform a direction ``xyz`` defined in volume world space to the volume's index space given the volume's intrinsic affine transformation.
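Putting these built-ins together, a kernel might sample a float (e.g. signed-distance) volume at world-space points roughly as follows; the sketch is illustrative only:

.. code:: python

    @wp.kernel
    def sample_sdf(volume: wp.uint64,
                   points: wp.array(dtype=wp.vec3),
                   sdf: wp.array(dtype=float)):
        tid = wp.tid()

        # transform the world-space point into the volume's index space
        uvw = wp.volume_world_to_index(volume, points[tid])

        # trilinear sample of the float volume
        sdf[tid] = wp.volume_sample_f(volume, uvw, wp.Volume.LINEAR)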
Random
---------------
.. py:function:: rand_init(seed: int32) -> uint32
Initialize a new random number generator given a user-defined seed. Returns a 32-bit integer representing the RNG state.
.. py:function:: rand_init(seed: int32, offset: int32) -> uint32
:noindex:
:nocontentsentry:
Initialize a new random number generator given a user-defined seed and an offset.
This alternative constructor can be useful in parallel programs, where a kernel as a whole should share a seed,
but each thread should generate uncorrelated values. In this case usage should be ``r = rand_init(seed, tid)``
.. py:function:: randi(state: uint32) -> int
Return a random integer in the range [0, 2^32).
.. py:function:: randi(state: uint32, min: int32, max: int32) -> int
:noindex:
:nocontentsentry:
Return a random integer in the range [min, max).
.. py:function:: randf(state: uint32) -> float
Return a random float in the range [0.0, 1.0).
.. py:function:: randf(state: uint32, min: float32, max: float32) -> float
:noindex:
:nocontentsentry:
Return a random float in the range [min, max).
.. py:function:: randn(state: uint32) -> float
Sample a normal distribution.
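A typical per-thread usage sketch, seeding every thread from a shared seed offset by its thread index (kernel and array names are illustrative):

.. code:: python

    @wp.kernel
    def scatter(seed: int,
                points: wp.array(dtype=wp.vec3)):
        tid = wp.tid()

        # per-thread RNG state: shared seed, offset by the thread index
        state = wp.rand_init(seed, tid)

        # uniform sample in [-1, 1)^3
        points[tid] = wp.vec3(wp.randf(state, -1.0, 1.0),
                              wp.randf(state, -1.0, 1.0),
                              wp.randf(state, -1.0, 1.0))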
.. py:function:: sample_cdf(state: uint32, cdf: Array[float32]) -> int
Inverse-transform sample a cumulative distribution function.
.. py:function:: sample_triangle(state: uint32) -> vec2f
Uniformly sample a triangle. Returns sample barycentric coordinates.
.. py:function:: sample_unit_ring(state: uint32) -> vec2f
Uniformly sample a ring in the xy plane.
.. py:function:: sample_unit_disk(state: uint32) -> vec2f
Uniformly sample a disk in the xy plane.
.. py:function:: sample_unit_sphere_surface(state: uint32) -> vec3f
Uniformly sample a unit sphere surface.
.. py:function:: sample_unit_sphere(state: uint32) -> vec3f
Uniformly sample a unit sphere.
.. py:function:: sample_unit_hemisphere_surface(state: uint32) -> vec3f
Uniformly sample a unit hemisphere surface.
.. py:function:: sample_unit_hemisphere(state: uint32) -> vec3f
Uniformly sample a unit hemisphere.
.. py:function:: sample_unit_square(state: uint32) -> vec2f
Uniformly sample a unit square.
.. py:function:: sample_unit_cube(state: uint32) -> vec3f
Uniformly sample a unit cube.
.. py:function:: poisson(state: uint32, lam: float32) -> uint32
Generate a random sample from a Poisson distribution.
:param state: RNG state
:param lam: The expected value of the distribution
.. py:function:: noise(state: uint32, x: float32) -> float
Non-periodic Perlin-style noise in 1D.
.. py:function:: noise(state: uint32, xy: vec2f) -> float
:noindex:
:nocontentsentry:
Non-periodic Perlin-style noise in 2D.
.. py:function:: noise(state: uint32, xyz: vec3f) -> float
:noindex:
:nocontentsentry:
Non-periodic Perlin-style noise in 3D.
.. py:function:: noise(state: uint32, xyzt: vec4f) -> float
:noindex:
:nocontentsentry:
Non-periodic Perlin-style noise in 4D.
.. py:function:: pnoise(state: uint32, x: float32, px: int32) -> float
Periodic Perlin-style noise in 1D.
.. py:function:: pnoise(state: uint32, xy: vec2f, px: int32, py: int32) -> float
:noindex:
:nocontentsentry:
Periodic Perlin-style noise in 2D.
.. py:function:: pnoise(state: uint32, xyz: vec3f, px: int32, py: int32, pz: int32) -> float
:noindex:
:nocontentsentry:
Periodic Perlin-style noise in 3D.
.. py:function:: pnoise(state: uint32, xyzt: vec4f, px: int32, py: int32, pz: int32, pt: int32) -> float
:noindex:
:nocontentsentry:
Periodic Perlin-style noise in 4D.
.. py:function:: curlnoise(state: uint32, xy: vec2f, octaves: uint32, lacunarity: float32, gain: float32) -> vec2f
Divergence-free vector field based on the gradient of a Perlin noise function. [1]_
.. py:function:: curlnoise(state: uint32, xyz: vec3f, octaves: uint32, lacunarity: float32, gain: float32) -> vec3f
:noindex:
:nocontentsentry:
Divergence-free vector field based on the curl of three Perlin noise functions. [1]_
.. py:function:: curlnoise(state: uint32, xyzt: vec4f, octaves: uint32, lacunarity: float32, gain: float32) -> vec3f
:noindex:
:nocontentsentry:
Divergence-free vector field based on the curl of three Perlin noise functions. [1]_
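As an illustrative sketch, the curl noise built-in can drive a simple advection step; the octave, lacunarity, and gain values below are arbitrary:

.. code:: python

    @wp.kernel
    def advect(seed: int,
               positions: wp.array(dtype=wp.vec3),
               dt: float):
        tid = wp.tid()
        state = wp.rand_init(seed)

        # divergence-free velocity field sampled from curl noise
        vel = wp.curlnoise(state, positions[tid], wp.uint32(1), 2.0, 0.5)
        positions[tid] = positions[tid] + vel * dt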
Other
---------------
.. py:function:: lower_bound(arr: Array[Scalar], value: Scalar) -> int
Search a sorted array ``arr`` for the closest element greater than or equal to ``value``.
.. py:function:: lower_bound(arr: Array[Scalar], arr_begin: int32, arr_end: int32, value: Scalar) -> int
:noindex:
:nocontentsentry:
Search a sorted array ``arr`` in the range [arr_begin, arr_end) for the closest element greater than or equal to ``value``.
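For example, ``lower_bound`` can be used to bin samples against a sorted array of bin edges (illustrative sketch):

.. code:: python

    @wp.kernel
    def find_bin(edges: wp.array(dtype=float),
                 samples: wp.array(dtype=float),
                 bins: wp.array(dtype=int)):
        tid = wp.tid()

        # index of the first element of the sorted 'edges' array >= the sample
        bins[tid] = wp.lower_bound(edges, samples[tid])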
.. py:function:: bit_and(x: Int, y: Int) -> Int
.. py:function:: bit_or(x: Int, y: Int) -> Int
.. py:function:: bit_xor(x: Int, y: Int) -> Int
.. py:function:: lshift(x: Int, y: Int) -> Int
.. py:function:: rshift(x: Int, y: Int) -> Int
.. py:function:: invert(x: Int) -> Int
Operators
---------------
.. py:function:: add(x: Scalar, y: Scalar) -> Scalar
.. py:function:: add(x: Vector[Any,Scalar], y: Vector[Any,Scalar]) -> Vector[Any,Scalar]
:noindex:
:nocontentsentry:
.. py:function:: add(x: Quaternion[Scalar], y: Quaternion[Scalar]) -> Quaternion[Scalar]
:noindex:
:nocontentsentry:
.. py:function:: add(x: Matrix[Any,Any,Scalar], y: Matrix[Any,Any,Scalar]) -> Matrix[Any,Any,Scalar]
:noindex:
:nocontentsentry:
.. py:function:: add(x: Transformation[Scalar], y: Transformation[Scalar]) -> Transformation[Scalar]
:noindex:
:nocontentsentry:
.. py:function:: sub(x: Scalar, y: Scalar) -> Scalar
.. py:function:: sub(x: Vector[Any,Scalar], y: Vector[Any,Scalar]) -> Vector[Any,Scalar]
:noindex:
:nocontentsentry:
.. py:function:: sub(x: Matrix[Any,Any,Scalar], y: Matrix[Any,Any,Scalar]) -> Matrix[Any,Any,Scalar]
:noindex:
:nocontentsentry:
.. py:function:: sub(x: Quaternion[Scalar], y: Quaternion[Scalar]) -> Quaternion[Scalar]
:noindex:
:nocontentsentry:
.. py:function:: sub(x: Transformation[Scalar], y: Transformation[Scalar]) -> Transformation[Scalar]
:noindex:
:nocontentsentry:
.. py:function:: mul(x: Scalar, y: Scalar) -> Scalar
.. py:function:: mul(x: Vector[Any,Scalar], y: Scalar) -> Vector[Any,Scalar]
:noindex:
:nocontentsentry:
.. py:function:: mul(x: Scalar, y: Vector[Any,Scalar]) -> Vector[Any,Scalar]
:noindex:
:nocontentsentry:
.. py:function:: mul(x: Quaternion[Scalar], y: Scalar) -> Quaternion[Scalar]
:noindex:
:nocontentsentry:
.. py:function:: mul(x: Scalar, y: Quaternion[Scalar]) -> Quaternion[Scalar]
:noindex:
:nocontentsentry:
.. py:function:: mul(x: Quaternion[Scalar], y: Quaternion[Scalar]) -> Quaternion[Scalar]
:noindex:
:nocontentsentry:
.. py:function:: mul(x: Scalar, y: Matrix[Any,Any,Scalar]) -> Matrix[Any,Any,Scalar]
:noindex:
:nocontentsentry:
.. py:function:: mul(x: Matrix[Any,Any,Scalar], y: Scalar) -> Matrix[Any,Any,Scalar]
:noindex:
:nocontentsentry:
.. py:function:: mul(x: Matrix[Any,Any,Scalar], y: Vector[Any,Scalar]) -> Vector[Any,Scalar]
:noindex:
:nocontentsentry:
.. py:function:: mul(x: Vector[Any,Scalar], y: Matrix[Any,Any,Scalar]) -> Vector[Any,Scalar]
:noindex:
:nocontentsentry:
.. py:function:: mul(x: Matrix[Any,Any,Scalar], y: Matrix[Any,Any,Scalar])
:noindex:
:nocontentsentry:
.. py:function:: mul(x: Transformation[Scalar], y: Transformation[Scalar]) -> Transformation[Scalar]
:noindex:
:nocontentsentry:
.. py:function:: mul(x: Scalar, y: Transformation[Scalar]) -> Transformation[Scalar]
:noindex:
:nocontentsentry:
.. py:function:: mul(x: Transformation[Scalar], y: Scalar) -> Transformation[Scalar]
:noindex:
:nocontentsentry:
.. py:function:: mod(x: Scalar, y: Scalar) -> Scalar
.. py:function:: div(x: Scalar, y: Scalar) -> Scalar
.. py:function:: div(x: Vector[Any,Scalar], y: Scalar) -> Vector[Any,Scalar]
:noindex:
:nocontentsentry:
.. py:function:: div(x: Scalar, y: Vector[Any,Scalar]) -> Vector[Any,Scalar]
:noindex:
:nocontentsentry:
.. py:function:: div(x: Matrix[Any,Any,Scalar], y: Scalar) -> Matrix[Any,Any,Scalar]
:noindex:
:nocontentsentry:
.. py:function:: div(x: Scalar, y: Matrix[Any,Any,Scalar]) -> Matrix[Any,Any,Scalar]
:noindex:
:nocontentsentry:
.. py:function:: div(x: Quaternion[Scalar], y: Scalar) -> Quaternion[Scalar]
:noindex:
:nocontentsentry:
.. py:function:: div(x: Scalar, y: Quaternion[Scalar]) -> Quaternion[Scalar]
:noindex:
:nocontentsentry:
.. py:function:: floordiv(x: Scalar, y: Scalar) -> Scalar
.. py:function:: pos(x: Scalar) -> Scalar
.. py:function:: pos(x: Vector[Any,Scalar]) -> Vector[Any,Scalar]
:noindex:
:nocontentsentry:
.. py:function:: pos(x: Quaternion[Scalar]) -> Quaternion[Scalar]
:noindex:
:nocontentsentry:
.. py:function:: pos(x: Matrix[Any,Any,Scalar]) -> Matrix[Any,Any,Scalar]
:noindex:
:nocontentsentry:
.. py:function:: neg(x: Scalar) -> Scalar
.. py:function:: neg(x: Vector[Any,Scalar]) -> Vector[Any,Scalar]
:noindex:
:nocontentsentry:
.. py:function:: neg(x: Quaternion[Scalar]) -> Quaternion[Scalar]
:noindex:
:nocontentsentry:
.. py:function:: neg(x: Matrix[Any,Any,Scalar]) -> Matrix[Any,Any,Scalar]
:noindex:
:nocontentsentry:
.. py:function:: unot(b: bool) -> bool
.. py:function:: unot(b: int8) -> bool
:noindex:
:nocontentsentry:
.. py:function:: unot(b: uint8) -> bool
:noindex:
:nocontentsentry:
.. py:function:: unot(b: int16) -> bool
:noindex:
:nocontentsentry:
.. py:function:: unot(b: uint16) -> bool
:noindex:
:nocontentsentry:
.. py:function:: unot(b: int32) -> bool
:noindex:
:nocontentsentry:
.. py:function:: unot(b: uint32) -> bool
:noindex:
:nocontentsentry:
.. py:function:: unot(b: int64) -> bool
:noindex:
:nocontentsentry:
.. py:function:: unot(b: uint64) -> bool
:noindex:
:nocontentsentry:
.. py:function:: unot(a: Array[Any]) -> bool
:noindex:
:nocontentsentry:
.. rubric:: Footnotes
.. [1] Function gradients have not been implemented for backpropagation.
| 63,036 | reStructuredText | 28.224386 | 238 | 0.669316 |
NVIDIA/warp/docs/modules/runtime.rst | Runtime Reference
=================
.. currentmodule:: warp
This section describes the Warp Python runtime API, how to manage memory, launch kernels, and high-level functionality
for dealing with objects such as meshes and volumes. The APIs described in this section are intended to be used at
the *Python Scope* and run inside the CPython interpreter. For a comprehensive list of functions available at
the *Kernel Scope*, please see the :doc:`functions` section.
Kernels
-------
Kernels are launched with the :func:`wp.launch() <launch>` function on a specific device (CPU/GPU)::
wp.launch(simple_kernel, dim=1024, inputs=[a, b, c], device="cuda")
Kernels may be launched with multi-dimensional grid bounds. In this case threads are not assigned a single index,
but a coordinate in an n-dimensional grid, e.g.::
wp.launch(complex_kernel, dim=(128, 128, 3), ...)
Launches a 3D grid of threads with dimension 128 x 128 x 3. To retrieve the 3D index for each thread use the following syntax::
i,j,k = wp.tid()
.. note::
Currently kernels launched on CPU devices will be executed in serial.
Kernels launched on CUDA devices will be launched in parallel with a fixed block-size.
.. note::
Note that all the kernel inputs must live on the target device, or a runtime exception will be raised.
.. autofunction:: launch
.. _Runtime Kernel Creation:
Runtime Kernel Creation
#######################
It is often desirable to specialize kernels for different types, constants, or functions at runtime.
We can achieve this through the use of runtime kernel specialization using Python closures.
For example, we might require a variety of kernels that execute particular functions for each item in an array.
We might also want this function call to be valid for a variety of data types. Making use of closure and generics, we can generate
these kernels using a single kernel definition::
def make_kernel(func, dtype):
def closure_kernel_fn(data: wp.array(dtype=dtype), out: wp.array(dtype=dtype)):
tid = wp.tid()
out[tid] = func(data[tid])
return wp.Kernel(closure_kernel_fn)
In practice, we might use our kernel generator, ``make_kernel()`` as follows::
@wp.func
def sqr(x: Any) -> Any:
return x * x
@wp.func
def cube(x: Any) -> Any:
return sqr(x) * x
sqr_float = make_kernel(sqr, wp.float32)
cube_double = make_kernel(cube, wp.float64)
arr = [1.0, 2.0, 3.0]
N = len(arr)
data_float = wp.array(arr, dtype=wp.float32, device=device)
data_double = wp.array(arr, dtype=wp.float64, device=device)
out_float = wp.zeros(N, dtype=wp.float32, device=device)
out_double = wp.zeros(N, dtype=wp.float64, device=device)
wp.launch(sqr_float, dim=N, inputs=[data_float], outputs=[out_float], device=device)
wp.launch(cube_double, dim=N, inputs=[data_double], outputs=[out_double], device=device)
We can specialize kernel definitions over Warp constants similarly. The following generates kernels that add a specified constant
to a generic-typed array value::
def make_add_kernel(key, constant):
def closure_kernel_fn(data: wp.array(dtype=Any), out: wp.array(dtype=Any)):
tid = wp.tid()
out[tid] = data[tid] + constant
return wp.Kernel(closure_kernel_fn, key=key)
add_ones_int = make_add_kernel("add_one", wp.constant(1))
add_ones_vec3 = make_add_kernel("add_ones_vec3", wp.constant(wp.vec3(1.0, 1.0, 1.0)))
a = wp.zeros(2, dtype=int)
b = wp.zeros(2, dtype=wp.vec3)
a_out = wp.zeros_like(a)
b_out = wp.zeros_like(b)
wp.launch(add_ones_int, dim=a.size, inputs=[a], outputs=[a_out], device=device)
wp.launch(add_ones_vec3, dim=b.size, inputs=[b], outputs=[b_out], device=device)
.. _Arrays:
Arrays
------
Arrays are the fundamental memory abstraction in Warp; they are created through the following global constructors: ::
wp.empty(shape=1024, dtype=wp.vec3, device="cpu")
wp.zeros(shape=1024, dtype=float, device="cuda")
wp.full(shape=1024, value=10, dtype=int, device="cuda")
Arrays can also be constructed directly from ``numpy`` ndarrays as follows: ::
r = np.random.rand(1024)
# copy to Warp owned array
a = wp.array(r, dtype=float, device="cpu")
# return a Warp array wrapper around the NumPy data (zero-copy)
a = wp.array(r, dtype=float, copy=False, device="cpu")
# return a Warp copy of the array data on the GPU
a = wp.array(r, dtype=float, device="cuda")
Note that for multi-dimensional data the ``dtype`` parameter must be specified explicitly, e.g.: ::
r = np.random.rand(1024, 3)
# initialize as an array of vec3 objects
a = wp.array(r, dtype=wp.vec3, device="cuda")
If the shapes are incompatible, an error will be raised.
Warp arrays can also be constructed from objects that define the ``__cuda_array_interface__`` attribute. For example: ::
import cupy
import warp as wp
device = wp.get_cuda_device()
r = cupy.arange(10)
# return a Warp array wrapper around the cupy data (zero-copy)
a = wp.array(r, device=device)
Arrays can be moved between devices using the ``array.to()`` method: ::
host_array = wp.array(a, dtype=float, device="cpu")
# allocate and copy to GPU
device_array = host_array.to("cuda")
Additionally, arrays can be copied directly between memory spaces: ::
src_array = wp.array(a, dtype=float, device="cpu")
dest_array = wp.empty_like(src_array)
# copy from source CPU buffer to GPU
wp.copy(dest_array, src_array)
.. autoclass:: array
:members:
:undoc-members:
:exclude-members: vars
Multi-dimensional Arrays
########################
Multi-dimensional arrays can be constructed by passing a tuple of sizes for each dimension, e.g.: the following constructs a 2d array of size 1024x16::
wp.zeros(shape=(1024, 16), dtype=float, device="cuda")
When passing multi-dimensional arrays to kernels users must specify the expected array dimension inside the kernel signature,
e.g. to pass a 2d array to a kernel the number of dims is specified using the ``ndim=2`` parameter::
@wp.kernel
def test(input: wp.array(dtype=float, ndim=2)):
Type-hint helpers are provided for common array sizes, e.g.: ``array2d()``, ``array3d()``, which are equivalent to calling ``array(..., ndim=2)``, etc. To index a multi-dimensional array use the following kernel syntax::
# returns a float from the 2d array
value = input[i,j]
To create an array slice use the following syntax, where the number of indices is less than the array dimensions::
# returns a 1d array slice representing a row of the 2d array
row = input[i]
Slice operators can be concatenated, e.g.: ``s = array[i][j][k]``. Slices can be passed to ``wp.func`` user functions provided
the function also declares the expected array dimension. Currently only single-index slicing is supported.
.. note::
Currently Warp limits arrays to 4 dimensions maximum. This is in addition to the contained datatype, which may be 1-2 dimensional for vector and matrix types such as ``vec3`` and ``mat33``.
The following construction methods are provided for allocating zero-initialized and empty (non-initialized) arrays:
.. autofunction:: zeros
.. autofunction:: zeros_like
.. autofunction:: ones
.. autofunction:: ones_like
.. autofunction:: full
.. autofunction:: full_like
.. autofunction:: empty
.. autofunction:: empty_like
.. autofunction:: copy
.. autofunction:: clone
Matrix Multiplication
#####################
Warp 2D array multiplication is built on NVIDIA's `CUTLASS <https://github.com/NVIDIA/cutlass>`_ library,
which enables fast matrix multiplication of large arrays on the GPU.
If no GPU is detected, matrix multiplication falls back to NumPy's implementation on the CPU.
Matrix multiplication is fully differentiable, and can be recorded on the tape like so::
tape = wp.Tape()
with tape:
wp.matmul(A, B, C, D, device=device)
wp.launch(loss_kernel, dim=(m, n), inputs=[D, loss], device=device)
tape.backward(loss=loss)
A_grad = A.grad.numpy()
Using the ``@`` operator (``D = A @ B``) will default to the same CUTLASS algorithm used in ``wp.matmul``.
.. autofunction:: matmul
.. autofunction:: batched_matmul
Data Types
----------
Scalar Types
############
The following scalar storage types are supported for array structures:
+---------+------------------------+
| bool | boolean |
+---------+------------------------+
| int8 | signed byte |
+---------+------------------------+
| uint8 | unsigned byte |
+---------+------------------------+
| int16 | signed short |
+---------+------------------------+
| uint16 | unsigned short |
+---------+------------------------+
| int32 | signed integer |
+---------+------------------------+
| uint32 | unsigned integer |
+---------+------------------------+
| int64 | signed long integer |
+---------+------------------------+
| uint64 | unsigned long integer |
+---------+------------------------+
| float16 | half-precision float |
+---------+------------------------+
| float32 | single-precision float |
+---------+------------------------+
| float64 | double-precision float |
+---------+------------------------+
Warp supports ``float`` and ``int`` as aliases for ``wp.float32`` and ``wp.int32`` respectively.
.. _vec:
Vectors
#######
Warp provides built-in math and geometry types for common simulation and graphics problems.
A full reference for operators and functions for these types is available in the :doc:`/modules/functions`.
Warp supports vectors of numbers with an arbitrary length/numeric type. The built-in concrete types are as follows:
+-----------------------+------------------------------------------------+
| vec2 vec3 vec4 | 2D, 3D, 4D vector of single-precision floats |
+-----------------------+------------------------------------------------+
| vec2b vec3b vec4b | 2D, 3D, 4D vector of signed bytes |
+-----------------------+------------------------------------------------+
| vec2ub vec3ub vec4ub | 2D, 3D, 4D vector of unsigned bytes |
+-----------------------+------------------------------------------------+
| vec2s vec3s vec4s | 2D, 3D, 4D vector of signed shorts |
+-----------------------+------------------------------------------------+
| vec2us vec3us vec4us | 2D, 3D, 4D vector of unsigned shorts |
+-----------------------+------------------------------------------------+
| vec2i vec3i vec4i | 2D, 3D, 4D vector of signed integers |
+-----------------------+------------------------------------------------+
| vec2ui vec3ui vec4ui | 2D, 3D, 4D vector of unsigned integers |
+-----------------------+------------------------------------------------+
| vec2l vec3l vec4l | 2D, 3D, 4D vector of signed long integers |
+-----------------------+------------------------------------------------+
| vec2ul vec3ul vec4ul | 2D, 3D, 4D vector of unsigned long integers |
+-----------------------+------------------------------------------------+
| vec2h vec3h vec4h | 2D, 3D, 4D vector of half-precision floats |
+-----------------------+------------------------------------------------+
| vec2f vec3f vec4f | 2D, 3D, 4D vector of single-precision floats |
+-----------------------+------------------------------------------------+
| vec2d vec3d vec4d | 2D, 3D, 4D vector of double-precision floats |
+-----------------------+------------------------------------------------+
| spatial_vector | 6D vector of single-precision floats |
+-----------------------+------------------------------------------------+
| spatial_vectorf | 6D vector of single-precision floats |
+-----------------------+------------------------------------------------+
| spatial_vectord | 6D vector of double-precision floats |
+-----------------------+------------------------------------------------+
| spatial_vectorh | 6D vector of half-precision floats |
+-----------------------+------------------------------------------------+
Vectors support most standard linear algebra operations, e.g.: ::
@wp.kernel
def compute( ... ):
# basis vectors
a = wp.vec3(1.0, 0.0, 0.0)
b = wp.vec3(0.0, 1.0, 0.0)
# take the cross product
c = wp.cross(a, b)
# compute
r = wp.dot(c, c)
...
It's possible to declare additional vector types with different lengths and data types. This is done outside of kernels in *Python scope* using ``warp.types.vector()``, for example: ::
# declare a new vector type for holding 5 double precision floats:
vec5d = wp.types.vector(length=5, dtype=wp.float64)
Once declared, the new type can be used when allocating arrays or inside kernels: ::
# create an array of vec5d
arr = wp.zeros(10, dtype=vec5d)
# use inside a kernel
@wp.kernel
def compute( ... ):
# zero initialize a custom named vector type
v = vec5d()
...
# component-wise initialize a named vector type
v = vec5d(wp.float64(1.0),
wp.float64(2.0),
wp.float64(3.0),
wp.float64(4.0),
wp.float64(5.0))
...
In addition, it's possible to directly create *anonymously* typed instances of these vectors without declaring their type in advance. In this case the type will be inferred by the constructor arguments. For example: ::
@wp.kernel
def compute( ... ):
# zero initialize vector of 5 doubles:
v = wp.vector(dtype=wp.float64, length=5)
# scalar initialize a vector of 5 doubles to the same value:
v = wp.vector(wp.float64(1.0), length=5)
# component-wise initialize a vector of 5 doubles
v = wp.vector(wp.float64(1.0),
wp.float64(2.0),
wp.float64(3.0),
wp.float64(4.0),
wp.float64(5.0))
These can be used with all the standard vector arithmetic operators, e.g.: ``+``, ``-``, scalar multiplication, and can also be transformed using matrices with compatible dimensions, potentially returning vectors with a different length.
.. _mat:
Matrices
########
Matrices with arbitrary shapes/numeric types are also supported. The built-in concrete matrix types are as follows:
+--------------------------+-------------------------------------------------+
| mat22 mat33 mat44 | 2x2, 3x3, 4x4 matrix of single-precision floats |
+--------------------------+-------------------------------------------------+
| mat22f mat33f mat44f | 2x2, 3x3, 4x4 matrix of single-precision floats |
+--------------------------+-------------------------------------------------+
| mat22d mat33d mat44d | 2x2, 3x3, 4x4 matrix of double-precision floats |
+--------------------------+-------------------------------------------------+
| mat22h mat33h mat44h | 2x2, 3x3, 4x4 matrix of half-precision floats |
+--------------------------+-------------------------------------------------+
| spatial_matrix | 6x6 matrix of single-precision floats |
+--------------------------+-------------------------------------------------+
| spatial_matrixf | 6x6 matrix of single-precision floats |
+--------------------------+-------------------------------------------------+
| spatial_matrixd | 6x6 matrix of double-precision floats |
+--------------------------+-------------------------------------------------+
| spatial_matrixh | 6x6 matrix of half-precision floats |
+--------------------------+-------------------------------------------------+
Matrices are stored in row-major format and support most standard linear algebra operations: ::
@wp.kernel
def compute( ... ):
# initialize matrix
m = wp.mat22(1.0, 2.0,
3.0, 4.0)
# compute inverse
minv = wp.inverse(m)
# transform vector
v = minv * wp.vec2(0.5, 0.3)
...
In a similar manner to vectors, it's possible to declare new matrix types with arbitrary shapes and data types using ``wp.types.matrix()``, for example: ::
# declare a new 3x2 half precision float matrix type:
mat32h = wp.types.matrix(shape=(3,2), dtype=wp.float16)
# create an array of this type
a = wp.zeros(10, dtype=mat32h)
These can be used inside a kernel::
@wp.kernel
def compute( ... ):
...
# initialize a mat32h matrix
m = mat32h(wp.float16(1.0), wp.float16(2.0),
wp.float16(3.0), wp.float16(4.0),
wp.float16(5.0), wp.float16(6.0))
# declare a 2 component half precision vector
v2 = wp.vec2h(wp.float16(1.0), wp.float16(1.0))
# multiply by the matrix, returning a 3 component vector:
v3 = m * v2
...
It's also possible to directly create anonymously typed instances inside kernels where the type is inferred from constructor arguments as follows::
@wp.kernel
def compute( ... ):
...
# create a 3x2 half precision matrix from components (row major ordering):
m = wp.matrix(
wp.float16(1.0), wp.float16(2.0),
wp.float16(1.0), wp.float16(2.0),
wp.float16(1.0), wp.float16(2.0),
shape=(3,2))
# zero initialize a 3x2 half precision matrix:
m = wp.matrix(wp.float16(0.0),shape=(3,2))
# create a 5x5 double precision identity matrix:
m = wp.identity(n=5, dtype=wp.float64)
As with vectors, you can do standard matrix arithmetic with these variables, along with multiplying matrices with compatible shapes and potentially returning a matrix with a new shape.
.. _quat:
Quaternions
###########
Warp supports quaternions with the layout ``i, j, k, w`` where ``w`` is the real part. Here are the built-in concrete quaternion types:
+-----------------+--------------------------------------------+
| quat | Single-precision floating point quaternion |
+-----------------+--------------------------------------------+
| quatf | Single-precision floating point quaternion |
+-----------------+--------------------------------------------+
| quatd | Double-precision floating point quaternion |
+-----------------+--------------------------------------------+
| quath | Half-precision floating point quaternion |
+-----------------+--------------------------------------------+
Quaternions can be used to transform vectors as follows::
@wp.kernel
def compute( ... ):
...
# construct a 30 degree rotation around the x-axis
q = wp.quat_from_axis_angle(wp.vec3(1.0, 0.0, 0.0), wp.radians(30.0))
# rotate an axis by this quaternion
v = wp.quat_rotate(q, wp.vec3(0.0, 1.0, 0.0))
As with vectors and matrices, you can declare quaternion types with an arbitrary numeric type like so::
quatd = wp.types.quaternion(dtype=wp.float64)
You can also create identity quaternion and anonymously typed instances inside a kernel like so::
@wp.kernel
def compute( ... ):
...
# create a double precision identity quaternion:
qd = wp.quat_identity(dtype=wp.float64)
# precision defaults to wp.float32 so this creates a single precision identity quaternion:
qf = wp.quat_identity()
# create a half precision quaternion from components, or a vector/scalar:
qh = wp.quaternion(wp.float16(0.0),
wp.float16(0.0),
wp.float16(0.0),
wp.float16(1.0))
qh = wp.quaternion(
wp.vector(wp.float16(0.0),wp.float16(0.0),wp.float16(0.0)),
wp.float16(1.0))
.. _transform:
Transforms
##########
Transforms are 7D vectors of floats representing a spatial rigid body transformation in the format ``(p, q)``, where ``p`` is a 3D vector and ``q`` is a quaternion.
+-----------------+--------------------------------------------+
| transform | Single-precision floating point transform |
+-----------------+--------------------------------------------+
| transformf | Single-precision floating point transform |
+-----------------+--------------------------------------------+
| transformd | Double-precision floating point transform |
+-----------------+--------------------------------------------+
| transformh | Half-precision floating point transform |
+-----------------+--------------------------------------------+
Transforms can be constructed inside kernels from translation and rotation parts::
@wp.kernel
def compute( ... ):
...
# create a transform from a vector/quaternion:
t = wp.transform(
wp.vec3(1.0, 2.0, 3.0),
wp.quat_from_axis_angle(wp.vec3(0.0, 1.0, 0.0), wp.radians(30.0)))
# transform a point
p = wp.transform_point(t, wp.vec3(10.0, 0.5, 1.0))
# transform a vector (ignore translation)
p = wp.transform_vector(t, wp.vec3(10.0, 0.5, 1.0))
As with vectors and matrices, you can declare transform types with an arbitrary numeric type using ``wp.types.transformation()``, for example::
transformd = wp.types.transformation(dtype=wp.float64)
You can also create identity transforms and anonymously typed instances inside a kernel like so::
@wp.kernel
def compute( ... ):
# create double precision identity transform:
qd = wp.transform_identity(dtype=wp.float64)
.. _Structs:
Structs
#######
Users can define custom structure types using the ``@wp.struct`` decorator as follows::
@wp.struct
class MyStruct:
param1: int
param2: float
param3: wp.array(dtype=wp.vec3)
Struct attributes must be annotated with their respective type. They can be constructed in Python scope and then passed to kernels as arguments::
@wp.kernel
def compute(args: MyStruct):
tid = wp.tid()
print(args.param1)
print(args.param2)
print(args.param3[tid])
# construct an instance of the struct in Python
s = MyStruct()
s.param1 = 10
s.param2 = 2.5
s.param3 = wp.zeros(shape=10, dtype=wp.vec3)
# pass to our compute kernel
wp.launch(compute, dim=10, inputs=[s])
An array of structs can be zero-initialized as follows::
a = wp.zeros(shape=10, dtype=MyStruct)
An array of structs can also be initialized from a list of struct objects::
a = wp.array([MyStruct(), MyStruct(), MyStruct()], dtype=MyStruct)
Example: Using a struct in gradient computation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code:: python
import numpy as np
import warp as wp
@wp.struct
class TestStruct:
x: wp.vec3
a: wp.array(dtype=wp.vec3)
b: wp.array(dtype=wp.vec3)
@wp.kernel
def test_kernel(s: TestStruct):
tid = wp.tid()
s.b[tid] = s.a[tid] + s.x
@wp.kernel
def loss_kernel(s: TestStruct, loss: wp.array(dtype=float)):
tid = wp.tid()
v = s.b[tid]
wp.atomic_add(loss, 0, float(tid + 1) * (v[0] + 2.0 * v[1] + 3.0 * v[2]))
# create struct
ts = TestStruct()
# set members
ts.x = wp.vec3(1.0, 2.0, 3.0)
ts.a = wp.array(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), dtype=wp.vec3, requires_grad=True)
ts.b = wp.zeros(2, dtype=wp.vec3, requires_grad=True)
loss = wp.zeros(1, dtype=float, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(test_kernel, dim=2, inputs=[ts])
wp.launch(loss_kernel, dim=2, inputs=[ts, loss])
tape.backward(loss)
print(loss)
print(ts.a)
Type Conversions
################
Warp is particularly strict regarding type conversions and does not perform *any* implicit conversion between numeric types.
The user is responsible for ensuring types for most arithmetic operators match, e.g.: ``x = float(0.0) + int(4)`` will result in an error.
This can be surprising for users that are accustomed to C-style conversions but avoids a class of common bugs that result from implicit conversions.
.. note::
Warp does not currently perform implicit type conversions between numeric types.
Users should explicitly cast variables to compatible types using constructors like
``int()``, ``float()``, ``wp.float16()``, ``wp.uint8()``, etc.
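For instance, the erroneous example above can be made valid with an explicit cast; the kernel below is an illustrative sketch:

.. code:: python

    @wp.kernel
    def cast_example(out: wp.array(dtype=float)):
        i = 4      # int32
        x = 0.0    # float32

        # cast the integer to float so the operand types match
        out[0] = x + float(i)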
Constants
---------
In general, Warp kernels cannot access variables in the global Python interpreter state. One exception to this is for compile-time constants, which may be declared globally (or as class attributes) and folded into the kernel definition.
Constants are defined using the ``wp.constant()`` function. An example is shown below::
TYPE_SPHERE = wp.constant(0)
TYPE_CUBE = wp.constant(1)
TYPE_CAPSULE = wp.constant(2)
@wp.kernel
def collide(geometry: wp.array(dtype=int)):
t = geometry[wp.tid()]
if (t == TYPE_SPHERE):
print("sphere")
if (t == TYPE_CUBE):
print("cube")
if (t == TYPE_CAPSULE):
print("capsule")
.. autoclass:: constant
Predefined Constants
####################
For convenience, Warp has a number of predefined mathematical constants that
may be used both inside and outside Warp kernels.
The constants in the following table also have lowercase versions defined,
e.g. ``wp.E`` and ``wp.e`` are equivalent.
================ =========================
Name Value
================ =========================
wp.E 2.71828182845904523536
wp.LOG2E 1.44269504088896340736
wp.LOG10E 0.43429448190325182765
wp.LN2 0.69314718055994530942
wp.LN10 2.30258509299404568402
wp.PHI 1.61803398874989484820
wp.PI 3.14159265358979323846
wp.HALF_PI 1.57079632679489661923
wp.TAU 6.28318530717958647692
wp.INF math.inf
wp.NAN float('nan')
================ =========================
The ``wp.NAN`` constant may only be used with floating-point types.
Comparisons involving ``wp.NAN`` follow the IEEE 754 standard,
e.g. ``wp.float32(wp.NAN) == wp.float32(wp.NAN)`` returns ``False``.
The :func:`wp.isnan() <isnan>` built-in function can be used to determine whether a
value is a NaN (or if a vector, matrix, or quaternion contains a NaN entry).
The following example shows how positive and negative infinity
can be used with floating-point types in Warp using the ``wp.inf`` constant:
.. code-block:: python
@wp.kernel
def test_infinity(outputs: wp.array(dtype=wp.float32)):
outputs[0] = wp.float32(wp.inf) # inf
outputs[1] = wp.float32(-wp.inf) # -inf
outputs[2] = wp.float32(2.0 * wp.inf) # inf
outputs[3] = wp.float32(-2.0 * wp.inf) # -inf
outputs[4] = wp.float32(2.0 / 0.0) # inf
outputs[5] = wp.float32(-2.0 / 0.0) # -inf
Operators
----------
Boolean Operators
#################
+--------------+--------------------------------------+
| a and b | True if a and b are True |
+--------------+--------------------------------------+
| a or b | True if a or b is True |
+--------------+--------------------------------------+
| not a | True if a is False, otherwise False |
+--------------+--------------------------------------+
.. note::
Expressions such as ``if (a and b):`` currently do not perform short-circuit evaluation.
In this case ``b`` will also be evaluated even when ``a`` is ``False``.
Users should take care to ensure that secondary conditions are safe to evaluate (e.g.: do not index out of bounds) in all cases.
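One way to keep such conditions safe is to nest them instead of combining them with ``and``, as in this illustrative sketch:

.. code:: python

    @wp.kernel
    def safe_lookup(values: wp.array(dtype=float),
                    n: int,
                    index: int,
                    out: wp.array(dtype=float)):
        # combining the checks with 'and' would still evaluate values[index]
        # when the bounds test fails, so nest the conditions instead
        if index >= 0:
            if index < n:
                if values[index] > 0.0:
                    out[0] = values[index]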
Comparison Operators
####################
+----------+---------------------------------------+
| a > b | True if a strictly greater than b |
+----------+---------------------------------------+
| a < b | True if a strictly less than b |
+----------+---------------------------------------+
| a >= b | True if a greater than or equal to b |
+----------+---------------------------------------+
| a <= b | True if a less than or equal to b |
+----------+---------------------------------------+
| a == b | True if a equals b |
+----------+---------------------------------------+
| a != b | True if a not equal to b |
+----------+---------------------------------------+
Arithmetic Operators
####################
+-----------+--------------------------+
| a + b | Addition |
+-----------+--------------------------+
| a - b | Subtraction |
+-----------+--------------------------+
| a * b | Multiplication |
+-----------+--------------------------+
| a / b | Floating point division |
+-----------+--------------------------+
| a // b | Floored division |
+-----------+--------------------------+
| a ** b | Exponentiation |
+-----------+--------------------------+
| a % b | Modulus |
+-----------+--------------------------+
.. note::
Since implicit conversions are not performed, argument types passed to operators should match.
Users should use type constructors, e.g.: ``float()``, ``int()``, ``wp.int64()``, etc. to cast variables
to the correct type. Also note that the multiplication expression ``a * b`` is used to represent scalar
multiplication and matrix multiplication. The ``@`` operator is not currently supported.
Graphs
-----------
Launching kernels from Python introduces significant additional overhead compared to C++ or native programs.
To address this, Warp exposes the concept of `CUDA graphs <https://developer.nvidia.com/blog/cuda-graphs/>`_
to allow recording large batches of kernels and replaying them with very little CPU overhead.
To record a series of kernel launches use the :func:`wp.capture_begin() <capture_begin>` and
:func:`wp.capture_end() <capture_end>` API as follows:
.. code:: python
# begin capture
wp.capture_begin(device="cuda")
try:
# record launches
for i in range(100):
wp.launch(kernel=compute1, inputs=[a, b], device="cuda")
finally:
# end capture and return a graph object
graph = wp.capture_end(device="cuda")
We strongly recommend the use of the try-finally pattern when capturing graphs because the ``finally``
statement will ensure :func:`wp.capture_end <capture_end>` gets called, even if an exception occurs during
capture, which would otherwise trap the stream in a capturing state.
Once a graph has been constructed it can be executed: ::
wp.capture_launch(graph)
The :class:`wp.ScopedCapture <ScopedCapture>` context manager can be used to simplify the code and
ensure that :func:`wp.capture_end <capture_end>` is called regardless of exceptions:
.. code:: python
with wp.ScopedCapture(device="cuda") as capture:
# record launches
for i in range(100):
wp.launch(kernel=compute1, inputs=[a, b], device="cuda")
wp.capture_launch(capture.graph)
Note that only launch calls are recorded in the graph; any Python code executed outside of kernels will not be recorded.
Typically it is only beneficial to use CUDA graphs when the graph will be reused or launched multiple times.
.. autofunction:: capture_begin
.. autofunction:: capture_end
.. autofunction:: capture_launch
.. autoclass:: ScopedCapture
:members:
Meshes
------
Warp provides a ``wp.Mesh`` class to manage triangle mesh data. To create a mesh, users provide points, indices, and optionally a velocity array::
mesh = wp.Mesh(points, indices, velocities)
.. note::
Mesh objects maintain references to their input geometry buffers. All buffers should live on the same device.
Meshes can be passed to kernels using their ``id`` attribute, a ``uint64`` value that uniquely identifies the mesh.
Once inside a kernel you can perform geometric queries against the mesh such as ray-casts or closest point lookups::

    @wp.kernel
    def raycast(mesh: wp.uint64,
                ray_origin: wp.array(dtype=wp.vec3),
                ray_dir: wp.array(dtype=wp.vec3),
                ray_hit: wp.array(dtype=wp.vec3)):

        tid = wp.tid()

        t = float(0.0)       # hit distance along ray
        u = float(0.0)       # hit face barycentric u
        v = float(0.0)       # hit face barycentric v
        sign = float(0.0)    # hit face sign
        n = wp.vec3()        # hit face normal
        f = int(0)           # hit face index

        color = wp.vec3()

        # ray cast against the mesh
        if wp.mesh_query_ray(mesh, ray_origin[tid], ray_dir[tid], 1.e+6, t, u, v, sign, n, f):

            # if we got a hit then set color to the face normal
            color = n*0.5 + wp.vec3(0.5, 0.5, 0.5)

        ray_hit[tid] = color
Users may update mesh vertex positions at runtime simply by modifying the points buffer.
After modifying point locations, users should call ``Mesh.refit()`` to rebuild the bounding volume hierarchy (BVH) and ensure that queries work correctly.
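For example (a sketch, assuming ``deform_points`` is a user-provided kernel that writes new positions into the mesh's existing points buffer):

.. code:: python

    # update vertex positions in place
    wp.launch(kernel=deform_points, dim=len(mesh.points), inputs=[mesh.points], device="cuda")

    # rebuild the BVH so that subsequent queries see the new positions
    mesh.refit()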
.. note::
    Updating Mesh topology (indices) at runtime is not currently supported. Users should instead recreate a new Mesh object.
.. autoclass:: Mesh
    :members:
Hash Grids
----------
Many particle-based simulation methods, such as the Discrete Element Method (DEM) or Smoothed Particle Hydrodynamics (SPH), involve iterating over spatial neighbors to compute force interactions. Hash grids are a well-established data structure for accelerating these nearest-neighbor queries and are particularly well-suited to the GPU.
To support spatial neighbor queries Warp provides a ``HashGrid`` object that may be created as follows::

    grid = wp.HashGrid(dim_x=128, dim_y=128, dim_z=128, device="cuda")

    grid.build(points=p, radius=r)
``p`` is an array of ``wp.vec3`` point positions, and ``r`` is the radius to use when building the grid.
Neighbors can then be iterated over inside the kernel code using :func:`wp.hash_grid_query() <hash_grid_query>`
and :func:`wp.hash_grid_query_next() <hash_grid_query_next>` as follows:
.. code:: python

    @wp.kernel
    def sum(grid: wp.uint64,
            points: wp.array(dtype=wp.vec3),
            output: wp.array(dtype=wp.vec3),
            radius: float):

        tid = wp.tid()

        # query point
        p = points[tid]

        # create grid query around point
        query = wp.hash_grid_query(grid, p, radius)
        index = int(0)

        sum = wp.vec3()

        while(wp.hash_grid_query_next(query, index)):

            neighbor = points[index]

            # compute distance to neighbor point
            dist = wp.length(p - neighbor)
            if (dist <= radius):
                sum += neighbor

        output[tid] = sum
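The kernel above might be launched with one thread per particle as follows (a sketch; ``p`` and ``r`` follow the build example earlier in this section):

.. code:: python

    n = len(p)
    output = wp.zeros(n, dtype=wp.vec3, device="cuda")

    wp.launch(kernel=sum, dim=n, inputs=[grid.id, p, output, r], device="cuda")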
.. note::
    The ``HashGrid`` query returns all points in *cells* that fall inside the query radius.
    Due to hash collisions, some points outside of the query radius may also be returned, so users should
    check the distance themselves inside their kernels. The query does not perform this check for each
    returned point because kernels commonly need to compute the distance anyway, so checking it twice would be redundant.
.. autoclass:: HashGrid
    :members:
Volumes
-------
Sparse volumes are incredibly useful for representing grid data over large domains, such as signed distance fields
(SDFs) for complex objects, or velocities for large-scale fluid flow. Warp supports reading sparse volumetric grids
stored using the `NanoVDB <https://developer.nvidia.com/nanovdb>`_ standard. Users can access voxels directly
or use built-in closest-point or trilinear interpolation to sample grid data from world or local space.
Volume objects can be created directly from Warp arrays containing a NanoVDB grid, from the contents of a
standard ``.nvdb`` file using :func:`load_from_nvdb() <warp.Volume.load_from_nvdb>`,
from an uncompressed in-memory buffer using :func:`load_from_address() <warp.Volume.load_from_address>`,
or from a dense 3D NumPy array using :func:`load_from_numpy() <warp.Volume.load_from_numpy>`.
Volumes can also be created using :func:`allocate() <warp.Volume.allocate>`,
:func:`allocate_by_tiles() <warp.Volume.allocate_by_tiles>` or :func:`allocate_by_voxels() <warp.Volume.allocate_by_voxels>`.
The values for a Volume object can be modified in a Warp kernel using :func:`wp.volume_store() <warp.volume_store>`.
.. note::
    Warp does not currently support modifying the topology of sparse volumes at runtime.
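As an illustration of writing voxel values from a kernel, the sketch below allocates a small dense brick with :func:`allocate() <warp.Volume.allocate>` and fills it using :func:`wp.volume_store() <warp.volume_store>`; the resolution, voxel size, and analytic values are arbitrary choices made for this example:

.. code:: python

    @wp.kernel
    def fill_volume(volume: wp.uint64):
        i, j, k = wp.tid()

        # store a simple analytic value at each voxel coordinate
        wp.volume_store(volume, i, j, k, float(i + j + k))


    # allocate voxels covering at least the index range [0, 16)^3
    volume = wp.Volume.allocate([0, 0, 0], [16, 16, 16], voxel_size=0.1,
                                bg_value=0.0, device="cuda")

    wp.launch(fill_volume, dim=(16, 16, 16), inputs=[volume.id], device="cuda")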
Below we give an example of creating a Volume object from an existing NanoVDB file::

    # open NanoVDB file on disk
    file = open("mygrid.nvdb", "rb")

    # create Volume object
    volume = wp.Volume.load_from_nvdb(file, device="cpu")
.. note::
    Files written by the NanoVDB library, commonly marked by the ``.nvdb`` extension, can contain multiple grids with
    various compression methods, but a :class:`Volume` object represents a single NanoVDB grid.
    The first grid is loaded by default, and Warp volumes corresponding to the other grids in the file can be created
    using repeated calls to :func:`load_next_grid() <warp.Volume.load_next_grid>`.
    NanoVDB's uncompressed and zip-compressed file formats are supported out of the box; blosc-compressed files require
    the ``blosc`` Python package to be installed.
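A sketch of loading every grid contained in a multi-grid file, assuming that :func:`load_next_grid() <warp.Volume.load_next_grid>` returns ``None`` once all grids have been consumed (the file name is a placeholder):

.. code:: python

    file = open("multigrid.nvdb", "rb")

    # load the first grid in the file
    volume = wp.Volume.load_from_nvdb(file, device="cuda")

    volumes = []
    while volume is not None:
        volumes.append(volume)
        # create a Volume for the next grid stored in the same buffer
        volume = volume.load_next_grid()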
To sample the volume inside a kernel we pass a reference to it by ID, and use the built-in sampling modes::

    @wp.kernel
    def sample_grid(volume: wp.uint64,
                    points: wp.array(dtype=wp.vec3),
                    samples: wp.array(dtype=float)):

        tid = wp.tid()

        # load sample point in world-space
        p = points[tid]

        # transform position to the volume's local-space
        q = wp.volume_world_to_index(volume, p)

        # sample volume with trilinear interpolation
        f = wp.volume_sample(volume, q, wp.Volume.LINEAR, dtype=float)

        # write result
        samples[tid] = f
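The kernel above could then be launched with one thread per sample point (a sketch; ``points`` is assumed to be an existing ``wp.vec3`` array of world-space positions):

.. code:: python

    samples = wp.zeros(len(points), dtype=float, device="cuda")

    wp.launch(kernel=sample_grid, dim=len(points), inputs=[volume.id, points, samples], device="cuda")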
Warp also supports NanoVDB index grids, which provide a memory-efficient linearization of voxel indices that can refer
to values in arbitrarily shaped arrays::

    @wp.kernel
    def sample_index_grid(volume: wp.uint64,
                          points: wp.array(dtype=wp.vec3),
                          voxel_values: wp.array(dtype=Any)):

        tid = wp.tid()

        # load sample point in world-space
        p = points[tid]

        # transform position to the volume's local-space
        q = wp.volume_world_to_index(volume, p)

        # sample volume with trilinear interpolation
        background_value = voxel_values.dtype(0.0)
        f = wp.volume_sample_index(volume, q, wp.Volume.LINEAR, voxel_values, background_value)
The coordinates of all indexable voxels can be recovered using :func:`get_voxels() <warp.Volume.get_voxels>`.
NanoVDB grids may also contain embedded *blind* data arrays; these can be accessed with the
:func:`feature_array() <warp.Volume.feature_array>` function.
.. autoclass:: Volume
    :members:
    :undoc-members:
.. seealso:: `Reference <functions.html#volumes>`__ for the volume functions available in kernels.
Bounding Volume Hierarchies (BVH)
---------------------------------
The :class:`wp.Bvh <Bvh>` class can be used to create a BVH for a group of bounding volumes. This object can then be traversed
to determine which parts are intersected by a ray using :func:`bvh_query_ray` and which parts are fully contained
within a certain bounding volume using :func:`bvh_query_aabb`.
The following snippet demonstrates how to create a :class:`wp.Bvh <Bvh>` object from 100 random bounding volumes:
.. code:: python

    rng = np.random.default_rng(123)

    num_bounds = 100
    lowers = rng.random(size=(num_bounds, 3)) * 5.0
    uppers = lowers + rng.random(size=(num_bounds, 3)) * 5.0

    device_lowers = wp.array(lowers, dtype=wp.vec3, device="cuda:0")
    device_uppers = wp.array(uppers, dtype=wp.vec3, device="cuda:0")

    bvh = wp.Bvh(device_lowers, device_uppers)
.. autoclass:: Bvh
    :members:
Example: BVH Ray Traversal
##########################
An example of performing a ray traversal on the data structure is as follows:
.. code:: python

    @wp.kernel
    def bvh_query_ray(
        bvh_id: wp.uint64,
        start: wp.vec3,
        dir: wp.vec3,
        bounds_intersected: wp.array(dtype=wp.bool),
    ):
        query = wp.bvh_query_ray(bvh_id, start, dir)
        bounds_nr = wp.int32(0)

        while wp.bvh_query_next(query, bounds_nr):
            # The ray intersects the volume with index bounds_nr
            bounds_intersected[bounds_nr] = True


    bounds_intersected = wp.zeros(shape=(num_bounds), dtype=wp.bool, device="cuda:0")

    query_start = wp.vec3(0.0, 0.0, 0.0)
    query_dir = wp.normalize(wp.vec3(1.0, 1.0, 1.0))

    wp.launch(
        kernel=bvh_query_ray,
        dim=1,
        inputs=[bvh.id, query_start, query_dir, bounds_intersected],
        device="cuda:0",
    )
The Warp kernel ``bvh_query_ray`` is launched with a single thread, provided the unique :class:`uint64`
identifier of the :class:`wp.Bvh <Bvh>` object, parameters describing the ray, and an array to store the results.
In ``bvh_query_ray``, :func:`wp.bvh_query_ray() <bvh_query_ray>` is called once to obtain an object that is stored in the
variable ``query``. An integer is also allocated as ``bounds_nr`` to store the volume index of the traversal.
A while statement is used for the actual traversal using :func:`wp.bvh_query_next() <bvh_query_next>`,
which returns ``True`` as long as there are intersecting bounds.
Example: BVH Volume Traversal
#############################
Similar to the ray-traversal example, we can perform volume traversal to find the volumes that are fully contained
within a specified bounding box.
.. code:: python

    @wp.kernel
    def bvh_query_aabb(
        bvh_id: wp.uint64,
        lower: wp.vec3,
        upper: wp.vec3,
        bounds_intersected: wp.array(dtype=wp.bool),
    ):
        query = wp.bvh_query_aabb(bvh_id, lower, upper)
        bounds_nr = wp.int32(0)

        while wp.bvh_query_next(query, bounds_nr):
            # The volume with index bounds_nr is fully contained
            # in the (lower, upper) bounding box
            bounds_intersected[bounds_nr] = True


    bounds_intersected = wp.zeros(shape=(num_bounds), dtype=wp.bool, device="cuda:0")

    query_lower = wp.vec3(4.0, 4.0, 4.0)
    query_upper = wp.vec3(6.0, 6.0, 6.0)

    wp.launch(
        kernel=bvh_query_aabb,
        dim=1,
        inputs=[bvh.id, query_lower, query_upper, bounds_intersected],
        device="cuda:0",
    )
The kernel is nearly identical to the ray-traversal example, except we obtain ``query`` using
:func:`wp.bvh_query_aabb() <bvh_query_aabb>`.
Marching Cubes
--------------
The :class:`wp.MarchingCubes <MarchingCubes>` class can be used to extract a 2-D mesh approximating an
isosurface of a 3-D scalar field. The resulting triangle mesh can be saved to a USD
file using the :class:`warp.render.UsdRenderer`.
See :github:`warp/examples/core/example_marching_cubes.py` for a usage example.
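A minimal sketch, assuming ``field`` is a dense ``wp.array`` of scalar values with shape ``(nx, ny, nz)``; the resolution, buffer sizes, and threshold below are illustrative:

.. code:: python

    nx, ny, nz = 64, 64, 64

    mc = wp.MarchingCubes(nx=nx, ny=ny, nz=nz,
                          max_verts=10**6, max_tris=10**6,
                          device="cuda")

    # extract the triangle mesh approximating the field == 0.5 isosurface
    mc.surface(field=field, threshold=0.5)

    # resulting geometry, stored as Warp arrays
    vertices = mc.verts     # wp.array(dtype=wp.vec3)
    triangles = mc.indices  # wp.array(dtype=wp.int32)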
.. autoclass:: MarchingCubes
    :members:
Profiling
---------
``wp.ScopedTimer`` objects can be used to gain some basic insight into the performance of Warp applications:
.. code:: python

    with wp.ScopedTimer("grid build"):
        self.grid.build(self.x, self.point_radius)

This results in a printout at runtime to the standard output stream like:

.. code:: console

    grid build took 0.06 ms
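Because kernel launches are asynchronous, wall-clock timings of GPU work typically require synchronizing with the device first; a sketch, assuming the ``synchronize`` argument is appropriate for this use case:

.. code:: python

    # wait for outstanding GPU work before starting and stopping the timer
    with wp.ScopedTimer("grid build", synchronize=True):
        self.grid.build(self.x, self.point_radius)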
See :doc:`../profiling` documentation for more information.
.. autoclass:: warp.ScopedTimer
    :noindex: