Dataset columns: file_path (string, length 20–207), content (string, length 5–3.85M), size (int64, 5–3.85M), lang (9 string classes), avg_line_length (float64, 1.33–100), max_line_length (int64, 4–993), alphanum_fraction (float64, 0.26–0.93).
NVIDIA/warp/warp/fem/integrate.py

import ast
from typing import Any, Dict, List, Optional, Set, Union
import warp as wp
from warp.codegen import get_annotations
from warp.fem import cache
from warp.fem.domain import GeometryDomain
from warp.fem.field import (
DiscreteField,
FieldLike,
FieldRestriction,
SpaceField,
TestField,
TrialField,
make_restriction,
)
from warp.fem.operator import Integrand, Operator
from warp.fem.quadrature import Quadrature, RegularQuadrature
from warp.fem.types import NULL_DOF_INDEX, OUTSIDE, DofIndex, Domain, Field, Sample, make_free_sample
from warp.sparse import BsrMatrix, bsr_set_from_triplets, bsr_zeros
from warp.types import type_length
from warp.utils import array_cast
def _resolve_path(func, node):
"""
Resolves variable and path from ast node/attribute (adapted from warp.codegen)
"""
modules = []
while isinstance(node, ast.Attribute):
modules.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
modules.append(node.id)
# reverse list since ast presents it in backward order
path = [*reversed(modules)]
if len(path) == 0:
return None, path
# try and evaluate object path
try:
# Look up the closure info and append it to adj.func.__globals__
# in case you want to define a kernel inside a function and refer
# to variables you've declared inside that function:
capturedvars = dict(zip(func.__code__.co_freevars, [c.cell_contents for c in (func.__closure__ or [])]))
vars_dict = {**func.__globals__, **capturedvars}
func = eval(".".join(path), vars_dict)
return func, path
except (NameError, AttributeError):
pass
return None, path
def _path_to_ast_attribute(name: str) -> ast.Attribute:
path = name.split(".")
path.reverse()
node = ast.Name(id=path.pop(), ctx=ast.Load())
while len(path):
node = ast.Attribute(
value=node,
attr=path.pop(),
ctx=ast.Load(),
)
return node
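# For example, _path_to_ast_attribute("a.b.c") builds the AST for the
# expression `a.b.c`, i.e. Attribute(Attribute(Name("a"), "b"), "c").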
class IntegrandTransformer(ast.NodeTransformer):
def __init__(self, integrand: Integrand, field_args: Dict[str, FieldLike]):
self._integrand = integrand
self._field_args = field_args
def visit_Call(self, call: ast.Call):
call = self.generic_visit(call)
callee = getattr(call.func, "id", None)
if callee in self._field_args:
# Shortcut for evaluating fields as f(x...)
field = self._field_args[callee]
arg_type = self._integrand.argspec.annotations[callee]
operator = arg_type.call_operator
call.func = ast.Attribute(
value=_path_to_ast_attribute(f"{arg_type.__module__}.{arg_type.__qualname__}"),
attr="call_operator",
ctx=ast.Load(),
)
call.args = [ast.Name(id=callee, ctx=ast.Load())] + call.args
self._replace_call_func(call, operator, field)
return call
func, _ = _resolve_path(self._integrand.func, call.func)
if isinstance(func, Operator) and len(call.args) > 0:
# Evaluating operators as op(field, x, ...)
callee = getattr(call.args[0], "id", None)
if callee in self._field_args:
field = self._field_args[callee]
self._replace_call_func(call, func, field)
if isinstance(func, Integrand):
key = self._translate_callee(func, call.args)
call.func = ast.Attribute(
value=call.func,
attr=key,
ctx=ast.Load(),
)
# print(ast.dump(call, indent=4))
return call
def _replace_call_func(self, call: ast.Call, operator: Operator, field: FieldLike):
try:
pointer = operator.resolver(field)
setattr(operator, pointer.key, pointer)
except AttributeError as e:
raise ValueError(f"Operator {operator.func.__name__} is not defined for field {field.name}") from e
call.func = ast.Attribute(value=call.func, attr=pointer.key, ctx=ast.Load())
def _translate_callee(self, callee: Integrand, args: List[ast.AST]):
# Get field types for call site arguments
call_site_field_args = []
for arg in args:
name = getattr(arg, "id", None)
if name in self._field_args:
call_site_field_args.append(self._field_args[name])
call_site_field_args.reverse()
# Pass to callee in same order
callee_field_args = {}
for arg in callee.argspec.args:
arg_type = callee.argspec.annotations[arg]
if arg_type in (Field, Domain):
callee_field_args[arg] = call_site_field_args.pop()
return _translate_integrand(callee, callee_field_args).key
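# In effect, for a field argument `u` the transformer rewrites an evaluation
# such as `u(s)` so that it dispatches to the overload of the field type's
# call operator resolved for the concrete field, and similarly specializes
# operator calls such as `grad(u, s)` to the function pointer returned by the
# operator's resolver for that field.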
def _translate_integrand(integrand: Integrand, field_args: Dict[str, FieldLike]) -> wp.Function:
# Specialize field argument types
argspec = integrand.argspec
annotations = {}
for arg in argspec.args:
arg_type = argspec.annotations[arg]
if arg_type == Field:
annotations[arg] = field_args[arg].ElementEvalArg
elif arg_type == Domain:
annotations[arg] = field_args[arg].ElementArg
else:
annotations[arg] = arg_type
# Transform field evaluation calls
transformer = IntegrandTransformer(integrand, field_args)
suffix = "_".join([f.name for f in field_args.values()])
func = cache.get_integrand_function(
integrand=integrand,
suffix=suffix,
annotations=annotations,
code_transformers=[transformer],
)
key = func.key
setattr(integrand, key, integrand.module.functions[key])
return getattr(integrand, key)
def _get_integrand_field_arguments(
integrand: Integrand,
fields: Dict[str, FieldLike],
domain: GeometryDomain = None,
):
# parse argument types
field_args = {}
value_args = {}
domain_name = None
sample_name = None
argspec = integrand.argspec
for arg in argspec.args:
arg_type = argspec.annotations[arg]
if arg_type == Field:
if arg not in fields:
raise ValueError(f"Missing field for argument '{arg}' of integrand '{integrand.name}'")
field_args[arg] = fields[arg]
elif arg_type == Domain:
domain_name = arg
field_args[arg] = domain
elif arg_type == Sample:
sample_name = arg
else:
value_args[arg] = arg_type
return field_args, value_args, domain_name, sample_name
def _check_field_compat(
integrand: Integrand,
fields: Dict[str, FieldLike],
field_args: Dict[str, FieldLike],
domain: GeometryDomain = None,
):
# Check field compatibility
for name, field in fields.items():
if name not in field_args:
raise ValueError(
f"Passed field argument '{name}' does not match any parameter of integrand '{integrand.name}'"
)
if isinstance(field, SpaceField) and domain is not None:
space = field.space
if space.geometry != domain.geometry:
raise ValueError(f"Field '{name}' must be defined on the same geometry as the integration domain")
if space.dimension != domain.dimension:
raise ValueError(
f"Field '{name}' dimension ({space.dimension}) does not match that of the integration domain ({domain.dimension}). Maybe a forgotten `.trace()`?"
)
def _populate_value_struct(ValueStruct: wp.codegen.Struct, values: Dict[str, Any], integrand_name: str):
value_struct_values = ValueStruct()
for k, v in values.items():
try:
setattr(value_struct_values, k, v)
except Exception as err:
if k not in ValueStruct.vars:
raise ValueError(
f"Passed value argument '{k}' does not match any of the integrand '{integrand_name}' parameters"
) from err
raise ValueError(
f"Passed value argument '{k}' of type '{wp.types.type_repr(v)}' is incompatible with the integrand '{integrand_name}' parameter of type '{wp.types.type_repr(ValueStruct.vars[k].type)}'"
) from err
missing_values = ValueStruct.vars.keys() - values.keys()
if missing_values:
wp.utils.warn(
f"Missing values for parameter(s) '{', '.join(missing_values)}' of the integrand '{integrand_name}', will be zero-initialized"
)
return value_struct_values
def _get_test_and_trial_fields(
fields: Dict[str, FieldLike],
):
test = None
trial = None
test_name = None
trial_name = None
for name, field in fields.items():
if not isinstance(field, FieldLike):
raise ValueError(f"Passed field argument '{name}' is not a proper Field")
if isinstance(field, TestField):
if test is not None:
raise ValueError(f"More than one test field argument: '{test_name}' and '{name}'")
test = field
test_name = name
elif isinstance(field, TrialField):
if trial is not None:
raise ValueError(f"More than one trial field argument: '{trial_name}' and '{name}'")
trial = field
trial_name = name
if trial is not None:
if test is None:
raise ValueError("A trial field cannot be provided without a test field")
if test.domain != trial.domain:
raise ValueError("Incompatible test and trial domains")
return test, test_name, trial, trial_name
def _gen_field_struct(field_args: Dict[str, FieldLike]):
class Fields:
pass
annotations = get_annotations(Fields)
for name, arg in field_args.items():
if isinstance(arg, GeometryDomain):
continue
setattr(Fields, name, arg.EvalArg())
annotations[name] = arg.EvalArg
try:
Fields.__annotations__ = annotations
except AttributeError:
Fields.__dict__.__annotations__ = annotations
suffix = "_".join([f"{name}_{arg_struct.cls.__qualname__}" for name, arg_struct in annotations.items()])
return cache.get_struct(Fields, suffix=suffix)
def _gen_value_struct(value_args: Dict[str, type]):
class Values:
pass
annotations = get_annotations(Values)
for name, arg_type in value_args.items():
setattr(Values, name, None)
annotations[name] = arg_type
def arg_type_name(arg_type):
if isinstance(arg_type, wp.codegen.Struct):
return arg_type_name(arg_type.cls)
return getattr(arg_type, "__name__", str(arg_type))
try:
Values.__annotations__ = annotations
except AttributeError:
Values.__dict__.__annotations__ = annotations
suffix = "_".join([f"{name}_{arg_type_name(arg_type)}" for name, arg_type in annotations.items()])
return cache.get_struct(Values, suffix=suffix)
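# Marker functions: calls to _get_test_arg() / _get_trial_arg() inside the
# generated kernels below are replaced by the PassFieldArgsToIntegrand AST
# transformer with attribute accesses on the generated field struct.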
def _get_trial_arg():
pass
def _get_test_arg():
pass
class _FieldWrappers:
pass
def _register_integrand_field_wrappers(integrand_func: wp.Function, fields: Dict[str, FieldLike]):
integrand_func._field_wrappers = _FieldWrappers()
for name, field in fields.items():
setattr(integrand_func._field_wrappers, name, field.ElementEvalArg)
class PassFieldArgsToIntegrand(ast.NodeTransformer):
def __init__(
self,
arg_names: List[str],
field_args: Set[str],
value_args: Set[str],
sample_name: str,
domain_name: str,
test_name: str = None,
trial_name: str = None,
func_name: str = "integrand_func",
fields_var_name: str = "fields",
values_var_name: str = "values",
domain_var_name: str = "domain_arg",
sample_var_name: str = "sample",
field_wrappers_attr: str = "_field_wrappers",
):
self._arg_names = arg_names
self._field_args = field_args
self._value_args = value_args
self._domain_name = domain_name
self._sample_name = sample_name
self._func_name = func_name
self._test_name = test_name
self._trial_name = trial_name
self._fields_var_name = fields_var_name
self._values_var_name = values_var_name
self._domain_var_name = domain_var_name
self._sample_var_name = sample_var_name
self._field_wrappers_attr = field_wrappers_attr
def visit_Call(self, call: ast.Call):
call = self.generic_visit(call)
callee = getattr(call.func, "id", None)
if callee == self._func_name:
# Replace function arguments with our generated structs
call.args.clear()
for arg in self._arg_names:
if arg == self._domain_name:
call.args.append(
ast.Name(id=self._domain_var_name, ctx=ast.Load()),
)
elif arg == self._sample_name:
call.args.append(
ast.Name(id=self._sample_var_name, ctx=ast.Load()),
)
elif arg in self._field_args:
call.args.append(
ast.Call(
func=ast.Attribute(
value=ast.Attribute(
value=ast.Name(id=self._func_name, ctx=ast.Load()),
attr=self._field_wrappers_attr,
ctx=ast.Load(),
),
attr=arg,
ctx=ast.Load(),
),
args=[
ast.Name(id=self._domain_var_name, ctx=ast.Load()),
ast.Attribute(
value=ast.Name(id=self._fields_var_name, ctx=ast.Load()),
attr=arg,
ctx=ast.Load(),
),
],
keywords=[],
)
)
elif arg in self._value_args:
call.args.append(
ast.Attribute(
value=ast.Name(id=self._values_var_name, ctx=ast.Load()),
attr=arg,
ctx=ast.Load(),
)
)
else:
raise RuntimeError(f"Unhandled argument {arg}")
# print(ast.dump(call, indent=4))
elif callee == _get_test_arg.__name__:
# print(ast.dump(call, indent=4))
call = ast.Attribute(
value=ast.Name(id=self._fields_var_name, ctx=ast.Load()),
attr=self._test_name,
ctx=ast.Load(),
)
elif callee == _get_trial_arg.__name__:
# print(ast.dump(call, indent=4))
call = ast.Attribute(
value=ast.Name(id=self._fields_var_name, ctx=ast.Load()),
attr=self._trial_name,
ctx=ast.Load(),
)
return call
def get_integrate_constant_kernel(
integrand_func: wp.Function,
domain: GeometryDomain,
quadrature: Quadrature,
FieldStruct: wp.codegen.Struct,
ValueStruct: wp.codegen.Struct,
accumulate_dtype,
):
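# The generated kernel evaluates sum_k w_k * vol_k * f(x_k) over each
# element's quadrature points and atomically accumulates the per-element
# total into the single-entry result array.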
def integrate_kernel_fn(
qp_arg: quadrature.Arg,
domain_arg: domain.ElementArg,
domain_index_arg: domain.ElementIndexArg,
fields: FieldStruct,
values: ValueStruct,
result: wp.array(dtype=accumulate_dtype),
):
element_index = domain.element_index(domain_index_arg, wp.tid())
elem_sum = accumulate_dtype(0.0)
test_dof_index = NULL_DOF_INDEX
trial_dof_index = NULL_DOF_INDEX
qp_point_count = quadrature.point_count(domain_arg, qp_arg, element_index)
for k in range(qp_point_count):
qp_index = quadrature.point_index(domain_arg, qp_arg, element_index, k)
coords = quadrature.point_coords(domain_arg, qp_arg, element_index, k)
qp_weight = quadrature.point_weight(domain_arg, qp_arg, element_index, k)
sample = Sample(element_index, coords, qp_index, qp_weight, test_dof_index, trial_dof_index)
vol = domain.element_measure(domain_arg, sample)
val = integrand_func(sample, fields, values)
elem_sum += accumulate_dtype(qp_weight * vol * val)
wp.atomic_add(result, 0, elem_sum)
return integrate_kernel_fn
def get_integrate_linear_kernel(
integrand_func: wp.Function,
domain: GeometryDomain,
quadrature: Quadrature,
FieldStruct: wp.codegen.Struct,
ValueStruct: wp.codegen.Struct,
test: TestField,
output_dtype,
accumulate_dtype,
):
def integrate_kernel_fn(
qp_arg: quadrature.Arg,
domain_arg: domain.ElementArg,
domain_index_arg: domain.ElementIndexArg,
test_arg: test.space_restriction.NodeArg,
fields: FieldStruct,
values: ValueStruct,
result: wp.array2d(dtype=output_dtype),
):
local_node_index, test_dof = wp.tid()
node_index = test.space_restriction.node_partition_index(test_arg, local_node_index)
element_count = test.space_restriction.node_element_count(test_arg, local_node_index)
trial_dof_index = NULL_DOF_INDEX
val_sum = accumulate_dtype(0.0)
for n in range(element_count):
node_element_index = test.space_restriction.node_element_index(test_arg, local_node_index, n)
element_index = domain.element_index(domain_index_arg, node_element_index.domain_element_index)
test_dof_index = DofIndex(node_element_index.node_index_in_element, test_dof)
qp_point_count = quadrature.point_count(domain_arg, qp_arg, element_index)
for k in range(qp_point_count):
qp_index = quadrature.point_index(domain_arg, qp_arg, element_index, k)
qp_coords = quadrature.point_coords(domain_arg, qp_arg, element_index, k)
qp_weight = quadrature.point_weight(domain_arg, qp_arg, element_index, k)
vol = domain.element_measure(domain_arg, make_free_sample(element_index, qp_coords))
sample = Sample(element_index, qp_coords, qp_index, qp_weight, test_dof_index, trial_dof_index)
val = integrand_func(sample, fields, values)
val_sum += accumulate_dtype(qp_weight * vol * val)
result[node_index, test_dof] = output_dtype(val_sum)
return integrate_kernel_fn
def get_integrate_linear_nodal_kernel(
integrand_func: wp.Function,
domain: GeometryDomain,
FieldStruct: wp.codegen.Struct,
ValueStruct: wp.codegen.Struct,
test: TestField,
output_dtype,
accumulate_dtype,
):
def integrate_kernel_fn(
domain_arg: domain.ElementArg,
domain_index_arg: domain.ElementIndexArg,
test_restriction_arg: test.space_restriction.NodeArg,
fields: FieldStruct,
values: ValueStruct,
result: wp.array2d(dtype=output_dtype),
):
local_node_index, dof = wp.tid()
node_index = test.space_restriction.node_partition_index(test_restriction_arg, local_node_index)
element_count = test.space_restriction.node_element_count(test_restriction_arg, local_node_index)
trial_dof_index = NULL_DOF_INDEX
val_sum = accumulate_dtype(0.0)
for n in range(element_count):
node_element_index = test.space_restriction.node_element_index(test_restriction_arg, local_node_index, n)
element_index = domain.element_index(domain_index_arg, node_element_index.domain_element_index)
coords = test.space.node_coords_in_element(
domain_arg,
_get_test_arg(),
element_index,
node_element_index.node_index_in_element,
)
if coords[0] != OUTSIDE:
node_weight = test.space.node_quadrature_weight(
domain_arg,
_get_test_arg(),
element_index,
node_element_index.node_index_in_element,
)
test_dof_index = DofIndex(node_element_index.node_index_in_element, dof)
sample = Sample(
element_index,
coords,
node_index,
node_weight,
test_dof_index,
trial_dof_index,
)
vol = domain.element_measure(domain_arg, sample)
val = integrand_func(sample, fields, values)
val_sum += accumulate_dtype(node_weight * vol * val)
result[node_index, dof] = output_dtype(val_sum)
return integrate_kernel_fn
def get_integrate_bilinear_kernel(
integrand_func: wp.Function,
domain: GeometryDomain,
quadrature: Quadrature,
FieldStruct: wp.codegen.Struct,
ValueStruct: wp.codegen.Struct,
test: TestField,
trial: TrialField,
output_dtype,
accumulate_dtype,
):
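# Bilinear assembly: each thread accumulates one (test node, trial node,
# test dof, trial dof) block entry into COO triplets; the triplets are later
# compressed into a BSR matrix via bsr_set_from_triplets.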
NODES_PER_ELEMENT = trial.space.topology.NODES_PER_ELEMENT
def integrate_kernel_fn(
qp_arg: quadrature.Arg,
domain_arg: domain.ElementArg,
domain_index_arg: domain.ElementIndexArg,
test_arg: test.space_restriction.NodeArg,
trial_partition_arg: trial.space_partition.PartitionArg,
trial_topology_arg: trial.space_partition.space_topology.TopologyArg,
fields: FieldStruct,
values: ValueStruct,
row_offsets: wp.array(dtype=int),
triplet_rows: wp.array(dtype=int),
triplet_cols: wp.array(dtype=int),
triplet_values: wp.array3d(dtype=output_dtype),
):
test_local_node_index, trial_node, test_dof, trial_dof = wp.tid()
element_count = test.space_restriction.node_element_count(test_arg, test_local_node_index)
test_node_index = test.space_restriction.node_partition_index(test_arg, test_local_node_index)
trial_dof_index = DofIndex(trial_node, trial_dof)
for element in range(element_count):
test_element_index = test.space_restriction.node_element_index(test_arg, test_local_node_index, element)
element_index = domain.element_index(domain_index_arg, test_element_index.domain_element_index)
qp_point_count = quadrature.point_count(domain_arg, qp_arg, element_index)
test_dof_index = DofIndex(
test_element_index.node_index_in_element,
test_dof,
)
val_sum = accumulate_dtype(0.0)
for k in range(qp_point_count):
qp_index = quadrature.point_index(domain_arg, qp_arg, element_index, k)
coords = quadrature.point_coords(domain_arg, qp_arg, element_index, k)
qp_weight = quadrature.point_weight(domain_arg, qp_arg, element_index, k)
vol = domain.element_measure(domain_arg, make_free_sample(element_index, coords))
sample = Sample(
element_index,
coords,
qp_index,
qp_weight,
test_dof_index,
trial_dof_index,
)
val = integrand_func(sample, fields, values)
val_sum += accumulate_dtype(qp_weight * vol * val)
block_offset = (row_offsets[test_node_index] + element) * NODES_PER_ELEMENT + trial_node
triplet_values[block_offset, test_dof, trial_dof] = output_dtype(val_sum)
# Set row and column indices
if test_dof == 0 and trial_dof == 0:
trial_node_index = trial.space_partition.partition_node_index(
trial_partition_arg,
trial.space.topology.element_node_index(domain_arg, trial_topology_arg, element_index, trial_node),
)
triplet_rows[block_offset] = test_node_index
triplet_cols[block_offset] = trial_node_index
return integrate_kernel_fn
def get_integrate_bilinear_nodal_kernel(
integrand_func: wp.Function,
domain: GeometryDomain,
FieldStruct: wp.codegen.Struct,
ValueStruct: wp.codegen.Struct,
test: TestField,
output_dtype,
accumulate_dtype,
):
def integrate_kernel_fn(
domain_arg: domain.ElementArg,
domain_index_arg: domain.ElementIndexArg,
test_restriction_arg: test.space_restriction.NodeArg,
fields: FieldStruct,
values: ValueStruct,
triplet_rows: wp.array(dtype=int),
triplet_cols: wp.array(dtype=int),
triplet_values: wp.array3d(dtype=output_dtype),
):
local_node_index, test_dof, trial_dof = wp.tid()
element_count = test.space_restriction.node_element_count(test_restriction_arg, local_node_index)
node_index = test.space_restriction.node_partition_index(test_restriction_arg, local_node_index)
val_sum = accumulate_dtype(0.0)
for n in range(element_count):
node_element_index = test.space_restriction.node_element_index(test_restriction_arg, local_node_index, n)
element_index = domain.element_index(domain_index_arg, node_element_index.domain_element_index)
coords = test.space.node_coords_in_element(
domain_arg,
_get_test_arg(),
element_index,
node_element_index.node_index_in_element,
)
if coords[0] != OUTSIDE:
node_weight = test.space.node_quadrature_weight(
domain_arg,
_get_test_arg(),
element_index,
node_element_index.node_index_in_element,
)
test_dof_index = DofIndex(node_element_index.node_index_in_element, test_dof)
trial_dof_index = DofIndex(node_element_index.node_index_in_element, trial_dof)
sample = Sample(
element_index,
coords,
node_index,
node_weight,
test_dof_index,
trial_dof_index,
)
vol = domain.element_measure(domain_arg, sample)
val = integrand_func(sample, fields, values)
val_sum += accumulate_dtype(node_weight * vol * val)
triplet_values[local_node_index, test_dof, trial_dof] = output_dtype(val_sum)
triplet_rows[local_node_index] = node_index
triplet_cols[local_node_index] = node_index
return integrate_kernel_fn
def _generate_integrate_kernel(
integrand: Integrand,
domain: GeometryDomain,
nodal: bool,
quadrature: Quadrature,
test: Optional[TestField],
test_name: str,
trial: Optional[TrialField],
trial_name: str,
fields: Dict[str, FieldLike],
output_dtype: type,
accumulate_dtype: type,
kernel_options: Optional[Dict[str, Any]] = None,
) -> wp.Kernel:
if kernel_options is None:
kernel_options = {}
output_dtype = wp.types.type_scalar_type(output_dtype)
# Extract field arguments from integrand
field_args, value_args, domain_name, sample_name = _get_integrand_field_arguments(
integrand, fields=fields, domain=domain
)
FieldStruct = _gen_field_struct(field_args)
ValueStruct = _gen_value_struct(value_args)
# Check if kernel exists in cache
kernel_suffix = f"_itg_{wp.types.type_typestr(output_dtype)}{wp.types.type_typestr(accumulate_dtype)}_{domain.name}_{FieldStruct.key}"
if nodal:
kernel_suffix += "_nodal"
else:
kernel_suffix += quadrature.name
if test:
kernel_suffix += f"_test_{test.space_partition.name}_{test.space.name}"
if trial:
kernel_suffix += f"_trial_{trial.space_partition.name}_{trial.space.name}"
kernel = cache.get_integrand_kernel(
integrand=integrand,
suffix=kernel_suffix,
)
if kernel is not None:
return kernel, FieldStruct, ValueStruct
# Not found in cache, transform integrand and generate kernel
_check_field_compat(integrand, fields, field_args, domain)
integrand_func = _translate_integrand(
integrand,
field_args,
)
_register_integrand_field_wrappers(integrand_func, fields)
if test is None and trial is None:
integrate_kernel_fn = get_integrate_constant_kernel(
integrand_func,
domain,
quadrature,
FieldStruct,
ValueStruct,
accumulate_dtype=accumulate_dtype,
)
elif trial is None:
if nodal:
integrate_kernel_fn = get_integrate_linear_nodal_kernel(
integrand_func,
domain,
FieldStruct,
ValueStruct,
test=test,
output_dtype=output_dtype,
accumulate_dtype=accumulate_dtype,
)
else:
integrate_kernel_fn = get_integrate_linear_kernel(
integrand_func,
domain,
quadrature,
FieldStruct,
ValueStruct,
test=test,
output_dtype=output_dtype,
accumulate_dtype=accumulate_dtype,
)
else:
if nodal:
integrate_kernel_fn = get_integrate_bilinear_nodal_kernel(
integrand_func,
domain,
FieldStruct,
ValueStruct,
test=test,
output_dtype=output_dtype,
accumulate_dtype=accumulate_dtype,
)
else:
integrate_kernel_fn = get_integrate_bilinear_kernel(
integrand_func,
domain,
quadrature,
FieldStruct,
ValueStruct,
test=test,
trial=trial,
output_dtype=output_dtype,
accumulate_dtype=accumulate_dtype,
)
kernel = cache.get_integrand_kernel(
integrand=integrand,
kernel_fn=integrate_kernel_fn,
suffix=kernel_suffix,
kernel_options=kernel_options,
code_transformers=[
PassFieldArgsToIntegrand(
arg_names=integrand.argspec.args,
field_args=field_args.keys(),
value_args=value_args.keys(),
sample_name=sample_name,
domain_name=domain_name,
test_name=test_name,
trial_name=trial_name,
)
],
)
return kernel, FieldStruct, ValueStruct
def _launch_integrate_kernel(
integrand: Integrand,
kernel: wp.Kernel,
FieldStruct: wp.codegen.Struct,
ValueStruct: wp.codegen.Struct,
domain: GeometryDomain,
nodal: bool,
quadrature: Quadrature,
test: Optional[TestField],
trial: Optional[TrialField],
fields: Dict[str, FieldLike],
values: Dict[str, Any],
accumulate_dtype: type,
temporary_store: Optional[cache.TemporaryStore],
output_dtype: type,
output: Optional[Union[wp.array, BsrMatrix]],
device,
):
# Set-up launch arguments
domain_elt_arg = domain.element_arg_value(device=device)
domain_elt_index_arg = domain.element_index_arg_value(device=device)
if quadrature is not None:
qp_arg = quadrature.arg_value(device=device)
field_arg_values = FieldStruct()
for k, v in fields.items():
setattr(field_arg_values, k, v.eval_arg_value(device=device))
value_struct_values = _populate_value_struct(ValueStruct, values, integrand_name=integrand.name)
# Constant form
if test is None and trial is None:
if output is not None and output.dtype == accumulate_dtype:
if output.size < 1:
raise RuntimeError("Output array must be of size at least 1")
accumulate_array = output
else:
accumulate_temporary = cache.borrow_temporary(
shape=(1,),
device=device,
dtype=accumulate_dtype,
temporary_store=temporary_store,
requires_grad=output is not None and output.requires_grad,
)
accumulate_array = accumulate_temporary.array
accumulate_array.zero_()
wp.launch(
kernel=kernel,
dim=domain.element_count(),
inputs=[
qp_arg,
domain_elt_arg,
domain_elt_index_arg,
field_arg_values,
value_struct_values,
accumulate_array,
],
device=device,
)
if output == accumulate_array:
return output
elif output is None:
return accumulate_array.numpy()[0]
else:
array_cast(in_array=accumulate_array, out_array=output)
return output
test_arg = test.space_restriction.node_arg(device=device)
# Linear form
if trial is None:
# If an output array is provided with the correct type, accumulate directly into it
# Otherwise, grab a temporary array
if output is None:
if type_length(output_dtype) == test.space.VALUE_DOF_COUNT:
output_shape = (test.space_partition.node_count(),)
elif type_length(output_dtype) == 1:
output_shape = (test.space_partition.node_count(), test.space.VALUE_DOF_COUNT)
else:
raise RuntimeError(
f"Incompatible output type {wp.types.type_repr(output_dtype)}, must be scalar or vector of length {test.space.VALUE_DOF_COUNT}"
)
output_temporary = cache.borrow_temporary(
temporary_store=temporary_store,
shape=output_shape,
dtype=output_dtype,
device=device,
)
output = output_temporary.array
else:
output_temporary = None
if output.shape[0] < test.space_partition.node_count():
raise RuntimeError(f"Output array must have at least {test.space_partition.node_count()} rows")
output_dtype = output.dtype
if type_length(output_dtype) != test.space.VALUE_DOF_COUNT:
if type_length(output_dtype) != 1:
raise RuntimeError(
f"Incompatible output type {wp.types.type_repr(output_dtype)}, must be scalar or vector of length {test.space.VALUE_DOF_COUNT}"
)
if output.ndim == 2 and output.shape[1] != test.space.VALUE_DOF_COUNT:
raise RuntimeError(
f"Incompatible output array shape, last dimension must be of size {test.space.VALUE_DOF_COUNT}"
)
# Launch the integration kernel on a 2d scalar view of the actual array
output.zero_()
def as_2d_array(array):
return wp.array(
data=None,
ptr=array.ptr,
capacity=array.capacity,
device=array.device,
shape=(test.space_partition.node_count(), test.space.VALUE_DOF_COUNT),
dtype=wp.types.type_scalar_type(output_dtype),
grad=None if array.grad is None else as_2d_array(array.grad),
)
output_view = output if output.ndim == 2 else as_2d_array(output)
if nodal:
wp.launch(
kernel=kernel,
dim=(test.space_restriction.node_count(), test.space.VALUE_DOF_COUNT),
inputs=[
domain_elt_arg,
domain_elt_index_arg,
test_arg,
field_arg_values,
value_struct_values,
output_view,
],
device=device,
)
else:
wp.launch(
kernel=kernel,
dim=(test.space_restriction.node_count(), test.space.VALUE_DOF_COUNT),
inputs=[
qp_arg,
domain_elt_arg,
domain_elt_index_arg,
test_arg,
field_arg_values,
value_struct_values,
output_view,
],
device=device,
)
if output_temporary is not None:
return output_temporary.detach()
return output
# Bilinear form
if test.space.VALUE_DOF_COUNT == 1 and trial.space.VALUE_DOF_COUNT == 1:
block_type = output_dtype
else:
block_type = cache.cached_mat_type(
shape=(test.space.VALUE_DOF_COUNT, trial.space.VALUE_DOF_COUNT), dtype=output_dtype
)
if nodal:
nnz = test.space_restriction.node_count()
else:
nnz = test.space_restriction.total_node_element_count() * trial.space.topology.NODES_PER_ELEMENT
triplet_rows_temp = cache.borrow_temporary(temporary_store, shape=(nnz,), dtype=int, device=device)
triplet_cols_temp = cache.borrow_temporary(temporary_store, shape=(nnz,), dtype=int, device=device)
triplet_values_temp = cache.borrow_temporary(
temporary_store,
shape=(
nnz,
test.space.VALUE_DOF_COUNT,
trial.space.VALUE_DOF_COUNT,
),
dtype=output_dtype,
device=device,
)
triplet_cols = triplet_cols_temp.array
triplet_rows = triplet_rows_temp.array
triplet_values = triplet_values_temp.array
triplet_values.zero_()
if nodal:
wp.launch(
kernel=kernel,
dim=triplet_values.shape,
inputs=[
domain_elt_arg,
domain_elt_index_arg,
test_arg,
field_arg_values,
value_struct_values,
triplet_rows,
triplet_cols,
triplet_values,
],
device=device,
)
else:
offsets = test.space_restriction.partition_element_offsets()
trial_partition_arg = trial.space_partition.partition_arg_value(device)
trial_topology_arg = trial.space_partition.space_topology.topo_arg_value(device)
wp.launch(
kernel=kernel,
dim=(
test.space_restriction.node_count(),
trial.space.topology.NODES_PER_ELEMENT,
test.space.VALUE_DOF_COUNT,
trial.space.VALUE_DOF_COUNT,
),
inputs=[
qp_arg,
domain_elt_arg,
domain_elt_index_arg,
test_arg,
trial_partition_arg,
trial_topology_arg,
field_arg_values,
value_struct_values,
offsets,
triplet_rows,
triplet_cols,
triplet_values,
],
device=device,
)
if output is not None:
if output.nrow != test.space_partition.node_count() or output.ncol != trial.space_partition.node_count():
raise RuntimeError(
f"Output matrix must have {test.space_partition.node_count()} rows and {trial.space_partition.node_count()} columns of blocks"
)
else:
output = bsr_zeros(
rows_of_blocks=test.space_partition.node_count(),
cols_of_blocks=trial.space_partition.node_count(),
block_type=block_type,
device=device,
)
bsr_set_from_triplets(output, triplet_rows, triplet_cols, triplet_values)
# Do not wait for garbage collection
triplet_values_temp.release()
triplet_rows_temp.release()
triplet_cols_temp.release()
return output
def integrate(
integrand: Integrand,
domain: Optional[GeometryDomain] = None,
quadrature: Optional[Quadrature] = None,
nodal: bool = False,
fields: Optional[Dict[str, FieldLike]] = None,
values: Optional[Dict[str, Any]] = None,
accumulate_dtype: type = wp.float64,
output_dtype: Optional[type] = None,
output: Optional[Union[BsrMatrix, wp.array]] = None,
device=None,
temporary_store: Optional[cache.TemporaryStore] = None,
kernel_options: Optional[Dict[str, Any]] = None,
):
"""
Integrates a constant, linear or bilinear form, and returns a scalar, array, or sparse matrix, respectively.
Args:
integrand: Form to be integrated, must have :func:`integrand` decorator
domain: Integration domain. If None, deduced from fields
quadrature: Quadrature formula. If None, deduced from domain and fields degree.
nodal: For linear or bilinear form only, use the test function nodes as the quadrature points. Assumes Lagrange interpolation functions are used, and no differential or DG operator is evaluated on the test or trial functions.
fields: Discrete, test, and trial fields to be passed to the integrand. Keys in the dictionary must match integrand parameter names.
values: Additional variable values to be passed to the integrand, can be of any type accepted by warp kernel launches. Keys in the dictionary must match integrand parameter names.
temporary_store: shared pool from which to allocate temporary arrays
accumulate_dtype: Scalar type to be used for accumulating integration samples
output: Sparse matrix or warp array into which to store the result of the integration
output_dtype: Scalar type for returned results if `output` is not provided. If None, defaults to `accumulate_dtype`
device: Device on which to perform the integration
kernel_options: Options to be passed to the kernel builder, overriding the defaults (e.g., ``{"enable_backward": True}``)
"""
if fields is None:
fields = {}
if values is None:
values = {}
if kernel_options is None:
kernel_options = {}
if not isinstance(integrand, Integrand):
raise ValueError("integrand must be tagged with @warp.fem.integrand decorator")
test, test_name, trial, trial_name = _get_test_and_trial_fields(fields)
if domain is None:
if quadrature is not None:
domain = quadrature.domain
elif test is not None:
domain = test.domain
if domain is None:
raise ValueError("Must provide at least one of domain, quadrature, or test field")
if test is not None and domain != test.domain:
raise NotImplementedError("Mixing integration and test domain is not supported yet")
if nodal:
if quadrature is not None:
raise ValueError("Cannot specify quadrature for nodal integration")
if test is None:
raise ValueError("Nodal integration requires specifying a test function")
if trial is not None and test.space_partition != trial.space_partition:
raise ValueError(
"Bilinear nodal integration requires test and trial to be defined on the same function space"
)
else:
if quadrature is None:
order = sum(field.degree for field in fields.values())
quadrature = RegularQuadrature(domain=domain, order=order)
elif domain != quadrature.domain:
raise ValueError("Incompatible integration and quadrature domain")
# Canonicalize types
accumulate_dtype = wp.types.type_to_warp(accumulate_dtype)
if output is not None:
if isinstance(output, BsrMatrix):
output_dtype = output.scalar_type
else:
output_dtype = output.dtype
elif output_dtype is None:
output_dtype = accumulate_dtype
else:
output_dtype = wp.types.type_to_warp(output_dtype)
kernel, FieldStruct, ValueStruct = _generate_integrate_kernel(
integrand=integrand,
domain=domain,
nodal=nodal,
quadrature=quadrature,
test=test,
test_name=test_name,
trial=trial,
trial_name=trial_name,
fields=fields,
accumulate_dtype=accumulate_dtype,
output_dtype=output_dtype,
kernel_options=kernel_options,
)
return _launch_integrate_kernel(
integrand=integrand,
kernel=kernel,
FieldStruct=FieldStruct,
ValueStruct=ValueStruct,
domain=domain,
nodal=nodal,
quadrature=quadrature,
test=test,
trial=trial,
fields=fields,
values=values,
accumulate_dtype=accumulate_dtype,
temporary_store=temporary_store,
output_dtype=output_dtype,
output=output,
device=device,
)
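# Illustrative usage sketch (not part of the original module): assembling the
# mass matrix of a scalar polynomial space with `integrate`. The geometry,
# resolution, and degree below are arbitrary example values.
def _example_integrate_mass_matrix():
    import warp.fem as fem

    @fem.integrand
    def mass_form(s: fem.Sample, u: fem.Field, v: fem.Field):
        # Product of trial and test shape functions at the sample point
        return u(s) * v(s)

    geo = fem.Grid2D(res=wp.vec2i(8))
    space = fem.make_polynomial_space(geo, degree=2)
    u = fem.make_trial(space=space)
    v = fem.make_test(space=space)
    # Both a test and a trial field are passed, so a BsrMatrix is returned
    return fem.integrate(mass_form, fields={"u": u, "v": v})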
def get_interpolate_to_field_function(
integrand_func: wp.Function,
domain: GeometryDomain,
FieldStruct: wp.codegen.Struct,
ValueStruct: wp.codegen.Struct,
dest: FieldRestriction,
):
value_type = dest.space.dtype
def interpolate_to_field_fn(
local_node_index: int,
domain_arg: domain.ElementArg,
domain_index_arg: domain.ElementIndexArg,
dest_node_arg: dest.space_restriction.NodeArg,
dest_eval_arg: dest.field.EvalArg,
fields: FieldStruct,
values: ValueStruct,
):
node_index = dest.space_restriction.node_partition_index(dest_node_arg, local_node_index)
element_count = dest.space_restriction.node_element_count(dest_node_arg, local_node_index)
test_dof_index = NULL_DOF_INDEX
trial_dof_index = NULL_DOF_INDEX
node_weight = 1.0
# Volume-weighted average across elements
# Superfluous if the interpolated function is continuous, but helpful for visualizing discontinuous spaces
val_sum = value_type(0.0)
vol_sum = float(0.0)
for n in range(element_count):
node_element_index = dest.space_restriction.node_element_index(dest_node_arg, local_node_index, n)
element_index = domain.element_index(domain_index_arg, node_element_index.domain_element_index)
coords = dest.space.node_coords_in_element(
domain_arg,
dest_eval_arg.space_arg,
element_index,
node_element_index.node_index_in_element,
)
if coords[0] != OUTSIDE:
sample = Sample(
element_index,
coords,
node_index,
node_weight,
test_dof_index,
trial_dof_index,
)
vol = domain.element_measure(domain_arg, sample)
val = integrand_func(sample, fields, values)
vol_sum += vol
val_sum += vol * val
return val_sum, vol_sum
return interpolate_to_field_fn
def get_interpolate_to_field_kernel(
interpolate_to_field_fn: wp.Function,
domain: GeometryDomain,
FieldStruct: wp.codegen.Struct,
ValueStruct: wp.codegen.Struct,
dest: FieldRestriction,
):
def interpolate_to_field_kernel_fn(
domain_arg: domain.ElementArg,
domain_index_arg: domain.ElementIndexArg,
dest_node_arg: dest.space_restriction.NodeArg,
dest_eval_arg: dest.field.EvalArg,
fields: FieldStruct,
values: ValueStruct,
):
local_node_index = wp.tid()
val_sum, vol_sum = interpolate_to_field_fn(
local_node_index, domain_arg, domain_index_arg, dest_node_arg, dest_eval_arg, fields, values
)
if vol_sum > 0.0:
node_index = dest.space_restriction.node_partition_index(dest_node_arg, local_node_index)
dest.field.set_node_value(dest_eval_arg, node_index, val_sum / vol_sum)
return interpolate_to_field_kernel_fn
def get_interpolate_to_array_kernel(
integrand_func: wp.Function,
domain: GeometryDomain,
quadrature: Quadrature,
FieldStruct: wp.codegen.Struct,
ValueStruct: wp.codegen.Struct,
value_type: type,
):
def interpolate_to_array_kernel_fn(
qp_arg: quadrature.Arg,
domain_arg: quadrature.domain.ElementArg,
domain_index_arg: quadrature.domain.ElementIndexArg,
fields: FieldStruct,
values: ValueStruct,
result: wp.array(dtype=value_type),
):
element_index = domain.element_index(domain_index_arg, wp.tid())
test_dof_index = NULL_DOF_INDEX
trial_dof_index = NULL_DOF_INDEX
qp_point_count = quadrature.point_count(domain_arg, qp_arg, element_index)
for k in range(qp_point_count):
qp_index = quadrature.point_index(domain_arg, qp_arg, element_index, k)
coords = quadrature.point_coords(domain_arg, qp_arg, element_index, k)
qp_weight = quadrature.point_weight(domain_arg, qp_arg, element_index, k)
sample = Sample(element_index, coords, qp_index, qp_weight, test_dof_index, trial_dof_index)
result[qp_index] = integrand_func(sample, fields, values)
return interpolate_to_array_kernel_fn
def get_interpolate_nonvalued_kernel(
integrand_func: wp.Function,
domain: GeometryDomain,
quadrature: Quadrature,
FieldStruct: wp.codegen.Struct,
ValueStruct: wp.codegen.Struct,
):
def interpolate_nonvalued_kernel_fn(
qp_arg: quadrature.Arg,
domain_arg: quadrature.domain.ElementArg,
domain_index_arg: quadrature.domain.ElementIndexArg,
fields: FieldStruct,
values: ValueStruct,
):
element_index = domain.element_index(domain_index_arg, wp.tid())
test_dof_index = NULL_DOF_INDEX
trial_dof_index = NULL_DOF_INDEX
qp_point_count = quadrature.point_count(domain_arg, qp_arg, element_index)
for k in range(qp_point_count):
qp_index = quadrature.point_index(domain_arg, qp_arg, element_index, k)
coords = quadrature.point_coords(domain_arg, qp_arg, element_index, k)
qp_weight = quadrature.point_weight(domain_arg, qp_arg, element_index, k)
sample = Sample(element_index, coords, qp_index, qp_weight, test_dof_index, trial_dof_index)
integrand_func(sample, fields, values)
return interpolate_nonvalued_kernel_fn
def _generate_interpolate_kernel(
integrand: Integrand,
domain: GeometryDomain,
dest: Optional[Union[FieldLike, wp.array]],
quadrature: Optional[Quadrature],
fields: Dict[str, FieldLike],
kernel_options: Optional[Dict[str, Any]] = None,
) -> wp.Kernel:
if kernel_options is None:
kernel_options = {}
# Extract field arguments from integrand
field_args, value_args, domain_name, sample_name = _get_integrand_field_arguments(
integrand, fields=fields, domain=domain
)
# Generate field struct
integrand_func = _translate_integrand(
integrand,
field_args,
)
_register_integrand_field_wrappers(integrand_func, fields)
FieldStruct = _gen_field_struct(field_args)
ValueStruct = _gen_value_struct(value_args)
# Check if kernel exists in cache
if isinstance(dest, FieldRestriction):
kernel_suffix = (
f"_itp_{FieldStruct.key}_{dest.domain.name}_{dest.space_restriction.space_partition.name}_{dest.space.name}"
)
elif wp.types.is_array(dest):
kernel_suffix = f"_itp_{FieldStruct.key}_{quadrature.name}_{wp.types.type_repr(dest.dtype)}"
else:
kernel_suffix = f"_itp_{FieldStruct.key}_{quadrature.name}"
kernel = cache.get_integrand_kernel(
integrand=integrand,
suffix=kernel_suffix,
)
if kernel is not None:
return kernel, FieldStruct, ValueStruct
_check_field_compat(integrand, fields, field_args, domain)
# Generate interpolation kernel
if isinstance(dest, FieldRestriction):
# need to split into kernel + function for differentiability
interpolate_fn = get_interpolate_to_field_function(
integrand_func,
domain,
dest=dest,
FieldStruct=FieldStruct,
ValueStruct=ValueStruct,
)
interpolate_fn = cache.get_integrand_function(
integrand=integrand,
func=interpolate_fn,
suffix=kernel_suffix,
code_transformers=[
PassFieldArgsToIntegrand(
arg_names=integrand.argspec.args,
field_args=field_args.keys(),
value_args=value_args.keys(),
sample_name=sample_name,
domain_name=domain_name,
)
],
)
interpolate_kernel_fn = get_interpolate_to_field_kernel(
interpolate_fn,
domain,
dest=dest,
FieldStruct=FieldStruct,
ValueStruct=ValueStruct,
)
elif wp.types.is_array(dest):
interpolate_kernel_fn = get_interpolate_to_array_kernel(
integrand_func,
domain=domain,
quadrature=quadrature,
value_type=dest.dtype,
FieldStruct=FieldStruct,
ValueStruct=ValueStruct,
)
else:
interpolate_kernel_fn = get_interpolate_nonvalued_kernel(
integrand_func,
domain=domain,
quadrature=quadrature,
FieldStruct=FieldStruct,
ValueStruct=ValueStruct,
)
kernel = cache.get_integrand_kernel(
integrand=integrand,
kernel_fn=interpolate_kernel_fn,
suffix=kernel_suffix,
kernel_options=kernel_options,
code_transformers=[
PassFieldArgsToIntegrand(
arg_names=integrand.argspec.args,
field_args=field_args.keys(),
value_args=value_args.keys(),
sample_name=sample_name,
domain_name=domain_name,
)
],
)
return kernel, FieldStruct, ValueStruct
def _launch_interpolate_kernel(
integrand: Integrand,
kernel: wp.kernel,
FieldStruct: wp.codegen.Struct,
ValueStruct: wp.codegen.Struct,
domain: GeometryDomain,
dest: Optional[Union[FieldRestriction, wp.array]],
quadrature: Optional[Quadrature],
fields: Dict[str, FieldLike],
values: Dict[str, Any],
device,
):
# Set-up launch arguments
elt_arg = domain.element_arg_value(device=device)
elt_index_arg = domain.element_index_arg_value(device=device)
field_arg_values = FieldStruct()
for k, v in fields.items():
setattr(field_arg_values, k, v.eval_arg_value(device=device))
value_struct_values = _populate_value_struct(ValueStruct, values, integrand_name=integrand.name)
if isinstance(dest, FieldRestriction):
dest_node_arg = dest.space_restriction.node_arg(device=device)
dest_eval_arg = dest.field.eval_arg_value(device=device)
wp.launch(
kernel=kernel,
dim=dest.space_restriction.node_count(),
inputs=[
elt_arg,
elt_index_arg,
dest_node_arg,
dest_eval_arg,
field_arg_values,
value_struct_values,
],
device=device,
)
elif wp.types.is_array(dest):
qp_arg = quadrature.arg_value(device)
wp.launch(
kernel=kernel,
dim=domain.element_count(),
inputs=[qp_arg, elt_arg, elt_index_arg, field_arg_values, value_struct_values, dest],
device=device,
)
else:
qp_arg = quadrature.arg_value(device)
wp.launch(
kernel=kernel,
dim=domain.element_count(),
inputs=[qp_arg, elt_arg, elt_index_arg, field_arg_values, value_struct_values],
device=device,
)
def interpolate(
integrand: Integrand,
dest: Optional[Union[DiscreteField, FieldRestriction, wp.array]] = None,
quadrature: Optional[Quadrature] = None,
fields: Optional[Dict[str, FieldLike]] = None,
values: Optional[Dict[str, Any]] = None,
device=None,
kernel_options: Optional[Dict[str, Any]] = None,
):
"""
Interpolates a function at a finite set of sample points and optionally assigns the result to a discrete field or a raw warp array.
Args:
integrand: Function to be interpolated, must have :func:`integrand` decorator
dest: Where to store the interpolation result. Can be either
- a :class:`DiscreteField`, or restriction of a discrete field to a domain (from :func:`make_restriction`). In this case, interpolation will be performed at each node.
- a normal warp array. In this case, the `quadrature` argument defining the interpolation locations must be provided and the result of the `integrand` at each quadrature point will be assigned to the array.
- ``None``. In this case, the `quadrature` argument must also be provided and the `integrand` function is responsible for dealing with the interpolation result.
quadrature: Quadrature formula defining the interpolation samples if `dest` is not a discrete field or field restriction.
fields: Discrete fields to be passed to the integrand. Keys in the dictionary must match integrand parameters names.
values: Additional variable values to be passed to the integrand, can be of any type accepted by warp kernel launches. Keys in the dictionary must match integrand parameter names.
device: Device on which to perform the interpolation
kernel_options: Options to be passed to the kernel builder, overriding the defaults (e.g., ``{"enable_backward": True}``)
"""
if fields is None:
fields = {}
if values is None:
values = {}
if kernel_options is None:
kernel_options = {}
if not isinstance(integrand, Integrand):
raise ValueError("integrand must be tagged with @integrand decorator")
test, _, trial, __ = _get_test_and_trial_fields(fields)
if test is not None or trial is not None:
raise ValueError("Test or Trial fields should not be used for interpolation")
if isinstance(dest, DiscreteField):
dest = make_restriction(dest)
if isinstance(dest, FieldRestriction):
domain = dest.domain
else:
if quadrature is None:
raise ValueError("When not interpolating to a field, a quadrature formula must be provided")
domain = quadrature.domain
kernel, FieldStruct, ValueStruct = _generate_interpolate_kernel(
integrand=integrand,
domain=domain,
dest=dest,
quadrature=quadrature,
fields=fields,
kernel_options=kernel_options,
)
return _launch_interpolate_kernel(
integrand=integrand,
kernel=kernel,
FieldStruct=FieldStruct,
ValueStruct=ValueStruct,
domain=domain,
dest=dest,
quadrature=quadrature,
fields=fields,
values=values,
device=device,
)
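# Illustrative usage sketch (not part of the original module): interpolating
# an analytic expression onto a discrete field. `space` is assumed to be an
# existing scalar function space.
def _example_interpolate_to_field(space):
    import warp.fem as fem

    @fem.integrand
    def init_field(s: fem.Sample, domain: fem.Domain):
        # Evaluate the world position of the sample and derive a value from it
        pos = domain(s)
        return wp.sin(pos[0])

    field = space.make_field()
    # `dest` is a DiscreteField, so interpolation is performed at each node
    fem.interpolate(init_field, dest=field)
    return field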
NVIDIA/warp/warp/fem/field/trial.py

import warp as wp
from warp.fem import cache, utils
from warp.fem.domain import GeometryDomain
from warp.fem.space import FunctionSpace, SpacePartition
from warp.fem.types import Sample, get_node_index_in_element
from .field import SpaceField
class TrialField(SpaceField):
"""Field defined over a domain that can be used as a trial function"""
def __init__(
self,
space: FunctionSpace,
space_partition: SpacePartition,
domain: GeometryDomain,
):
if domain.dimension == space.dimension - 1:
space = space.trace()
if domain.dimension != space.dimension:
raise ValueError("Incompatible space and domain dimensions")
if not space.topology.is_derived_from(space_partition.space_topology):
raise ValueError("Incompatible space and space partition topologies")
super().__init__(space, space_partition)
self.domain = domain
self.EvalArg = self.space.SpaceArg
self.ElementEvalArg = self._make_element_eval_arg()
self.eval_degree = self._make_eval_degree()
self.eval_inner = self._make_eval_inner()
self.eval_grad_inner = self._make_eval_grad_inner()
self.eval_div_inner = self._make_eval_div_inner()
self.eval_outer = self._make_eval_outer()
self.eval_grad_outer = self._make_eval_grad_outer()
self.eval_div_outer = self._make_eval_div_outer()
self.at_node = self._make_at_node()
def partition_node_count(self) -> int:
"""Returns the number of nodes in the associated space topology partition"""
return self.space_partition.node_count()
@property
def name(self) -> str:
return self.space.name + "Trial"
def eval_arg_value(self, device) -> wp.codegen.StructInstance:
return self.space.space_arg_value(device)
def _make_element_eval_arg(self):
@cache.dynamic_struct(suffix=self.name)
class ElementEvalArg:
elt_arg: self.domain.ElementArg
eval_arg: self.EvalArg
return ElementEvalArg
def _make_eval_inner(self):
@cache.dynamic_func(suffix=self.name)
def eval_trial_inner(args: self.ElementEvalArg, s: Sample):
weight = self.space.element_inner_weight(
args.elt_arg,
args.eval_arg,
s.element_index,
s.element_coords,
get_node_index_in_element(s.trial_dof),
)
return weight * self.space.unit_dof_value(args.elt_arg, args.eval_arg, s.trial_dof)
return eval_trial_inner
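# The returned device function evaluates the trial shape function selected by
# the sample's trial_dof index, i.e. the scalar node weight times the unit
# degree-of-freedom value, so integrands see the trial field one basis
# function at a time.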
def _make_eval_grad_inner(self):
if not self.gradient_valid():
return None
@cache.dynamic_func(suffix=self.name)
def eval_nabla_trial_inner(args: self.ElementEvalArg, s: Sample):
nabla_weight = self.space.element_inner_weight_gradient(
args.elt_arg,
args.eval_arg,
s.element_index,
s.element_coords,
get_node_index_in_element(s.trial_dof),
)
grad_transform = self.space.element_inner_reference_gradient_transform(args.elt_arg, s)
return utils.generalized_outer(
self.space.unit_dof_value(args.elt_arg, args.eval_arg, s.trial_dof),
utils.apply_right(nabla_weight, grad_transform),
)
return eval_nabla_trial_inner
def _make_eval_div_inner(self):
if not self.divergence_valid():
return None
@cache.dynamic_func(suffix=self.name)
def eval_div_trial_inner(args: self.ElementEvalArg, s: Sample):
nabla_weight = self.space.element_inner_weight_gradient(
args.elt_arg,
args.eval_arg,
s.element_index,
s.element_coords,
get_node_index_in_element(s.trial_dof),
)
grad_transform = self.space.element_inner_reference_gradient_transform(args.elt_arg, s)
return utils.generalized_inner(
self.space.unit_dof_value(args.elt_arg, args.eval_arg, s.trial_dof),
utils.apply_right(nabla_weight, grad_transform),
)
return eval_div_trial_inner
def _make_eval_outer(self):
@cache.dynamic_func(suffix=self.name)
def eval_trial_outer(args: self.ElementEvalArg, s: Sample):
weight = self.space.element_outer_weight(
args.elt_arg,
args.eval_arg,
s.element_index,
s.element_coords,
get_node_index_in_element(s.trial_dof),
)
return weight * self.space.unit_dof_value(args.elt_arg, args.eval_arg, s.trial_dof)
return eval_trial_outer
def _make_eval_grad_outer(self):
if not self.gradient_valid():
return None
@cache.dynamic_func(suffix=self.name)
def eval_nabla_trial_outer(args: self.ElementEvalArg, s: Sample):
nabla_weight = self.space.element_outer_weight_gradient(
args.elt_arg,
args.eval_arg,
s.element_index,
s.element_coords,
get_node_index_in_element(s.trial_dof),
)
grad_transform = self.space.element_outer_reference_gradient_transform(args.elt_arg, s)
return utils.generalized_outer(
self.space.unit_dof_value(args.elt_arg, args.eval_arg, s.trial_dof),
utils.apply_right(nabla_weight, grad_transform),
)
return eval_nabla_trial_outer
def _make_eval_div_outer(self):
if not self.divergence_valid():
return None
@cache.dynamic_func(suffix=self.name)
def eval_div_trial_outer(args: self.ElementEvalArg, s: Sample):
nabla_weight = self.space.element_outer_weight_gradient(
args.elt_arg,
args.eval_arg,
s.element_index,
s.element_coords,
get_node_index_in_element(s.trial_dof),
)
grad_transform = self.space.element_outer_reference_gradient_transform(args.elt_arg, s)
return utils.generalized_inner(
self.space.unit_dof_value(args.elt_arg, args.eval_arg, s.trial_dof),
utils.apply_right(nabla_weight, grad_transform),
)
return eval_div_trial_outer
def _make_at_node(self):
@cache.dynamic_func(suffix=self.name)
def at_node(args: self.ElementEvalArg, s: Sample):
node_coords = self.space.node_coords_in_element(
args.elt_arg, args.eval_arg, s.element_index, get_node_index_in_element(s.trial_dof)
)
return Sample(s.element_index, node_coords, s.qp_index, s.qp_weight, s.test_dof, s.trial_dof)
return at_node
NVIDIA/warp/warp/fem/field/restriction.py

from warp.fem.space import SpaceRestriction
from .field import DiscreteField
class FieldRestriction:
"""Restriction of a discrete field to a given GeometryDomain"""
def __init__(self, space_restriction: SpaceRestriction, field: DiscreteField):
if field.space.dimension - 1 == space_restriction.space_topology.dimension:
field = field.trace()
if field.space.dimension != space_restriction.space_topology.dimension:
raise ValueError("Incompatible space and field dimensions")
if field.space.topology != space_restriction.space_topology:
raise ValueError("Incompatible field and space restriction topologies")
self.space_restriction = space_restriction
self.domain = self.space_restriction.domain
self.field = field
self.space = self.field.space
NVIDIA/warp/warp/fem/field/field.py

from typing import Any
import warp as wp
from warp.fem.geometry import DeformedGeometry, Geometry
from warp.fem.space import FunctionSpace, SpacePartition
from warp.fem.types import Sample
class FieldLike:
"""Base class for integrable fields"""
EvalArg: wp.codegen.Struct
"""Structure containing field-level arguments passed to device functions for field evaluation"""
ElementEvalArg: wp.codegen.Struct
"""Structure combining geometry-level and field-level arguments passed to device functions for field evaluation"""
def eval_arg_value(self, device) -> "EvalArg": # noqa: F821
"""Value of the field-level arguments to be passed to device functions"""
raise NotImplementedError
@property
def degree(self) -> int:
"""Polynomial degree of the field, used to estimate necessary quadrature order"""
raise NotImplementedError
@property
def name(self) -> str:
raise NotImplementedError
def __str__(self) -> str:
return self.name
@staticmethod
def eval_inner(args: "ElementEvalArg", s: "Sample"): # noqa: F821
"""Device function evaluating the inner field value at a sample point"""
raise NotImplementedError
@staticmethod
def eval_grad_inner(args: "ElementEvalArg", s: "Sample"): # noqa: F821
"""Device function evaluating the inner field gradient at a sample point"""
raise NotImplementedError
@staticmethod
def eval_div_inner(args: "ElementEvalArg", s: "Sample"): # noqa: F821
"""Device function evaluating the inner field divergence at a sample point"""
raise NotImplementedError
@staticmethod
def eval_outer(args: "ElementEvalArg", s: "Sample"): # noqa: F821
"""Device function evaluating the outer field value at a sample point"""
raise NotImplementedError
@staticmethod
def eval_grad_outer(args: "ElementEvalArg", s: "Sample"): # noqa: F821
"""Device function evaluating the outer field gradient at a sample point"""
raise NotImplementedError
@staticmethod
def eval_div_outer(args: "ElementEvalArg", s: "Sample"): # noqa: F821
"""Device function evaluating the outer field divergence at a sample point"""
raise NotImplementedError
class SpaceField(FieldLike):
"""Base class for fields defined over a function space"""
def __init__(self, space: FunctionSpace, space_partition: SpacePartition):
self._space = space
self._space_partition = space_partition
@property
def space(self) -> FunctionSpace:
return self._space
@property
def space_partition(self) -> SpacePartition:
return self._space_partition
@property
def degree(self) -> int:
return self.space.degree
@property
def dtype(self) -> type:
return self.space.dtype
@property
def dof_dtype(self) -> type:
return self.space.dof_dtype
def gradient_valid(self) -> bool:
"""Whether gradient operator can be computed. Only for scalar and vector fields as higher-order tensors are not support yet"""
return not wp.types.type_is_matrix(self.dtype)
def divergence_valid(self) -> bool:
"""Whether divergence of this field can be computed. Only for vector and tensor fields with same dimension as embedding geometry"""
if wp.types.type_is_vector(self.dtype):
return wp.types.type_length(self.dtype) == self.space.geometry.dimension
if wp.types.type_is_matrix(self.dtype):
return self.dtype._shape_[0] == self.space.geometry.dimension
return False
def _make_eval_degree(self):
ORDER = self.space.ORDER
from warp.fem import cache
@cache.dynamic_func(suffix=self.name)
def degree(args: self.ElementEvalArg):
return ORDER
return degree
class DiscreteField(SpaceField):
"""Explicitly-valued field defined over a partition of a discrete function space"""
@property
def dof_values(self) -> wp.array:
"""Array of degrees of freedom values"""
raise NotImplementedError
@dof_values.setter
def dof_values(self, values: wp.array):
"""Sets degrees of freedom values from an array"""
raise NotImplementedError
def trace(self) -> "DiscreteField":
"""Trace of this field over a lower-dimensional function space"""
raise NotImplementedError
@staticmethod
def set_node_value(args: "FieldLike.EvalArg", node_index: int, value: Any):
"""Device function setting the value at given node"""
raise NotImplementedError
@property
def name(self) -> str:
return f"{self.__class__.__qualname__}_{self.space.name}_{self.space_partition.name}"
def make_deformed_geometry(self) -> Geometry:
"""Returns a deformed version of the underlying geometry using this field's values as displacement"""
return DeformedGeometry(self)
| 5,138 | Python | 33.489933 | 139 | 0.672635 |
NVIDIA/warp/warp/fem/field/__init__.py | from typing import Optional, Union
from warp.fem.domain import Cells, GeometryDomain
from warp.fem.space import FunctionSpace, SpacePartition, SpaceRestriction, make_space_partition, make_space_restriction
from .field import DiscreteField, FieldLike, SpaceField
from .nodal_field import NodalField
from .restriction import FieldRestriction
from .test import TestField
from .trial import TrialField
def make_restriction(
field: DiscreteField,
space_restriction: Optional[SpaceRestriction] = None,
domain: Optional[GeometryDomain] = None,
device=None,
) -> FieldRestriction:
"""
Restricts a discrete field to a subset of elements.
Args:
field: the discrete field to restrict
space_restriction: the function space restriction defining the subset of elements to consider
domain: if ``space_restriction`` is not provided, the :py:class:`Domain` defining the subset of elements to consider
device: Warp device on which to perform and store computations
Returns:
the field restriction
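    Example:
        An illustrative sketch; ``geo`` is assumed to be an existing :class:`Geometry`
        and ``u`` a :class:`DiscreteField` defined over it::
            from warp.fem import Cells
            cells = Cells(geometry=geo)
            u_restricted = make_restriction(u, domain=cells)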
"""
if space_restriction is None:
space_restriction = make_space_restriction(space_partition=field.space_partition, domain=domain, device=device)
return FieldRestriction(field=field, space_restriction=space_restriction)
def make_test(
space: FunctionSpace,
space_restriction: Optional[SpaceRestriction] = None,
space_partition: Optional[SpacePartition] = None,
domain: Optional[GeometryDomain] = None,
device=None,
) -> TestField:
"""
Constructs a test field over a function space or its restriction
Args:
space: the function space
space_restriction: restriction of the space topology to a domain
space_partition: if `space_restriction` is ``None``, the optional subset of node indices to consider
domain: if `space_restriction` is ``None``, optional subset of elements to consider
device: Warp device on which to perform and store computations
Returns:
the test field
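    Example:
        An illustrative sketch; ``geo`` is assumed to be an existing :class:`Geometry`::
            from warp.fem import make_polynomial_space
            space = make_polynomial_space(geo, degree=2)
            v = make_test(space)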
"""
if space_restriction is None:
space_restriction = make_space_restriction(
space_topology=space.topology, space_partition=space_partition, domain=domain, device=device
)
return TestField(space_restriction=space_restriction, space=space)
def make_trial(
space: FunctionSpace,
space_restriction: Optional[SpaceRestriction] = None,
space_partition: Optional[SpacePartition] = None,
domain: Optional[GeometryDomain] = None,
) -> TrialField:
"""
Constructs a trial field over a function space or partition
Args:
        space: the function space
space_restriction: restriction of the space topology to a domain
space_partition: if `space_restriction` is ``None``, the optional subset of node indices to consider
domain: if `space_restriction` is ``None``, optional subset of elements to consider
Returns:
the trial field
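    Example:
        An illustrative sketch; ``space`` is assumed to be an existing :class:`FunctionSpace`::
            u = make_trial(space)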
"""
if space_restriction is not None:
        domain = space_restriction.domain
        space_partition = space_restriction.space_partition
if space_partition is None:
if domain is None:
domain = Cells(geometry=space.geometry)
space_partition = make_space_partition(
space_topology=space.topology, geometry_partition=domain.geometry_partition
)
elif domain is None:
domain = Cells(geometry=space_partition.geo_partition)
return TrialField(space, space_partition, domain)
| 3,591 | Python | 34.564356 | 124 | 0.711501 |
NVIDIA/warp/warp/fem/field/test.py | import warp as wp
from warp.fem import cache, utils
from warp.fem.space import FunctionSpace, SpaceRestriction
from warp.fem.types import Sample, get_node_index_in_element
from .field import SpaceField
class TestField(SpaceField):
"""Field defined over a space restriction that can be used as a test function.
    To allow reusing computations, the test field may be defined from a SpaceRestriction
    that was built for a different value type than the test function's, as long as the node topology matches.
"""
def __init__(self, space_restriction: SpaceRestriction, space: FunctionSpace):
if space_restriction.domain.dimension == space.dimension - 1:
space = space.trace()
if space_restriction.domain.dimension != space.dimension:
raise ValueError("Incompatible space and domain dimensions")
if space.topology != space_restriction.space_topology:
raise ValueError("Incompatible space and space partition topologies")
super().__init__(space, space_restriction.space_partition)
self.space_restriction = space_restriction
self.domain = self.space_restriction.domain
self.EvalArg = self.space.SpaceArg
self.ElementEvalArg = self._make_element_eval_arg()
self.eval_degree = self._make_eval_degree()
self.eval_inner = self._make_eval_inner()
self.eval_grad_inner = self._make_eval_grad_inner()
self.eval_div_inner = self._make_eval_div_inner()
self.eval_outer = self._make_eval_outer()
self.eval_grad_outer = self._make_eval_grad_outer()
self.eval_div_outer = self._make_eval_div_outer()
self.at_node = self._make_at_node()
@property
def name(self) -> str:
return self.space.name + "Test"
def eval_arg_value(self, device) -> wp.codegen.StructInstance:
return self.space.space_arg_value(device)
def _make_element_eval_arg(self):
from warp.fem import cache
@cache.dynamic_struct(suffix=self.name)
class ElementEvalArg:
elt_arg: self.domain.ElementArg
eval_arg: self.EvalArg
return ElementEvalArg
def _make_eval_inner(self):
@cache.dynamic_func(suffix=self.name)
def eval_test_inner(args: self.ElementEvalArg, s: Sample):
weight = self.space.element_inner_weight(
args.elt_arg,
args.eval_arg,
s.element_index,
s.element_coords,
get_node_index_in_element(s.test_dof),
)
return weight * self.space.unit_dof_value(args.elt_arg, args.eval_arg, s.test_dof)
return eval_test_inner
def _make_eval_grad_inner(self):
if not self.gradient_valid():
return None
@cache.dynamic_func(suffix=self.name)
def eval_nabla_test_inner(args: self.ElementEvalArg, s: Sample):
nabla_weight = self.space.element_inner_weight_gradient(
args.elt_arg,
args.eval_arg,
s.element_index,
s.element_coords,
get_node_index_in_element(s.test_dof),
)
grad_transform = self.space.element_inner_reference_gradient_transform(args.elt_arg, s)
return utils.generalized_outer(
self.space.unit_dof_value(args.elt_arg, args.eval_arg, s.test_dof),
utils.apply_right(nabla_weight, grad_transform),
)
return eval_nabla_test_inner
def _make_eval_div_inner(self):
if not self.divergence_valid():
return None
@cache.dynamic_func(suffix=self.name)
def eval_div_test_inner(args: self.ElementEvalArg, s: Sample):
nabla_weight = self.space.element_inner_weight_gradient(
args.elt_arg,
args.eval_arg,
s.element_index,
s.element_coords,
get_node_index_in_element(s.test_dof),
)
grad_transform = self.space.element_inner_reference_gradient_transform(args.elt_arg, s)
return utils.generalized_inner(
self.space.unit_dof_value(args.elt_arg, args.eval_arg, s.test_dof),
utils.apply_right(nabla_weight, grad_transform),
)
return eval_div_test_inner
def _make_eval_outer(self):
@cache.dynamic_func(suffix=self.name)
def eval_test_outer(args: self.ElementEvalArg, s: Sample):
weight = self.space.element_outer_weight(
args.elt_arg,
args.eval_arg,
s.element_index,
s.element_coords,
get_node_index_in_element(s.test_dof),
)
return weight * self.space.unit_dof_value(args.elt_arg, args.eval_arg, s.test_dof)
return eval_test_outer
def _make_eval_grad_outer(self):
if not self.gradient_valid():
return None
@cache.dynamic_func(suffix=self.name)
def eval_nabla_test_outer(args: self.ElementEvalArg, s: Sample):
nabla_weight = self.space.element_outer_weight_gradient(
args.elt_arg,
args.eval_arg,
s.element_index,
s.element_coords,
get_node_index_in_element(s.test_dof),
)
grad_transform = self.space.element_outer_reference_gradient_transform(args.elt_arg, s)
return utils.generalized_outer(
self.space.unit_dof_value(args.elt_arg, args.eval_arg, s.test_dof),
utils.apply_right(nabla_weight, grad_transform),
)
return eval_nabla_test_outer
def _make_eval_div_outer(self):
if not self.divergence_valid():
return None
@cache.dynamic_func(suffix=self.name)
def eval_div_test_outer(args: self.ElementEvalArg, s: Sample):
nabla_weight = self.space.element_outer_weight_gradient(
args.elt_arg,
args.eval_arg,
s.element_index,
s.element_coords,
get_node_index_in_element(s.test_dof),
)
grad_transform = self.space.element_outer_reference_gradient_transform(args.elt_arg, s)
return utils.generalized_inner(
self.space.unit_dof_value(args.elt_arg, args.eval_arg, s.test_dof),
utils.apply_right(nabla_weight, grad_transform),
)
return eval_div_test_outer
def _make_at_node(self):
@cache.dynamic_func(suffix=self.name)
def at_node(args: self.ElementEvalArg, s: Sample):
node_coords = self.space.node_coords_in_element(
args.elt_arg, args.eval_arg, s.element_index, get_node_index_in_element(s.test_dof)
)
return Sample(s.element_index, node_coords, s.qp_index, s.qp_weight, s.test_dof, s.trial_dof)
return at_node
| 6,994 | Python | 37.646409 | 114 | 0.5998 |
NVIDIA/warp/warp/fem/field/nodal_field.py | import warp as wp
from warp.fem import cache, utils
from warp.fem.space import CollocatedFunctionSpace, SpacePartition
from warp.fem.types import NULL_NODE_INDEX, ElementIndex, Sample
from .field import DiscreteField
class NodalFieldBase(DiscreteField):
"""Base class for nodal field and nodal field traces. Does not hold values"""
def __init__(self, space: CollocatedFunctionSpace, space_partition: SpacePartition):
super().__init__(space, space_partition)
self.EvalArg = self._make_eval_arg()
self.ElementEvalArg = self._make_element_eval_arg()
self.eval_degree = DiscreteField._make_eval_degree(self)
self._read_node_value = self._make_read_node_value()
self.eval_inner = self._make_eval_inner()
self.eval_outer = self._make_eval_outer()
self.eval_grad_inner = self._make_eval_grad_inner(world_space=True)
self.eval_grad_outer = self._make_eval_grad_outer(world_space=True)
self.eval_reference_grad_inner = self._make_eval_grad_inner(world_space=False)
self.eval_reference_grad_outer = self._make_eval_grad_outer(world_space=False)
self.eval_div_inner = self._make_eval_div_inner()
self.eval_div_outer = self._make_eval_div_outer()
self.set_node_value = self._make_set_node_value()
def _make_eval_arg(self):
@cache.dynamic_struct(suffix=self.name)
class EvalArg:
dof_values: wp.array(dtype=self.space.dof_dtype)
space_arg: self.space.SpaceArg
topology_arg: self.space.topology.TopologyArg
partition_arg: self.space_partition.PartitionArg
return EvalArg
def _make_element_eval_arg(self):
@cache.dynamic_struct(suffix=self.name)
class ElementEvalArg:
elt_arg: self.space.topology.ElementArg
eval_arg: self.EvalArg
return ElementEvalArg
def _make_read_node_value(self):
@cache.dynamic_func(suffix=self.name)
def read_node_value(args: self.ElementEvalArg, geo_element_index: ElementIndex, node_index_in_elt: int):
nidx = self.space.topology.element_node_index(
args.elt_arg, args.eval_arg.topology_arg, geo_element_index, node_index_in_elt
)
pidx = self.space_partition.partition_node_index(args.eval_arg.partition_arg, nidx)
if pidx == NULL_NODE_INDEX:
return self.space.dtype(0.0)
return self.space.dof_mapper.dof_to_value(args.eval_arg.dof_values[pidx])
return read_node_value
def _make_eval_inner(self):
NODES_PER_ELEMENT = self.space.topology.NODES_PER_ELEMENT
@cache.dynamic_func(suffix=self.name)
def eval_inner(args: self.ElementEvalArg, s: Sample):
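            # Interpolate u(s) = sum_k w_k(s) * u_k over the element nodes, where w_k are
            # the inner shape-function weights and u_k the mapped node values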
res = self.space.element_inner_weight(
args.elt_arg, args.eval_arg.space_arg, s.element_index, s.element_coords, 0
) * self._read_node_value(args, s.element_index, 0)
for k in range(1, NODES_PER_ELEMENT):
res += self.space.element_inner_weight(
args.elt_arg, args.eval_arg.space_arg, s.element_index, s.element_coords, k
) * self._read_node_value(args, s.element_index, k)
return res
return eval_inner
def _make_eval_grad_inner(self, world_space: bool):
NODES_PER_ELEMENT = self.space.topology.NODES_PER_ELEMENT
if not self.gradient_valid():
return None
@cache.dynamic_func(suffix=self.name)
def eval_grad_inner_ref_space(args: self.ElementEvalArg, s: Sample):
res = utils.generalized_outer(
self._read_node_value(args, s.element_index, 0),
self.space.element_inner_weight_gradient(
args.elt_arg, args.eval_arg.space_arg, s.element_index, s.element_coords, 0
),
)
for k in range(1, NODES_PER_ELEMENT):
res += utils.generalized_outer(
self._read_node_value(args, s.element_index, k),
self.space.element_inner_weight_gradient(
args.elt_arg, args.eval_arg.space_arg, s.element_index, s.element_coords, k
),
)
return res
@cache.dynamic_func(suffix=self.name)
def eval_grad_inner_world_space(args: self.ElementEvalArg, s: Sample):
grad_transform = self.space.element_inner_reference_gradient_transform(args.elt_arg, s)
res = eval_grad_inner_ref_space(args, s)
return utils.apply_right(res, grad_transform)
return eval_grad_inner_world_space if world_space else eval_grad_inner_ref_space
def _make_eval_div_inner(self):
NODES_PER_ELEMENT = self.space.topology.NODES_PER_ELEMENT
if not self.divergence_valid():
return None
@cache.dynamic_func(suffix=self.name)
def eval_div_inner(args: self.ElementEvalArg, s: Sample):
grad_transform = self.space.element_inner_reference_gradient_transform(args.elt_arg, s)
res = utils.generalized_inner(
self._read_node_value(args, s.element_index, 0),
utils.apply_right(
self.space.element_inner_weight_gradient(
args.elt_arg, args.eval_arg.space_arg, s.element_index, s.element_coords, 0
),
grad_transform,
),
)
for k in range(1, NODES_PER_ELEMENT):
res += utils.generalized_inner(
self._read_node_value(args, s.element_index, k),
utils.apply_right(
self.space.element_inner_weight_gradient(
args.elt_arg, args.eval_arg.space_arg, s.element_index, s.element_coords, k
),
grad_transform,
),
)
return res
return eval_div_inner
def _make_eval_outer(self):
NODES_PER_ELEMENT = self.space.topology.NODES_PER_ELEMENT
@cache.dynamic_func(suffix=self.name)
def eval_outer(args: self.ElementEvalArg, s: Sample):
res = self.space.element_outer_weight(
args.elt_arg,
args.eval_arg.space_arg,
s.element_index,
s.element_coords,
0,
) * self._read_node_value(args, s.element_index, 0)
for k in range(1, NODES_PER_ELEMENT):
res += self.space.element_outer_weight(
args.elt_arg,
args.eval_arg.space_arg,
s.element_index,
s.element_coords,
k,
) * self._read_node_value(args, s.element_index, k)
return res
return eval_outer
def _make_eval_grad_outer(self, world_space: bool):
NODES_PER_ELEMENT = self.space.topology.NODES_PER_ELEMENT
if not self.gradient_valid():
return None
@cache.dynamic_func(suffix=self.name)
def eval_grad_outer_ref_space(args: self.ElementEvalArg, s: Sample):
res = utils.generalized_outer(
self._read_node_value(args, s.element_index, 0),
self.space.element_outer_weight_gradient(
args.elt_arg, args.eval_arg.space_arg, s.element_index, s.element_coords, 0
),
)
for k in range(1, NODES_PER_ELEMENT):
res += utils.generalized_outer(
self._read_node_value(args, s.element_index, k),
self.space.element_outer_weight_gradient(
args.elt_arg, args.eval_arg.space_arg, s.element_index, s.element_coords, k
),
)
return res
@cache.dynamic_func(suffix=self.name)
def eval_grad_outer_world_space(args: self.ElementEvalArg, s: Sample):
grad_transform = self.space.element_outer_reference_gradient_transform(args.elt_arg, s)
res = eval_grad_outer_ref_space(args, s)
return utils.apply_right(res, grad_transform)
return eval_grad_outer_world_space if world_space else eval_grad_outer_ref_space
def _make_eval_div_outer(self):
NODES_PER_ELEMENT = self.space.topology.NODES_PER_ELEMENT
if not self.divergence_valid():
return None
@cache.dynamic_func(suffix=self.name)
def eval_div_outer(args: self.ElementEvalArg, s: Sample):
grad_transform = self.space.element_outer_reference_gradient_transform(args.elt_arg, s)
res = utils.generalized_inner(
self._read_node_value(args, s.element_index, 0),
utils.apply_right(
self.space.element_outer_weight_gradient(
args.elt_arg, args.eval_arg.space_arg, s.element_index, s.element_coords, 0
),
grad_transform,
),
)
for k in range(1, NODES_PER_ELEMENT):
res += utils.generalized_inner(
self._read_node_value(args, s.element_index, k),
utils.apply_right(
self.space.element_outer_weight_gradient(
args.elt_arg, args.eval_arg.space_arg, s.element_index, s.element_coords, k
),
grad_transform,
),
)
return res
return eval_div_outer
def _make_set_node_value(self):
@cache.dynamic_func(suffix=self.name)
def set_node_value(args: self.EvalArg, partition_node_index: int, value: self.space.dtype):
args.dof_values[partition_node_index] = self.space.dof_mapper.value_to_dof(value)
return set_node_value
class NodalField(NodalFieldBase):
"""A field holding values for all degrees of freedom at each node of the underlying function space partition
See also: warp.fem.space.CollocatedFunctionSpace.make_field
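    Example:
        An illustrative sketch; ``space`` is assumed to be an existing :class:`CollocatedFunctionSpace`::
            field = space.make_field()
            field.dof_values = wp.zeros(field.space_partition.node_count(), dtype=field.dof_dtype)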
"""
def __init__(self, space: CollocatedFunctionSpace, space_partition: SpacePartition):
if space.topology != space_partition.space_topology:
raise ValueError("Incompatible space and space partition topologies")
super().__init__(space, space_partition)
self._dof_values = wp.zeros(n=self.space_partition.node_count(), dtype=self.dof_dtype)
def eval_arg_value(self, device):
arg = self.EvalArg()
arg.dof_values = self._dof_values.to(device)
arg.space_arg = self.space.space_arg_value(device)
arg.partition_arg = self.space_partition.partition_arg_value(device)
arg.topology_arg = self.space.topology.topo_arg_value(device)
return arg
@property
def dof_values(self) -> wp.array:
"""Returns a warp array containing the values at all degrees of freedom of the underlying space partition"""
return self._dof_values
@dof_values.setter
def dof_values(self, values):
"""Sets the degrees-of-freedom values
Args:
values: Array that is convertible to a warp array of length ``self.space_partition.node_count()`` and data type ``self.space.dof_dtype``
"""
if isinstance(values, wp.array):
self._dof_values = values
else:
self._dof_values = wp.array(values, dtype=self.dof_dtype)
class Trace(NodalFieldBase):
def __init__(self, field):
self._field = field
super().__init__(field.space.trace(), field.space_partition)
def eval_arg_value(self, device):
arg = self.EvalArg()
arg.dof_values = self._field.dof_values.to(device)
arg.space_arg = self.space.space_arg_value(device)
arg.partition_arg = self.space_partition.partition_arg_value(device)
arg.topology_arg = self.space.topology.topo_arg_value(device)
return arg
def trace(self) -> Trace:
trace_field = NodalField.Trace(self)
return trace_field
| 12,223 | Python | 39.882943 | 148 | 0.587744 |
NVIDIA/warp/warp/fem/space/grid_3d_function_space.py | import numpy as np
import warp as wp
from warp.fem import cache
from warp.fem.geometry import Grid3D
from warp.fem.polynomial import is_closed
from warp.fem.types import ElementIndex
from .shape import (
CubeSerendipityShapeFunctions,
CubeTripolynomialShapeFunctions,
ShapeFunction,
)
from .topology import SpaceTopology, forward_base_topology
class Grid3DSpaceTopology(SpaceTopology):
def __init__(self, grid: Grid3D, shape: ShapeFunction):
if not is_closed(shape.family):
raise ValueError("A closed polynomial family is required to define a continuous function space")
super().__init__(grid, shape.NODES_PER_ELEMENT)
self._shape = shape
self._grid = grid
@wp.func
def _vertex_coords(vidx_in_cell: int):
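        # Decode the cell-local vertex index (0..7), laid out lexicographically
        # with x as the major axis: vidx_in_cell = 4 * x + 2 * y + z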
x = vidx_in_cell // 4
y = (vidx_in_cell - 4 * x) // 2
z = vidx_in_cell - 4 * x - 2 * y
return wp.vec3i(x, y, z)
@wp.func
def _vertex_index(cell_arg: Grid3D.CellArg, cell_index: ElementIndex, vidx_in_cell: int):
res = cell_arg.res
strides = wp.vec2i((res[1] + 1) * (res[2] + 1), res[2] + 1)
corner = Grid3D.get_cell(res, cell_index) + Grid3DSpaceTopology._vertex_coords(vidx_in_cell)
return Grid3D._from_3d_index(strides, corner)
class GridTripolynomialSpaceTopology(Grid3DSpaceTopology):
def __init__(self, grid: Grid3D, shape: CubeTripolynomialShapeFunctions):
super().__init__(grid, shape)
self.element_node_index = self._make_element_node_index()
def node_count(self) -> int:
return (
(self.geometry.res[0] * self._shape.ORDER + 1)
* (self.geometry.res[1] * self._shape.ORDER + 1)
* (self.geometry.res[2] * self._shape.ORDER + 1)
)
def _make_element_node_index(self):
ORDER = self._shape.ORDER
@cache.dynamic_func(suffix=self.name)
def element_node_index(
cell_arg: Grid3D.CellArg,
topo_arg: Grid3DSpaceTopology.TopologyArg,
element_index: ElementIndex,
node_index_in_elt: int,
):
res = cell_arg.res
cell = Grid3D.get_cell(res, element_index)
node_i, node_j, node_k = self._shape._node_ijk(node_index_in_elt)
node_x = ORDER * cell[0] + node_i
node_y = ORDER * cell[1] + node_j
node_z = ORDER * cell[2] + node_k
node_pitch_y = (res[2] * ORDER) + 1
node_pitch_x = node_pitch_y * ((res[1] * ORDER) + 1)
node_index = node_pitch_x * node_x + node_pitch_y * node_y + node_z
return node_index
return element_node_index
def _node_grid(self):
res = self.geometry.res
cell_coords = np.array(self._shape.LOBATTO_COORDS)[:-1]
grid_coords_x = np.repeat(np.arange(0, res[0], dtype=float), len(cell_coords)) + np.tile(
cell_coords, reps=res[0]
)
grid_coords_x = np.append(grid_coords_x, res[0])
X = grid_coords_x * self._grid.cell_size[0] + self._grid.origin[0]
grid_coords_y = np.repeat(np.arange(0, res[1], dtype=float), len(cell_coords)) + np.tile(
cell_coords, reps=res[1]
)
grid_coords_y = np.append(grid_coords_y, res[1])
Y = grid_coords_y * self._grid.cell_size[1] + self._grid.origin[1]
grid_coords_z = np.repeat(np.arange(0, res[2], dtype=float), len(cell_coords)) + np.tile(
cell_coords, reps=res[2]
)
grid_coords_z = np.append(grid_coords_z, res[2])
Z = grid_coords_z * self._grid.cell_size[2] + self._grid.origin[2]
return np.meshgrid(X, Y, Z, indexing="ij")
class Grid3DSerendipitySpaceTopology(Grid3DSpaceTopology):
def __init__(self, grid: Grid3D, shape: CubeSerendipityShapeFunctions):
super().__init__(grid, shape)
self.element_node_index = self._make_element_node_index()
def node_count(self) -> int:
return self.geometry.vertex_count() + (self._shape.ORDER - 1) * self.geometry.edge_count()
def _make_element_node_index(self):
ORDER = self._shape.ORDER
@cache.dynamic_func(suffix=self.name)
def element_node_index(
cell_arg: Grid3D.CellArg,
topo_arg: Grid3DSpaceTopology.TopologyArg,
element_index: ElementIndex,
node_index_in_elt: int,
):
res = cell_arg.res
cell = Grid3D.get_cell(res, element_index)
node_type, type_index = self._shape.node_type_and_type_index(node_index_in_elt)
if node_type == CubeSerendipityShapeFunctions.VERTEX:
return Grid3DSpaceTopology._vertex_index(cell_arg, element_index, type_index)
axis = CubeSerendipityShapeFunctions._edge_axis(node_type)
node_all = CubeSerendipityShapeFunctions._edge_coords(type_index)
res = cell_arg.res
edge_index = 0
if axis > 0:
edge_index += (res[1] + 1) * (res[2] + 1) * res[0]
if axis > 1:
edge_index += (res[0] + 1) * (res[2] + 1) * res[1]
res_loc = Grid3D._world_to_local(axis, res)
cell_loc = Grid3D._world_to_local(axis, cell)
edge_index += (res_loc[1] + 1) * (res_loc[2] + 1) * cell_loc[0]
edge_index += (res_loc[2] + 1) * (cell_loc[1] + node_all[1])
edge_index += cell_loc[2] + node_all[2]
vertex_count = (res[0] + 1) * (res[1] + 1) * (res[2] + 1)
return vertex_count + (ORDER - 1) * edge_index + (node_all[0] - 1)
return element_node_index
def make_grid_3d_space_topology(grid: Grid3D, shape: ShapeFunction):
if isinstance(shape, CubeSerendipityShapeFunctions):
return forward_base_topology(Grid3DSerendipitySpaceTopology, grid, shape)
if isinstance(shape, CubeTripolynomialShapeFunctions):
return forward_base_topology(GridTripolynomialSpaceTopology, grid, shape)
raise ValueError(f"Unsupported shape function {shape.name}")
| 6,042 | Python | 34.970238 | 108 | 0.595664 |
NVIDIA/warp/warp/fem/space/nanogrid_function_space.py | import warp as wp
from warp.fem import cache
from warp.fem.geometry import Nanogrid
from warp.fem.geometry.nanogrid import _add_axis_flag
from warp.fem.polynomial import is_closed
from warp.fem.types import ElementIndex
from .shape import (
CubeSerendipityShapeFunctions,
CubeTripolynomialShapeFunctions,
ShapeFunction,
)
from .topology import SpaceTopology, forward_base_topology
@wp.struct
class NanogridTopologyArg:
vertex_grid: wp.uint64
face_grid: wp.uint64
edge_grid: wp.uint64
vertex_count: int
edge_count: int
face_count: int
class NanogridSpaceTopology(SpaceTopology):
TopologyArg = NanogridTopologyArg
def __init__(
self,
grid: Nanogrid,
shape: ShapeFunction,
need_edge_indices: bool = True,
need_face_indices: bool = True,
):
if not is_closed(shape.family):
raise ValueError("A closed polynomial family is required to define a continuous function space")
super().__init__(grid, shape.NODES_PER_ELEMENT)
self._grid = grid
self._shape = shape
if need_edge_indices:
self._edge_count = self._grid.edge_count()
else:
self._edge_count = 0
self._vertex_grid = grid._node_grid
self._face_grid = grid._face_grid
self._edge_grid = grid._edge_grid
@cache.cached_arg_value
def topo_arg_value(self, device):
arg = NanogridTopologyArg()
arg.vertex_grid = self._vertex_grid.id
arg.face_grid = self._face_grid.id
arg.edge_grid = -1 if self._edge_grid is None else self._edge_grid.id
arg.vertex_count = self._grid.vertex_count()
arg.face_count = self._grid.side_count()
arg.edge_count = self._edge_count
return arg
@wp.func
def _cell_vertex_coord(cell_ijk: wp.vec3i, n: int):
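    # Bits 2, 1 and 0 of n encode the vertex offset along x, y and z respectively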
return cell_ijk + wp.vec3i((n & 4) >> 2, (n & 2) >> 1, n & 1)
@wp.func
def _cell_edge_coord(cell_ijk: wp.vec3i, axis: int, offset: int):
e_ijk = cell_ijk
e_ijk[(axis + 1) % 3] += offset >> 1
e_ijk[(axis + 2) % 3] += offset & 1
return _add_axis_flag(e_ijk, axis)
@wp.func
def _cell_face_coord(cell_ijk: wp.vec3i, axis: int, offset: int):
f_ijk = cell_ijk
f_ijk[axis] += offset
return _add_axis_flag(f_ijk, axis)
class NanogridTripolynomialSpaceTopology(NanogridSpaceTopology):
def __init__(self, grid: Nanogrid, shape: CubeTripolynomialShapeFunctions):
super().__init__(grid, shape, need_edge_indices=shape.ORDER >= 2, need_face_indices=shape.ORDER >= 2)
self.element_node_index = self._make_element_node_index()
def node_count(self) -> int:
ORDER = self._shape.ORDER
INTERIOR_NODES_PER_EDGE = max(0, ORDER - 1)
INTERIOR_NODES_PER_FACE = INTERIOR_NODES_PER_EDGE**2
INTERIOR_NODES_PER_CELL = INTERIOR_NODES_PER_EDGE**3
return (
self._grid.vertex_count()
+ self._grid.edge_count() * INTERIOR_NODES_PER_EDGE
+ self._grid.side_count() * INTERIOR_NODES_PER_FACE
+ self._grid.cell_count() * INTERIOR_NODES_PER_CELL
)
def _make_element_node_index(self):
ORDER = self._shape.ORDER
INTERIOR_NODES_PER_EDGE = wp.constant(max(0, ORDER - 1))
INTERIOR_NODES_PER_FACE = wp.constant(INTERIOR_NODES_PER_EDGE**2)
INTERIOR_NODES_PER_CELL = wp.constant(INTERIOR_NODES_PER_EDGE**3)
@cache.dynamic_func(suffix=self.name)
def element_node_index(
geo_arg: Nanogrid.CellArg,
topo_arg: NanogridTopologyArg,
element_index: ElementIndex,
node_index_in_elt: int,
):
node_type, type_instance, type_index = self._shape.node_type_and_type_index(node_index_in_elt)
ijk = geo_arg.cell_ijk[element_index]
if node_type == CubeTripolynomialShapeFunctions.VERTEX:
n_ijk = _cell_vertex_coord(ijk, type_instance)
return wp.volume_lookup_index(topo_arg.vertex_grid, n_ijk[0], n_ijk[1], n_ijk[2])
offset = topo_arg.vertex_count
if node_type == CubeTripolynomialShapeFunctions.EDGE:
axis = type_instance >> 2
node_offset = type_instance & 3
n_ijk = _cell_edge_coord(ijk, axis, node_offset)
edge_index = wp.volume_lookup_index(topo_arg.edge_grid, n_ijk[0], n_ijk[1], n_ijk[2])
return offset + INTERIOR_NODES_PER_EDGE * edge_index + type_index
offset += INTERIOR_NODES_PER_EDGE * topo_arg.edge_count
if node_type == CubeTripolynomialShapeFunctions.FACE:
axis = type_instance >> 1
node_offset = type_instance & 1
n_ijk = _cell_face_coord(ijk, axis, node_offset)
face_index = wp.volume_lookup_index(topo_arg.face_grid, n_ijk[0], n_ijk[1], n_ijk[2])
return offset + INTERIOR_NODES_PER_FACE * face_index + type_index
offset += INTERIOR_NODES_PER_FACE * topo_arg.face_count
return offset + INTERIOR_NODES_PER_CELL * element_index + type_index
return element_node_index
class NanogridSerendipitySpaceTopology(NanogridSpaceTopology):
def __init__(self, grid: Nanogrid, shape: CubeSerendipityShapeFunctions):
super().__init__(grid, shape, need_edge_indices=True, need_face_indices=False)
self.element_node_index = self._make_element_node_index()
def node_count(self) -> int:
return self.geometry.vertex_count() + (self._shape.ORDER - 1) * self.geometry.edge_count()
def _make_element_node_index(self):
ORDER = self._shape.ORDER
@cache.dynamic_func(suffix=self.name)
def element_node_index(
cell_arg: Nanogrid.CellArg,
topo_arg: NanogridSpaceTopology.TopologyArg,
element_index: ElementIndex,
node_index_in_elt: int,
):
node_type, type_index = self._shape.node_type_and_type_index(node_index_in_elt)
ijk = cell_arg.cell_ijk[element_index]
if node_type == CubeSerendipityShapeFunctions.VERTEX:
n_ijk = _cell_vertex_coord(ijk, type_index)
return wp.volume_lookup_index(topo_arg.vertex_grid, n_ijk[0], n_ijk[1], n_ijk[2])
type_instance, index_in_edge = CubeSerendipityShapeFunctions._cube_edge_index(node_type, type_index)
axis = type_instance >> 2
node_offset = type_instance & 3
n_ijk = _cell_edge_coord(ijk, axis, node_offset)
edge_index = wp.volume_lookup_index(topo_arg.edge_grid, n_ijk[0], n_ijk[1], n_ijk[2])
return topo_arg.vertex_count + (ORDER - 1) * edge_index + index_in_edge
return element_node_index
def make_nanogrid_space_topology(grid: Nanogrid, shape: ShapeFunction):
if isinstance(shape, CubeSerendipityShapeFunctions):
return forward_base_topology(NanogridSerendipitySpaceTopology, grid, shape)
if isinstance(shape, CubeTripolynomialShapeFunctions):
return forward_base_topology(NanogridTripolynomialSpaceTopology, grid, shape)
raise ValueError(f"Unsupported shape function {shape.name}")
| 7,205 | Python | 34.497537 | 112 | 0.626926 |
NVIDIA/warp/warp/fem/space/quadmesh_2d_function_space.py | import warp as wp
from warp.fem import cache
from warp.fem.geometry import Quadmesh2D
from warp.fem.polynomial import is_closed
from warp.fem.types import ElementIndex
from .shape import (
ShapeFunction,
SquareBipolynomialShapeFunctions,
SquareSerendipityShapeFunctions,
)
from .topology import SpaceTopology, forward_base_topology
@wp.struct
class Quadmesh2DTopologyArg:
edge_vertex_indices: wp.array(dtype=wp.vec2i)
quad_edge_indices: wp.array2d(dtype=int)
vertex_count: int
edge_count: int
class Quadmesh2DSpaceTopology(SpaceTopology):
TopologyArg = Quadmesh2DTopologyArg
def __init__(self, mesh: Quadmesh2D, shape: ShapeFunction):
if not is_closed(shape.family):
raise ValueError("A closed polynomial family is required to define a continuous function space")
super().__init__(mesh, shape.NODES_PER_ELEMENT)
self._mesh = mesh
self._shape = shape
self._compute_quad_edge_indices()
@cache.cached_arg_value
def topo_arg_value(self, device):
arg = Quadmesh2DTopologyArg()
arg.quad_edge_indices = self._quad_edge_indices.to(device)
arg.edge_vertex_indices = self._mesh.edge_vertex_indices.to(device)
arg.vertex_count = self._mesh.vertex_count()
arg.edge_count = self._mesh.side_count()
return arg
def _compute_quad_edge_indices(self):
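        # Build the quad -> edge map by scattering from edges: each edge knows its
        # (up to two) incident quads, so find its local slot in each and write its
        # global index there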
self._quad_edge_indices = wp.empty(
dtype=int, device=self._mesh.quad_vertex_indices.device, shape=(self._mesh.cell_count(), 4)
)
wp.launch(
kernel=Quadmesh2DSpaceTopology._compute_quad_edge_indices_kernel,
dim=self._mesh.edge_quad_indices.shape,
device=self._mesh.quad_vertex_indices.device,
inputs=[
self._mesh.edge_quad_indices,
self._mesh.edge_vertex_indices,
self._mesh.quad_vertex_indices,
self._quad_edge_indices,
],
)
@wp.func
def _find_edge_index_in_quad(
edge_vtx: wp.vec2i,
quad_vtx: wp.vec4i,
):
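        # Quad edges are (v0,v1), (v1,v2), (v2,v3), (v3,v0); the first three are tested
        # explicitly, so falling through means the edge is the closing one (index 3)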
for k in range(3):
if (edge_vtx[0] == quad_vtx[k] and edge_vtx[1] == quad_vtx[k + 1]) or (
edge_vtx[1] == quad_vtx[k] and edge_vtx[0] == quad_vtx[k + 1]
):
return k
return 3
@wp.kernel
def _compute_quad_edge_indices_kernel(
edge_quad_indices: wp.array(dtype=wp.vec2i),
edge_vertex_indices: wp.array(dtype=wp.vec2i),
quad_vertex_indices: wp.array2d(dtype=int),
quad_edge_indices: wp.array2d(dtype=int),
):
e = wp.tid()
edge_vtx = edge_vertex_indices[e]
edge_quads = edge_quad_indices[e]
q0 = edge_quads[0]
q0_vtx = wp.vec4i(
quad_vertex_indices[q0, 0],
quad_vertex_indices[q0, 1],
quad_vertex_indices[q0, 2],
quad_vertex_indices[q0, 3],
)
q0_edge = Quadmesh2DSpaceTopology._find_edge_index_in_quad(edge_vtx, q0_vtx)
quad_edge_indices[q0, q0_edge] = e
q1 = edge_quads[1]
if q1 != q0:
t1_vtx = wp.vec4i(
quad_vertex_indices[q1, 0],
quad_vertex_indices[q1, 1],
quad_vertex_indices[q1, 2],
quad_vertex_indices[q1, 3],
)
t1_edge = Quadmesh2DSpaceTopology._find_edge_index_in_quad(edge_vtx, t1_vtx)
quad_edge_indices[q1, t1_edge] = e
class Quadmesh2DBipolynomialSpaceTopology(Quadmesh2DSpaceTopology):
def __init__(self, mesh: Quadmesh2D, shape: SquareBipolynomialShapeFunctions):
super().__init__(mesh, shape)
self.element_node_index = self._make_element_node_index()
def node_count(self) -> int:
ORDER = self._shape.ORDER
INTERIOR_NODES_PER_SIDE = max(0, ORDER - 1)
INTERIOR_NODES_PER_CELL = INTERIOR_NODES_PER_SIDE**2
return (
self._mesh.vertex_count()
+ self._mesh.side_count() * INTERIOR_NODES_PER_SIDE
+ self._mesh.cell_count() * INTERIOR_NODES_PER_CELL
)
def _make_element_node_index(self):
ORDER = self._shape.ORDER
INTERIOR_NODES_PER_SIDE = wp.constant(max(0, ORDER - 1))
INTERIOR_NODES_PER_CELL = wp.constant(INTERIOR_NODES_PER_SIDE**2)
@cache.dynamic_func(suffix=self.name)
def element_node_index(
geo_arg: Quadmesh2D.CellArg,
topo_arg: Quadmesh2DTopologyArg,
element_index: ElementIndex,
node_index_in_elt: int,
):
node_i = node_index_in_elt // (ORDER + 1)
node_j = node_index_in_elt - (ORDER + 1) * node_i
# Vertices
if node_i == 0:
if node_j == 0:
return geo_arg.quad_vertex_indices[element_index, 0]
elif node_j == ORDER:
return geo_arg.quad_vertex_indices[element_index, 3]
# 3-0 edge
side_index = topo_arg.quad_edge_indices[element_index, 3]
local_vs = geo_arg.quad_vertex_indices[element_index, 3]
global_vs = topo_arg.edge_vertex_indices[side_index][0]
index_in_side = wp.select(local_vs == global_vs, ORDER - node_j, node_j) - 1
return topo_arg.vertex_count + (ORDER - 1) * side_index + index_in_side
elif node_i == ORDER:
if node_j == 0:
return geo_arg.quad_vertex_indices[element_index, 1]
elif node_j == ORDER:
return geo_arg.quad_vertex_indices[element_index, 2]
# 1-2 edge
side_index = topo_arg.quad_edge_indices[element_index, 1]
local_vs = geo_arg.quad_vertex_indices[element_index, 1]
global_vs = topo_arg.edge_vertex_indices[side_index][0]
index_in_side = wp.select(local_vs == global_vs, ORDER - node_j, node_j) - 1
return topo_arg.vertex_count + (ORDER - 1) * side_index + index_in_side
if node_j == 0:
# 0-1 edge
side_index = topo_arg.quad_edge_indices[element_index, 0]
local_vs = geo_arg.quad_vertex_indices[element_index, 0]
global_vs = topo_arg.edge_vertex_indices[side_index][0]
index_in_side = wp.select(local_vs == global_vs, node_i, ORDER - node_i) - 1
return topo_arg.vertex_count + (ORDER - 1) * side_index + index_in_side
elif node_j == ORDER:
# 2-3 edge
side_index = topo_arg.quad_edge_indices[element_index, 2]
local_vs = geo_arg.quad_vertex_indices[element_index, 2]
global_vs = topo_arg.edge_vertex_indices[side_index][0]
index_in_side = wp.select(local_vs == global_vs, node_i, ORDER - node_i) - 1
return topo_arg.vertex_count + (ORDER - 1) * side_index + index_in_side
return (
topo_arg.vertex_count
+ topo_arg.edge_count * INTERIOR_NODES_PER_SIDE
+ element_index * INTERIOR_NODES_PER_CELL
+ (node_i - 1) * INTERIOR_NODES_PER_SIDE
+ node_j
- 1
)
return element_node_index
class Quadmesh2DSerendipitySpaceTopology(Quadmesh2DSpaceTopology):
def __init__(self, grid: Quadmesh2D, shape: SquareSerendipityShapeFunctions):
super().__init__(grid, shape)
self.element_node_index = self._make_element_node_index()
def node_count(self) -> int:
return self.geometry.vertex_count() + (self._shape.ORDER - 1) * self.geometry.side_count()
def _make_element_node_index(self):
ORDER = self._shape.ORDER
SHAPE_TO_QUAD_IDX = wp.constant(wp.vec4i([0, 3, 1, 2]))
@cache.dynamic_func(suffix=self.name)
def element_node_index(
cell_arg: Quadmesh2D.CellArg,
topo_arg: Quadmesh2DSpaceTopology.TopologyArg,
element_index: ElementIndex,
node_index_in_elt: int,
):
node_type, type_index = self._shape.node_type_and_type_index(node_index_in_elt)
if node_type == SquareSerendipityShapeFunctions.VERTEX:
return cell_arg.quad_vertex_indices[element_index, SHAPE_TO_QUAD_IDX[type_index]]
side_offset, index_in_side = SquareSerendipityShapeFunctions.side_offset_and_index(type_index)
if node_type == SquareSerendipityShapeFunctions.EDGE_X:
if side_offset == 0:
side_start = 0
else:
side_start = 2
index_in_side = ORDER - 2 - index_in_side
else:
if side_offset == 0:
side_start = 3
index_in_side = ORDER - 2 - index_in_side
else:
side_start = 1
side_index = topo_arg.quad_edge_indices[element_index, side_start]
local_vs = cell_arg.quad_vertex_indices[element_index, side_start]
global_vs = topo_arg.edge_vertex_indices[side_index][0]
if local_vs != global_vs:
# Flip indexing direction
index_in_side = ORDER - 2 - index_in_side
return topo_arg.vertex_count + (ORDER - 1) * side_index + index_in_side
return element_node_index
def make_quadmesh_2d_space_topology(mesh: Quadmesh2D, shape: ShapeFunction):
if isinstance(shape, SquareSerendipityShapeFunctions):
return forward_base_topology(Quadmesh2DSerendipitySpaceTopology, mesh, shape)
if isinstance(shape, SquareBipolynomialShapeFunctions):
return forward_base_topology(Quadmesh2DBipolynomialSpaceTopology, mesh, shape)
raise ValueError(f"Unsupported shape function {shape.name}")
| 9,889 | Python | 36.748091 | 108 | 0.578319 |
NVIDIA/warp/warp/fem/space/restriction.py | import warp as wp
from warp.fem.cache import TemporaryStore, borrow_temporary, borrow_temporary_like, cached_arg_value
from warp.fem.domain import GeometryDomain
from warp.fem.types import NodeElementIndex
from warp.fem.utils import compress_node_indices
from .partition import SpacePartition
wp.set_module_options({"enable_backward": False})
class SpaceRestriction:
"""Restriction of a space partition to a given GeometryDomain"""
def __init__(
self,
space_partition: SpacePartition,
domain: GeometryDomain,
device=None,
temporary_store: TemporaryStore = None,
):
space_topology = space_partition.space_topology
if domain.dimension == space_topology.dimension - 1:
space_topology = space_topology.trace()
if domain.dimension != space_topology.dimension:
raise ValueError("Incompatible space and domain dimensions")
self.space_partition = space_partition
self.space_topology = space_topology
self.domain = domain
self._compute_node_element_indices(device=device, temporary_store=temporary_store)
def _compute_node_element_indices(self, device, temporary_store: TemporaryStore):
from warp.fem import cache
NODES_PER_ELEMENT = self.space_topology.NODES_PER_ELEMENT
@cache.dynamic_kernel(
suffix=f"{self.domain.name}_{self.space_topology.name}_{self.space_partition.name}",
kernel_options={"max_unroll": 8},
)
def fill_element_node_indices(
element_arg: self.domain.ElementArg,
domain_index_arg: self.domain.ElementIndexArg,
topo_arg: self.space_topology.TopologyArg,
partition_arg: self.space_partition.PartitionArg,
element_node_indices: wp.array2d(dtype=int),
):
domain_element_index = wp.tid()
element_index = self.domain.element_index(domain_index_arg, domain_element_index)
for n in range(NODES_PER_ELEMENT):
space_nidx = self.space_topology.element_node_index(element_arg, topo_arg, element_index, n)
partition_nidx = self.space_partition.partition_node_index(partition_arg, space_nidx)
element_node_indices[domain_element_index, n] = partition_nidx
element_node_indices = borrow_temporary(
temporary_store,
shape=(self.domain.element_count(), NODES_PER_ELEMENT),
dtype=int,
device=device,
)
wp.launch(
dim=element_node_indices.array.shape[0],
kernel=fill_element_node_indices,
inputs=[
self.domain.element_arg_value(device),
self.domain.element_index_arg_value(device),
self.space_topology.topo_arg_value(device),
self.space_partition.partition_arg_value(device),
element_node_indices.array,
],
device=device,
)
# Build compressed map from node to element indices
flattened_node_indices = element_node_indices.array.flatten()
(
self._dof_partition_element_offsets,
node_array_indices,
self._node_count,
self._dof_partition_indices,
) = compress_node_indices(
self.space_partition.node_count(), flattened_node_indices, temporary_store=temporary_store
)
# Extract element index and index in element
self._dof_element_indices = borrow_temporary_like(flattened_node_indices, temporary_store)
self._dof_indices_in_element = borrow_temporary_like(flattened_node_indices, temporary_store)
wp.launch(
kernel=SpaceRestriction._split_vertex_element_index,
dim=flattened_node_indices.shape,
inputs=[
NODES_PER_ELEMENT,
node_array_indices.array,
self._dof_element_indices.array,
self._dof_indices_in_element.array,
],
device=flattened_node_indices.device,
)
node_array_indices.release()
def node_count(self):
return self._node_count
def partition_element_offsets(self):
return self._dof_partition_element_offsets.array
def node_partition_indices(self):
return self._dof_partition_indices.array
def total_node_element_count(self):
return self._dof_element_indices.array.size
@wp.struct
class NodeArg:
dof_element_offsets: wp.array(dtype=int)
dof_element_indices: wp.array(dtype=int)
dof_partition_indices: wp.array(dtype=int)
dof_indices_in_element: wp.array(dtype=int)
@cached_arg_value
def node_arg(self, device):
arg = SpaceRestriction.NodeArg()
arg.dof_element_offsets = self._dof_partition_element_offsets.array.to(device)
arg.dof_element_indices = self._dof_element_indices.array.to(device)
arg.dof_partition_indices = self._dof_partition_indices.array.to(device)
arg.dof_indices_in_element = self._dof_indices_in_element.array.to(device)
return arg
@wp.func
def node_partition_index(args: NodeArg, node_index: int):
return args.dof_partition_indices[node_index]
@wp.func
def node_element_count(args: NodeArg, node_index: int):
partition_node_index = SpaceRestriction.node_partition_index(args, node_index)
return args.dof_element_offsets[partition_node_index + 1] - args.dof_element_offsets[partition_node_index]
@wp.func
def node_element_index(args: NodeArg, node_index: int, element_index: int):
partition_node_index = SpaceRestriction.node_partition_index(args, node_index)
offset = args.dof_element_offsets[partition_node_index] + element_index
domain_element_index = args.dof_element_indices[offset]
index_in_element = args.dof_indices_in_element[offset]
return NodeElementIndex(domain_element_index, index_in_element)
@wp.kernel
def _split_vertex_element_index(
vertex_per_element: int,
sorted_indices: wp.array(dtype=int),
vertex_element_index: wp.array(dtype=int),
vertex_index_in_element: wp.array(dtype=int),
):
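        # Each flattened entry encodes (element, node-in-element) as
        # idx = element_index * vertex_per_element + index_in_element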
idx = sorted_indices[wp.tid()]
element_index = idx // vertex_per_element
vertex_element_index[wp.tid()] = element_index
vertex_index_in_element[wp.tid()] = idx - vertex_per_element * element_index
| 6,496 | Python | 39.104938 | 114 | 0.645628 |
NVIDIA/warp/warp/fem/space/partition.py | from typing import Any, Optional
import warp as wp
from warp.fem.cache import (
TemporaryStore,
borrow_temporary,
borrow_temporary_like,
cached_arg_value,
)
from warp.fem.geometry import GeometryPartition, WholeGeometryPartition
from warp.fem.types import NULL_NODE_INDEX
from warp.fem.utils import _iota_kernel, compress_node_indices
from .function_space import FunctionSpace
from .topology import SpaceTopology
wp.set_module_options({"enable_backward": False})
class SpacePartition:
class PartitionArg:
pass
def __init__(self, space_topology: SpaceTopology, geo_partition: GeometryPartition):
self.space_topology = space_topology
self.geo_partition = geo_partition
    def node_count(self):
        """Returns number of nodes in this partition"""
        raise NotImplementedError
    def owned_node_count(self) -> int:
        """Returns number of nodes in this partition, excluding exterior halo"""
        raise NotImplementedError
    def interior_node_count(self) -> int:
        """Returns number of interior nodes in this partition"""
        raise NotImplementedError
    def space_node_indices(self) -> wp.array:
        """Return the global function space indices for nodes in this partition"""
        raise NotImplementedError
    def partition_arg_value(self, device):
        """Value of the partition arguments to be passed to device functions"""
        raise NotImplementedError
    @staticmethod
    def partition_node_index(args: "PartitionArg", space_node_index: int):
        """Returns the index in the partition of a function space node, or -1 if it does not exist"""
        raise NotImplementedError
def __str__(self) -> str:
return self.name
@property
def name(self) -> str:
return f"{self.__class__.__name__}"
class WholeSpacePartition(SpacePartition):
@wp.struct
class PartitionArg:
pass
def __init__(self, space_topology: SpaceTopology):
super().__init__(space_topology, WholeGeometryPartition(space_topology.geometry))
self._node_indices = None
def node_count(self):
"""Returns number of nodes in this partition"""
return self.space_topology.node_count()
def owned_node_count(self) -> int:
"""Returns number of nodes in this partition, excluding exterior halo"""
return self.space_topology.node_count()
def interior_node_count(self) -> int:
"""Returns number of interior nodes in this partition"""
return self.space_topology.node_count()
def space_node_indices(self):
"""Return the global function space indices for nodes in this partition"""
if self._node_indices is None:
self._node_indices = borrow_temporary(temporary_store=None, shape=(self.node_count(),), dtype=int)
wp.launch(kernel=_iota_kernel, dim=self.node_count(), inputs=[self._node_indices.array, 1])
return self._node_indices.array
def partition_arg_value(self, device):
return WholeSpacePartition.PartitionArg()
@wp.func
def partition_node_index(args: Any, space_node_index: int):
return space_node_index
def __eq__(self, other: SpacePartition) -> bool:
return isinstance(other, SpacePartition) and self.space_topology == other.space_topology
@property
def name(self) -> str:
return "Whole"
class NodeCategory:
OWNED_INTERIOR = wp.constant(0)
"""Node is touched exclusively by this partition, not touched by frontier side"""
OWNED_FRONTIER = wp.constant(1)
"""Node is touched by a frontier side, but belongs to an element of this partition"""
    HALO_LOCAL_SIDE = wp.constant(2)
    """Node belongs to an element of another partition, but is touched by one of our locally-owned sides"""
    HALO_OTHER_SIDE = wp.constant(3)
    """Node belongs to an element of another partition, and is not touched by any of our locally-owned sides"""
EXTERIOR = wp.constant(4)
"""Node is never referenced by this partition"""
COUNT = 5
class NodePartition(SpacePartition):
@wp.struct
class PartitionArg:
space_to_partition: wp.array(dtype=int)
def __init__(
self,
space_topology: SpaceTopology,
geo_partition: GeometryPartition,
with_halo: bool = True,
device=None,
temporary_store: TemporaryStore = None,
):
super().__init__(space_topology=space_topology, geo_partition=geo_partition)
self._compute_node_indices_from_sides(device, with_halo, temporary_store)
def node_count(self) -> int:
"""Returns number of nodes referenced by this partition, including exterior halo"""
return int(self._category_offsets.array.numpy()[NodeCategory.HALO_OTHER_SIDE + 1])
def owned_node_count(self) -> int:
"""Returns number of nodes in this partition, excluding exterior halo"""
return int(self._category_offsets.array.numpy()[NodeCategory.OWNED_FRONTIER + 1])
def interior_node_count(self) -> int:
"""Returns number of interior nodes in this partition"""
return int(self._category_offsets.array.numpy()[NodeCategory.OWNED_INTERIOR + 1])
def space_node_indices(self):
"""Return the global function space indices for nodes in this partition"""
return self._node_indices.array
@cached_arg_value
def partition_arg_value(self, device):
arg = NodePartition.PartitionArg()
arg.space_to_partition = self._space_to_partition.array.to(device)
return arg
@wp.func
def partition_node_index(args: PartitionArg, space_node_index: int):
return args.space_to_partition[space_node_index]
def _compute_node_indices_from_sides(self, device, with_halo: bool, temporary_store: TemporaryStore):
from warp.fem import cache
trace_topology = self.space_topology.trace()
NODES_PER_CELL = self.space_topology.NODES_PER_ELEMENT
NODES_PER_SIDE = trace_topology.NODES_PER_ELEMENT
@cache.dynamic_kernel(suffix=f"{self.geo_partition.name}_{self.space_topology.name}")
def node_category_from_cells_kernel(
geo_arg: self.geo_partition.geometry.CellArg,
geo_partition_arg: self.geo_partition.CellArg,
space_arg: self.space_topology.TopologyArg,
node_mask: wp.array(dtype=int),
):
partition_cell_index = wp.tid()
cell_index = self.geo_partition.cell_index(geo_partition_arg, partition_cell_index)
for n in range(NODES_PER_CELL):
space_nidx = self.space_topology.element_node_index(geo_arg, space_arg, cell_index, n)
node_mask[space_nidx] = NodeCategory.OWNED_INTERIOR
@cache.dynamic_kernel(suffix=f"{self.geo_partition.name}_{self.space_topology.name}")
def node_category_from_owned_sides_kernel(
geo_arg: self.geo_partition.geometry.SideArg,
geo_partition_arg: self.geo_partition.SideArg,
space_arg: trace_topology.TopologyArg,
node_mask: wp.array(dtype=int),
):
partition_side_index = wp.tid()
side_index = self.geo_partition.side_index(geo_partition_arg, partition_side_index)
for n in range(NODES_PER_SIDE):
space_nidx = trace_topology.element_node_index(geo_arg, space_arg, side_index, n)
if node_mask[space_nidx] == NodeCategory.EXTERIOR:
node_mask[space_nidx] = NodeCategory.HALO_LOCAL_SIDE
@cache.dynamic_kernel(suffix=f"{self.geo_partition.name}_{self.space_topology.name}")
def node_category_from_frontier_sides_kernel(
geo_arg: self.geo_partition.geometry.SideArg,
geo_partition_arg: self.geo_partition.SideArg,
space_arg: trace_topology.TopologyArg,
node_mask: wp.array(dtype=int),
):
frontier_side_index = wp.tid()
side_index = self.geo_partition.frontier_side_index(geo_partition_arg, frontier_side_index)
for n in range(NODES_PER_SIDE):
space_nidx = trace_topology.element_node_index(geo_arg, space_arg, side_index, n)
if node_mask[space_nidx] == NodeCategory.EXTERIOR:
node_mask[space_nidx] = NodeCategory.HALO_OTHER_SIDE
elif node_mask[space_nidx] == NodeCategory.OWNED_INTERIOR:
node_mask[space_nidx] = NodeCategory.OWNED_FRONTIER
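        # Classify every space node in up to three passes: nodes of owned cells start
        # as OWNED_INTERIOR; owned sides then mark remaining nodes as HALO_LOCAL_SIDE;
        # finally frontier sides promote untouched nodes to HALO_OTHER_SIDE and
        # reclassify owned nodes they touch as OWNED_FRONTIER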
node_category = borrow_temporary(
temporary_store,
shape=(self.space_topology.node_count(),),
dtype=int,
device=device,
)
node_category.array.fill_(value=NodeCategory.EXTERIOR)
wp.launch(
dim=self.geo_partition.cell_count(),
kernel=node_category_from_cells_kernel,
inputs=[
self.geo_partition.geometry.cell_arg_value(device),
self.geo_partition.cell_arg_value(device),
self.space_topology.topo_arg_value(device),
node_category.array,
],
device=device,
)
if with_halo:
wp.launch(
dim=self.geo_partition.side_count(),
kernel=node_category_from_owned_sides_kernel,
inputs=[
self.geo_partition.geometry.side_arg_value(device),
self.geo_partition.side_arg_value(device),
self.space_topology.topo_arg_value(device),
node_category.array,
],
device=device,
)
wp.launch(
dim=self.geo_partition.frontier_side_count(),
kernel=node_category_from_frontier_sides_kernel,
inputs=[
self.geo_partition.geometry.side_arg_value(device),
self.geo_partition.side_arg_value(device),
self.space_topology.topo_arg_value(device),
node_category.array,
],
device=device,
)
self._finalize_node_indices(node_category.array, temporary_store)
node_category.release()
def _finalize_node_indices(self, node_category: wp.array(dtype=int), temporary_store: TemporaryStore):
category_offsets, node_indices, _, __ = compress_node_indices(NodeCategory.COUNT, node_category)
# Copy offsets to cpu
device = node_category.device
self._category_offsets = borrow_temporary(
temporary_store,
shape=category_offsets.array.shape,
dtype=category_offsets.array.dtype,
pinned=device.is_cuda,
device="cpu",
)
wp.copy(src=category_offsets.array, dest=self._category_offsets.array)
if device.is_cuda:
# TODO switch to synchronize_event once available
wp.synchronize_stream(wp.get_stream(device))
category_offsets.release()
# Compute global to local indices
self._space_to_partition = borrow_temporary_like(node_indices, temporary_store)
wp.launch(
kernel=NodePartition._scatter_partition_indices,
dim=self.space_topology.node_count(),
device=device,
inputs=[self.node_count(), node_indices.array, self._space_to_partition.array],
)
        # Copy to shrunk-to-fit array
self._node_indices = borrow_temporary(temporary_store, shape=(self.node_count()), dtype=int, device=device)
wp.copy(dest=self._node_indices.array, src=node_indices.array, count=self.node_count())
node_indices.release()
@wp.kernel
def _scatter_partition_indices(
local_node_count: int,
node_indices: wp.array(dtype=int),
space_to_partition_indices: wp.array(dtype=int),
):
local_idx = wp.tid()
space_idx = node_indices[local_idx]
if local_idx < local_node_count:
space_to_partition_indices[space_idx] = local_idx
else:
space_to_partition_indices[space_idx] = NULL_NODE_INDEX
def make_space_partition(
space: Optional[FunctionSpace] = None,
geometry_partition: Optional[GeometryPartition] = None,
space_topology: Optional[SpaceTopology] = None,
with_halo: bool = True,
device=None,
temporary_store: TemporaryStore = None,
) -> SpacePartition:
"""Computes the subset of nodes from a function space topology that touch a geometry partition
Either `space_topology` or `space` must be provided (and will be considered in that order).
Args:
space: (deprecated) the function space defining the topology if `space_topology` is ``None``.
geometry_partition: The subset of the space geometry. If not provided, use the whole geometry.
space_topology: the topology of the function space to consider. If ``None``, deduced from `space`.
with_halo: if True, include the halo nodes (nodes from exterior frontier cells to the partition)
        device: Warp device on which to perform and store computations
        temporary_store: shared pool from which to allocate temporary arrays
Returns:
the resulting space partition
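    Example:
        An illustrative sketch; ``space`` is assumed to be an existing :class:`FunctionSpace`
        and ``geo_partition`` a :class:`GeometryPartition` of its geometry::
            partition = make_space_partition(
                space_topology=space.topology, geometry_partition=geo_partition
            )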
"""
if space_topology is None:
space_topology = space.topology
space_topology = space_topology.full_space_topology()
if geometry_partition is not None:
if geometry_partition.cell_count() < geometry_partition.geometry.cell_count():
return NodePartition(
space_topology=space_topology,
geo_partition=geometry_partition,
with_halo=with_halo,
device=device,
temporary_store=temporary_store,
)
return WholeSpacePartition(space_topology)
| 13,484 | Python | 37.418803 | 115 | 0.63542 |
NVIDIA/warp/warp/fem/space/function_space.py | import warp as wp
from warp.fem.geometry import Geometry
from warp.fem.types import Coords, DofIndex, ElementIndex
from .topology import SpaceTopology
class FunctionSpace:
"""
Interface class for function spaces, i.e. geometry + interpolation basis
"""
dtype: type
"""Value type of the interpolation functions"""
SpaceArg: wp.codegen.Struct
"""Structure containing arguments to be passed to device function"""
VALUE_DOF_COUNT: int
"""Number of degrees of freedom per node, as a Warp constant"""
def __init__(self, topology: SpaceTopology):
self._topology = topology
if self._topology.is_trace:
self.element_inner_reference_gradient_transform = self.geometry.side_inner_inverse_deformation_gradient
self.element_outer_reference_gradient_transform = self.geometry.side_outer_inverse_deformation_gradient
else:
self.element_inner_reference_gradient_transform = self.geometry.cell_inverse_deformation_gradient
self.element_outer_reference_gradient_transform = self.geometry.cell_inverse_deformation_gradient
def node_count(self) -> int:
"""Number of nodes in the interpolation basis"""
raise NotImplementedError
def space_arg_value(self, device) -> wp.codegen.StructInstance:
"""Value of the arguments to be passed to device functions"""
raise NotImplementedError
@property
def topology(self) -> SpaceTopology:
"""Underlying geometry"""
return self._topology
@property
def geometry(self) -> Geometry:
"""Underlying geometry"""
return self.topology.geometry
@property
def dimension(self) -> int:
"""Function space embedding dimension"""
return self.topology.dimension
@property
def degree(self) -> int:
"""Maximum polynomial degree of the underlying basis"""
raise NotImplementedError
@property
def name(self):
raise NotImplementedError
def __str__(self):
return self.name
def trace(self) -> "FunctionSpace":
"""Trace of the function space over lower-dimensional elements of the geometry"""
raise NotImplementedError
def make_field(self, space_partition=None):
"""Creates a zero-initialized discrete field over the function space holding values for all degrees of freedom of nodes in a space partition
        Args:
space_partition: If provided, the subset of nodes to consider
See also: :func:`make_space_partition`
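        Example:
            An illustrative sketch; assumes ``space`` is a concrete function space and
            ``partition`` an optional :class:`SpacePartition` of its nodes::
                field = space.make_field(space_partition=partition)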
"""
raise NotImplementedError
@staticmethod
def unit_dof_value(elt_arg: "SpaceTopology.ElementArg", space_arg: "SpaceArg", dof: DofIndex): # noqa: F821
"""Unit value for a given degree of freedom. Typically a rank-1 tensor"""
raise NotImplementedError
@staticmethod
def node_coords_in_element(
elt_arg: "SpaceTopology.ElementArg",
space_arg: "SpaceArg", # noqa: F821
element_index: ElementIndex,
node_index_in_elt: int,
):
"""Coordinates inside element of a given node"""
raise NotImplementedError
@staticmethod
def node_quadrature_weight(
elt_arg: "SpaceTopology.ElementArg",
space_arg: "SpaceArg", # noqa: F821
element_index: ElementIndex,
node_index_in_elt: int,
):
"""Weight of a given node when used as a quadrature point"""
raise NotImplementedError
@staticmethod
def element_inner_weight(
elt_arg: "SpaceTopology.ElementArg",
space_arg: "SpaceArg", # noqa: F821
element_index: ElementIndex,
coords: Coords,
node_index_in_elt: int,
):
"""Inner weight for a node at given coordinates"""
raise NotImplementedError
@staticmethod
def element_inner_weight_gradient(
elt_arg: "SpaceTopology.ElementArg",
space_arg: "SpaceArg", # noqa: F821
element_index: ElementIndex,
coords: Coords,
node_index_in_elt: int,
):
"""Inner weight gradient w.r.t. reference space for a node at given coordinates"""
raise NotImplementedError
@staticmethod
def element_outer_weight(
elt_arg: "SpaceTopology.ElementArg",
space_arg: "SpaceArg", # noqa: F821
element_index: ElementIndex,
coords: Coords,
node_index_in_elt: int,
):
"""Outer weight for a node at given coordinates"""
raise NotImplementedError
@staticmethod
def element_outer_weight_gradient(
elt_arg: "SpaceTopology.ElementArg",
space_arg: "SpaceArg", # noqa: F821
element_index: ElementIndex,
coords: Coords,
node_index_in_elt: int,
):
"""Outer weight gradient w.r.t reference space for a node at given coordinates"""
raise NotImplementedError
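# Illustrative sketch (not part of the original source): typical consumption of
# the FunctionSpace interface, assuming `space` is a concrete instance built by
# one of the warp.fem factory functions:
#
#   boundary_space = space.trace()  # restriction to lower-dimensional elements
#   field = space.make_field()      # zero-initialized discrete field
#   n = space.node_count()          # number of nodes in the interpolation basis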
| 4,896 | Python | 31.865772 | 148 | 0.651757 |
NVIDIA/warp/warp/fem/space/basis_space.py | from typing import Optional
import warp as wp
from warp.fem import cache
from warp.fem.geometry import Geometry
from warp.fem.quadrature import Quadrature
from warp.fem.types import Coords, ElementIndex, make_free_sample
from .shape import ShapeFunction
from .topology import DiscontinuousSpaceTopology, SpaceTopology
class BasisSpace:
"""Interface class for defining a scalar-valued basis over a geometry.
A basis space makes it easy to define multiple function spaces sharing the same basis (and thus nodes) but with different valuation functions;
however, it is not a required ingredient of a function space.
See also: :func:`make_polynomial_basis_space`, :func:`make_collocated_function_space`
"""
@wp.struct
class BasisArg:
"""Argument structure to be passed to device functions"""
pass
def __init__(self, topology: SpaceTopology):
self._topology = topology
self.NODES_PER_ELEMENT = self._topology.NODES_PER_ELEMENT
@property
def topology(self) -> SpaceTopology:
"""Underlying topology of the basis space"""
return self._topology
@property
def geometry(self) -> Geometry:
"""Underlying geometry of the basis space"""
return self._topology.geometry
def basis_arg_value(self, device) -> "BasisArg":
"""Value for the argument structure to be passed to device functions"""
return BasisSpace.BasisArg()
# Helpers for generating node positions
def node_positions(self, out: Optional[wp.array] = None) -> wp.array:
"""Returns a temporary array containing the world position for each node"""
NODES_PER_ELEMENT = self.NODES_PER_ELEMENT
pos_type = cache.cached_vec_type(length=self.geometry.dimension, dtype=float)
node_coords_in_element = self.make_node_coords_in_element()
@cache.dynamic_kernel(suffix=self.name, kernel_options={"max_unroll": 4, "enable_backward": False})
def fill_node_positions(
geo_cell_arg: self.geometry.CellArg,
basis_arg: self.BasisArg,
topo_arg: self.topology.TopologyArg,
node_positions: wp.array(dtype=pos_type),
):
element_index = wp.tid()
for n in range(NODES_PER_ELEMENT):
node_index = self.topology.element_node_index(geo_cell_arg, topo_arg, element_index, n)
coords = node_coords_in_element(geo_cell_arg, basis_arg, element_index, n)
sample = make_free_sample(element_index, coords)
pos = self.geometry.cell_position(geo_cell_arg, sample)
node_positions[node_index] = pos
shape = (self.topology.node_count(),)
if out is None:
node_positions = wp.empty(
shape=shape,
dtype=pos_type,
)
else:
if out.shape != shape or not wp.types.types_equal(pos_type, out.dtype):
raise ValueError(
f"Out node positions array must have shape {shape} and data type {wp.types.type_repr(pos_type)}"
)
node_positions = out
wp.launch(
dim=self.geometry.cell_count(),
kernel=fill_node_positions,
inputs=[
self.geometry.cell_arg_value(device=node_positions.device),
self.basis_arg_value(device=node_positions.device),
self.topology.topo_arg_value(device=node_positions.device),
node_positions,
],
)
return node_positions
def make_node_coords_in_element(self):
raise NotImplementedError()
def make_node_quadrature_weight(self):
raise NotImplementedError()
def make_element_inner_weight(self):
raise NotImplementedError()
def make_element_outer_weight(self):
return self.make_element_inner_weight()
def make_element_inner_weight_gradient(self):
raise NotImplementedError()
def make_element_outer_weight_gradient(self):
return self.make_element_inner_weight_gradient()
def make_trace_node_quadrature_weight(self):
raise NotImplementedError()
def trace(self) -> "TraceBasisSpace":
return TraceBasisSpace(self)
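# Illustrative sketch (not part of the original source): querying node world
# positions from a basis space; the geometry type and degree are assumptions.
if __name__ == "__main__":
    import warp.fem as fem

    geo = fem.Grid2D(res=wp.vec2i(4, 4))
    basis = fem.make_polynomial_basis_space(geo, degree=1)
    positions = basis.node_positions()  # one world-space position per node
    print(positions.numpy().shape)  # (node_count, 2) for a 2D geometry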
class ShapeBasisSpace(BasisSpace):
"""Base class for defining shape-function-based basis spaces."""
def __init__(self, topology: SpaceTopology, shape: ShapeFunction):
super().__init__(topology)
self._shape = shape
self.ORDER = self._shape.ORDER
if hasattr(shape, "element_node_triangulation"):
self.node_triangulation = self._node_triangulation
if hasattr(shape, "element_node_tets"):
self.node_tets = self._node_tets
if hasattr(shape, "element_node_hexes"):
self.node_hexes = self._node_hexes
@property
def shape(self) -> ShapeFunction:
"""Shape functions used for defining individual element basis"""
return self._shape
@property
def name(self):
return f"{self.topology.name}_{self._shape.name}"
def make_node_coords_in_element(self):
shape_node_coords_in_element = self._shape.make_node_coords_in_element()
@cache.dynamic_func(suffix=self.name)
def node_coords_in_element(
elt_arg: self.geometry.CellArg,
basis_arg: self.BasisArg,
element_index: ElementIndex,
node_index_in_elt: int,
):
return shape_node_coords_in_element(node_index_in_elt)
return node_coords_in_element
def make_node_quadrature_weight(self):
shape_node_quadrature_weight = self._shape.make_node_quadrature_weight()
@cache.dynamic_func(suffix=self.name)
def node_quadrature_weight(
elt_arg: self.geometry.CellArg,
basis_arg: self.BasisArg,
element_index: ElementIndex,
node_index_in_elt: int,
):
return shape_node_quadrature_weight(node_index_in_elt)
return node_quadrature_weight
def make_element_inner_weight(self):
shape_element_inner_weight = self._shape.make_element_inner_weight()
@cache.dynamic_func(suffix=self.name)
def element_inner_weight(
elt_arg: self.geometry.CellArg,
basis_arg: self.BasisArg,
element_index: ElementIndex,
coords: Coords,
node_index_in_elt: int,
):
return shape_element_inner_weight(coords, node_index_in_elt)
return element_inner_weight
def make_element_inner_weight_gradient(self):
shape_element_inner_weight_gradient = self._shape.make_element_inner_weight_gradient()
@cache.dynamic_func(suffix=self.name)
def element_inner_weight_gradient(
elt_arg: self.geometry.CellArg,
basis_arg: self.BasisArg,
element_index: ElementIndex,
coords: Coords,
node_index_in_elt: int,
):
return shape_element_inner_weight_gradient(coords, node_index_in_elt)
return element_inner_weight_gradient
def make_trace_node_quadrature_weight(self, trace_basis):
shape_trace_node_quadrature_weight = self._shape.make_trace_node_quadrature_weight()
@cache.dynamic_func(suffix=self.name)
def trace_node_quadrature_weight(
geo_side_arg: trace_basis.geometry.SideArg,
basis_arg: trace_basis.BasisArg,
element_index: ElementIndex,
node_index_in_elt: int,
):
neighbour_elem, index_in_neighbour = trace_basis.topology.neighbor_cell_index(
geo_side_arg, element_index, node_index_in_elt
)
return shape_trace_node_quadrature_weight(index_in_neighbour)
return trace_node_quadrature_weight
def _node_triangulation(self):
element_node_indices = self._topology.element_node_indices().numpy()
element_triangles = self._shape.element_node_triangulation()
tri_indices = element_node_indices[:, element_triangles].reshape(-1, 3)
return tri_indices
def _node_tets(self):
element_node_indices = self._topology.element_node_indices().numpy()
element_tets = self._shape.element_node_tets()
tet_indices = element_node_indices[:, element_tets].reshape(-1, 4)
return tet_indices
def _node_hexes(self):
element_node_indices = self._topology.element_node_indices().numpy()
element_hexes = self._shape.element_node_hexes()
hex_indices = element_node_indices[:, element_hexes].reshape(-1, 8)
return hex_indices
class TraceBasisSpace(BasisSpace):
"""Auto-generated trace space evaluating the cell-defined basis on the geometry sides"""
def __init__(self, basis: BasisSpace):
super().__init__(basis.topology.trace())
self.ORDER = basis.ORDER
self._basis = basis
self.BasisArg = self._basis.BasisArg
self.basis_arg_value = self._basis.basis_arg_value
@property
def name(self):
return f"{self._basis.name}_Trace"
def make_node_coords_in_element(self):
node_coords_in_cell = self._basis.make_node_coords_in_element()
@cache.dynamic_func(suffix=self._basis.name)
def trace_node_coords_in_element(
geo_side_arg: self.geometry.SideArg,
basis_arg: self.BasisArg,
element_index: ElementIndex,
node_index_in_elt: int,
):
neighbour_elem, index_in_neighbour = self.topology.neighbor_cell_index(
geo_side_arg, element_index, node_index_in_elt
)
geo_cell_arg = self.geometry.side_to_cell_arg(geo_side_arg)
neighbour_coords = node_coords_in_cell(
geo_cell_arg,
basis_arg,
neighbour_elem,
index_in_neighbour,
)
return self.geometry.side_from_cell_coords(geo_side_arg, element_index, neighbour_elem, neighbour_coords)
return trace_node_coords_in_element
def make_node_quadrature_weight(self):
return self._basis.make_trace_node_quadrature_weight(self)
def make_element_inner_weight(self):
cell_inner_weight = self._basis.make_element_inner_weight()
@cache.dynamic_func(suffix=self._basis.name)
def trace_element_inner_weight(
geo_side_arg: self.geometry.SideArg,
basis_arg: self.BasisArg,
element_index: ElementIndex,
coords: Coords,
node_index_in_elt: int,
):
cell_index, index_in_cell = self.topology.inner_cell_index(geo_side_arg, element_index, node_index_in_elt)
if index_in_cell < 0:
return 0.0
cell_coords = self.geometry.side_inner_cell_coords(geo_side_arg, element_index, coords)
geo_cell_arg = self.geometry.side_to_cell_arg(geo_side_arg)
return cell_inner_weight(
geo_cell_arg,
basis_arg,
cell_index,
cell_coords,
index_in_cell,
)
return trace_element_inner_weight
def make_element_outer_weight(self):
cell_outer_weight = self._basis.make_element_outer_weight()
@cache.dynamic_func(suffix=self._basis.name)
def trace_element_outer_weight(
geo_side_arg: self.geometry.SideArg,
basis_arg: self.BasisArg,
element_index: ElementIndex,
coords: Coords,
node_index_in_elt: int,
):
cell_index, index_in_cell = self.topology.outer_cell_index(geo_side_arg, element_index, node_index_in_elt)
if index_in_cell < 0:
return 0.0
cell_coords = self.geometry.side_outer_cell_coords(geo_side_arg, element_index, coords)
geo_cell_arg = self.geometry.side_to_cell_arg(geo_side_arg)
return cell_outer_weight(
geo_cell_arg,
basis_arg,
cell_index,
cell_coords,
index_in_cell,
)
return trace_element_outer_weight
def make_element_inner_weight_gradient(self):
cell_inner_weight_gradient = self._basis.make_element_inner_weight_gradient()
grad_vec_type = wp.vec(length=self.geometry.dimension, dtype=float)
@cache.dynamic_func(suffix=self._basis.name)
def trace_element_inner_weight_gradient(
geo_side_arg: self.geometry.SideArg,
basis_arg: self.BasisArg,
element_index: ElementIndex,
coords: Coords,
node_index_in_elt: int,
):
cell_index, index_in_cell = self.topology.inner_cell_index(geo_side_arg, element_index, node_index_in_elt)
if index_in_cell < 0:
return grad_vec_type(0.0)
cell_coords = self.geometry.side_inner_cell_coords(geo_side_arg, element_index, coords)
geo_cell_arg = self.geometry.side_to_cell_arg(geo_side_arg)
return cell_inner_weight_gradient(geo_cell_arg, basis_arg, cell_index, cell_coords, index_in_cell)
return trace_element_inner_weight_gradient
def make_element_outer_weight_gradient(self):
cell_outer_weight_gradient = self._basis.make_element_outer_weight_gradient()
grad_vec_type = wp.vec(length=self.geometry.dimension, dtype=float)
@cache.dynamic_func(suffix=self._basis.name)
def trace_element_outer_weight_gradient(
geo_side_arg: self.geometry.SideArg,
basis_arg: self.BasisArg,
element_index: ElementIndex,
coords: Coords,
node_index_in_elt: int,
):
cell_index, index_in_cell = self.topology.outer_cell_index(geo_side_arg, element_index, node_index_in_elt)
if index_in_cell < 0:
return grad_vec_type(0.0)
cell_coords = self.geometry.side_outer_cell_coords(geo_side_arg, element_index, coords)
geo_cell_arg = self.geometry.side_to_cell_arg(geo_side_arg)
return cell_outer_weight_gradient(geo_cell_arg, basis_arg, cell_index, cell_coords, index_in_cell)
return trace_element_outer_weight_gradient
def __eq__(self, other: "TraceBasisSpace") -> bool:
return self._topo == other._topo
class PiecewiseConstantBasisSpace(ShapeBasisSpace):
class Trace(TraceBasisSpace):
def make_node_coords_in_element(self):
            # Makes the single node visible to all sides; useful for interpolating on boundaries
            # For higher-order non-conforming elements, direct interpolation on the boundary is not possible;
            # one would need to perform a proper integration, then solve against the mass matrix
CENTER_COORDS = Coords(self.geometry.reference_side().center())
@cache.dynamic_func(suffix=self._basis.name)
def trace_node_coords_in_element(
geo_side_arg: self.geometry.SideArg,
basis_arg: self.BasisArg,
element_index: ElementIndex,
node_index_in_elt: int,
):
return CENTER_COORDS
return trace_node_coords_in_element
def trace(self):
return PiecewiseConstantBasisSpace.Trace(self)
def make_discontinuous_basis_space(geometry: Geometry, shape: ShapeFunction):
topology = DiscontinuousSpaceTopology(geometry, shape.NODES_PER_ELEMENT)
if shape.NODES_PER_ELEMENT == 1:
# piecewise-constant space
return PiecewiseConstantBasisSpace(topology=topology, shape=shape)
return ShapeBasisSpace(topology=topology, shape=shape)
class PointBasisSpace(BasisSpace):
"""An unstructured :class:`BasisSpace` that is non-zero at a finite set of points only.
The node locations and nodal quadrature weights are defined by a :class:`Quadrature` formula.
"""
def __init__(self, quadrature: Quadrature):
self._quadrature = quadrature
if quadrature.points_per_element() is None:
raise NotImplementedError("Varying number of points per element is not supported yet")
topology = DiscontinuousSpaceTopology(
geometry=quadrature.domain.geometry, nodes_per_element=quadrature.points_per_element()
)
super().__init__(topology)
self.BasisArg = quadrature.Arg
self.basis_arg_value = quadrature.arg_value
self.ORDER = 0
        self.make_element_outer_weight = self.make_element_inner_weight
        self.make_element_outer_weight_gradient = self.make_element_inner_weight_gradient
@property
def name(self):
return f"{self._quadrature.name}_Point"
def make_node_coords_in_element(self):
@cache.dynamic_func(suffix=self.name)
def node_coords_in_element(
elt_arg: self._quadrature.domain.ElementArg,
basis_arg: self.BasisArg,
element_index: ElementIndex,
node_index_in_elt: int,
):
return self._quadrature.point_coords(elt_arg, basis_arg, element_index, node_index_in_elt)
return node_coords_in_element
def make_node_quadrature_weight(self):
@cache.dynamic_func(suffix=self.name)
def node_quadrature_weight(
elt_arg: self._quadrature.domain.ElementArg,
basis_arg: self.BasisArg,
element_index: ElementIndex,
node_index_in_elt: int,
):
return self._quadrature.point_weight(elt_arg, basis_arg, element_index, node_index_in_elt)
return node_quadrature_weight
def make_element_inner_weight(self):
@cache.dynamic_func(suffix=self.name)
def element_inner_weight(
elt_arg: self._quadrature.domain.ElementArg,
basis_arg: self.BasisArg,
element_index: ElementIndex,
coords: Coords,
node_index_in_elt: int,
):
qp_coord = self._quadrature.point_coords(elt_arg, basis_arg, element_index, node_index_in_elt)
return wp.select(wp.length_sq(coords - qp_coord) < 0.001, 0.0, 1.0)
return element_inner_weight
def make_element_inner_weight_gradient(self):
gradient_vec = cache.cached_vec_type(length=self.geometry.dimension, dtype=float)
@cache.dynamic_func(suffix=self.name)
def element_inner_weight_gradient(
elt_arg: self._quadrature.domain.ElementArg,
basis_arg: self.BasisArg,
element_index: ElementIndex,
coords: Coords,
node_index_in_elt: int,
):
return gradient_vec(0.0)
return element_inner_weight_gradient
def make_trace_node_quadrature_weight(self, trace_basis):
@cache.dynamic_func(suffix=self.name)
def trace_node_quadrature_weight(
elt_arg: trace_basis.geometry.SideArg,
basis_arg: trace_basis.BasisArg,
element_index: ElementIndex,
node_index_in_elt: int,
):
return 0.0
return trace_node_quadrature_weight
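# Illustrative sketch (not part of the original source): a Dirac-type basis
# collocated at quadrature points; the geometry and quadrature order are
# assumptions for the example.
if __name__ == "__main__":
    import warp.fem as fem

    geo = fem.Grid2D(res=wp.vec2i(4, 4))
    quadrature = fem.RegularQuadrature(fem.Cells(geometry=geo), order=2)
    point_basis = PointBasisSpace(quadrature)
    point_space = fem.make_collocated_function_space(point_basis, dtype=float)
    print(point_space.node_count())  # one node per quadrature point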
| 19,174 | Python | 35.66348 | 146 | 0.623448 |
NVIDIA/warp/warp/fem/space/__init__.py | # isort: skip_file
from enum import Enum
from typing import Optional
import warp.fem.domain as _domain
import warp.fem.geometry as _geometry
import warp.fem.polynomial as _polynomial
from .function_space import FunctionSpace
from .topology import SpaceTopology
from .basis_space import BasisSpace, PointBasisSpace, ShapeBasisSpace, make_discontinuous_basis_space
from .collocated_function_space import CollocatedFunctionSpace
from .shape import ElementBasis, get_shape_function
from .grid_2d_function_space import make_grid_2d_space_topology
from .grid_3d_function_space import make_grid_3d_space_topology
from .trimesh_2d_function_space import make_trimesh_2d_space_topology
from .tetmesh_function_space import make_tetmesh_space_topology
from .quadmesh_2d_function_space import make_quadmesh_2d_space_topology
from .hexmesh_function_space import make_hexmesh_space_topology
from .nanogrid_function_space import make_nanogrid_space_topology
from .partition import SpacePartition, make_space_partition
from .restriction import SpaceRestriction
from .dof_mapper import DofMapper, IdentityMapper, SymmetricTensorMapper, SkewSymmetricTensorMapper
def make_space_restriction(
space: Optional[FunctionSpace] = None,
space_partition: Optional[SpacePartition] = None,
domain: Optional[_domain.GeometryDomain] = None,
space_topology: Optional[SpaceTopology] = None,
device=None,
temporary_store: "Optional[warp.fem.cache.TemporaryStore]" = None, # noqa: F821
) -> SpaceRestriction:
"""
Restricts a function space partition to a Domain, i.e. a subset of its elements.
One of `space_partition`, `space_topology`, or `space` must be provided (and will be considered in that order).
Args:
        space: (deprecated) if neither `space_partition` nor `space_topology` is provided, the space defining the topology to restrict
space_partition: the subset of nodes from the space topology to consider
domain: the domain to restrict the space to, defaults to all cells of the space geometry or partition.
space_topology: the space topology to be restricted, if `space_partition` is ``None``.
device: device on which to perform and store computations
temporary_store: shared pool from which to allocate temporary arrays
"""
if space_partition is None:
if space_topology is None:
assert space is not None
space_topology = space.topology
if domain is None:
domain = _domain.Cells(geometry=space_topology.geometry)
space_partition = make_space_partition(
space_topology=space_topology, geometry_partition=domain.geometry_partition
)
elif domain is None:
domain = _domain.Cells(geometry=space_partition.geo_partition)
return SpaceRestriction(
space_partition=space_partition, domain=domain, device=device, temporary_store=temporary_store
)
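# Illustrative sketch (not part of the original source): restricting a space to
# the boundary sides of its geometry; the geometry type and degree are assumptions.
#
#   geo = _geometry.Grid2D(res=wp.vec2i(4, 4))
#   space = make_polynomial_space(geo, degree=1)
#   restriction = make_space_restriction(
#       space_partition=make_space_partition(space_topology=space.topology),
#       domain=_domain.BoundarySides(geo),
#   )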
def make_polynomial_basis_space(
geo: _geometry.Geometry,
degree: int = 1,
element_basis: Optional[ElementBasis] = None,
discontinuous: bool = False,
family: Optional[_polynomial.Polynomial] = None,
) -> BasisSpace:
"""
Equips a geometry with a polynomial basis.
Args:
geo: the Geometry on which to build the space
degree: polynomial degree of the per-element shape functions
        discontinuous: if True, use Discontinuous Galerkin shape functions. Discontinuous is implied if degree is 0, i.e., piecewise-constant shape functions.
element_basis: type of basis function for the individual elements
family: Polynomial family used to generate the shape function basis. If not provided, a reasonable basis is chosen.
Returns:
the constructed basis space
"""
base_geo = geo.base if isinstance(geo, _geometry.DeformedGeometry) else geo
if element_basis is None:
element_basis = ElementBasis.LAGRANGE
elif element_basis == ElementBasis.SERENDIPITY and degree == 1:
# Degree-1 serendipity is always equivalent to Lagrange
element_basis = ElementBasis.LAGRANGE
shape = get_shape_function(geo.reference_cell(), geo.dimension, degree, element_basis, family)
if discontinuous or degree == 0 or element_basis == ElementBasis.NONCONFORMING_POLYNOMIAL:
return make_discontinuous_basis_space(geo, shape)
topology = None
if isinstance(base_geo, _geometry.Grid2D):
topology = make_grid_2d_space_topology(geo, shape)
elif isinstance(base_geo, _geometry.Grid3D):
topology = make_grid_3d_space_topology(geo, shape)
elif isinstance(base_geo, _geometry.Trimesh2D):
topology = make_trimesh_2d_space_topology(geo, shape)
elif isinstance(base_geo, _geometry.Tetmesh):
topology = make_tetmesh_space_topology(geo, shape)
elif isinstance(base_geo, _geometry.Quadmesh2D):
topology = make_quadmesh_2d_space_topology(geo, shape)
elif isinstance(base_geo, _geometry.Hexmesh):
topology = make_hexmesh_space_topology(geo, shape)
elif isinstance(base_geo, _geometry.Nanogrid):
topology = make_nanogrid_space_topology(geo, shape)
if topology is None:
raise NotImplementedError(f"Unsupported geometry type {geo.name}")
return ShapeBasisSpace(topology, shape)
def make_collocated_function_space(
basis_space: BasisSpace, dtype: type = float, dof_mapper: Optional[DofMapper] = None
) -> CollocatedFunctionSpace:
"""
Constructs a function space from a basis space and a value type, such that all degrees of freedom of the value type are stored at each of the basis nodes.
    Args:
        basis_space: the basis space on which to build the function space
        dtype: value type of the function space. If ``dof_mapper`` is provided, the value type from the DofMapper will be used instead.
        dof_mapper: mapping from node degrees of freedom to function values, defaults to Identity. Useful for reduced coordinates, e.g. :py:class:`SymmetricTensorMapper` maps 2x2 (resp. 3x3) symmetric tensors to 3 (resp. 6) degrees of freedom.
Returns:
the constructed function space
"""
return CollocatedFunctionSpace(basis_space, dtype=dtype, dof_mapper=dof_mapper)
def make_polynomial_space(
geo: _geometry.Geometry,
dtype: type = float,
dof_mapper: Optional[DofMapper] = None,
degree: int = 1,
element_basis: Optional[ElementBasis] = None,
discontinuous: bool = False,
family: Optional[_polynomial.Polynomial] = None,
) -> CollocatedFunctionSpace:
"""
Equips a geometry with a collocated, polynomial function space.
    Equivalent to successive calls to :func:`make_polynomial_basis_space` and :func:`make_collocated_function_space`.
Args:
geo: the Geometry on which to build the space
        dtype: value type of the function space. If ``dof_mapper`` is provided, the value type from the DofMapper will be used instead.
        dof_mapper: mapping from node degrees of freedom to function values, defaults to Identity. Useful for reduced coordinates, e.g. :py:class:`SymmetricTensorMapper` maps 2x2 (resp. 3x3) symmetric tensors to 3 (resp. 6) degrees of freedom.
        degree: polynomial degree of the per-element shape functions
        discontinuous: if True, use Discontinuous Galerkin shape functions. Discontinuous is implied if degree is 0, i.e., piecewise-constant shape functions.
element_basis: type of basis function for the individual elements
family: Polynomial family used to generate the shape function basis. If not provided, a reasonable basis is chosen.
Returns:
the constructed function space
"""
basis_space = make_polynomial_basis_space(geo, degree, element_basis, discontinuous, family)
return CollocatedFunctionSpace(basis_space, dtype=dtype, dof_mapper=dof_mapper)
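# Illustrative sketch (not part of the original source): a vector-valued,
# degree-2 serendipity space over a grid; the geometry below is an assumption.
if __name__ == "__main__":
    import warp as wp

    geo = _geometry.Grid2D(res=wp.vec2i(8, 8))
    space = make_polynomial_space(
        geo, dtype=wp.vec2, degree=2, element_basis=ElementBasis.SERENDIPITY
    )
    field = space.make_field()  # zero-initialized, one wp.vec2 value per node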
| 7,828 | Python | 42.494444 | 241 | 0.728922 |
NVIDIA/warp/warp/fem/space/topology.py | from typing import Optional, Type
import warp as wp
from warp.fem import cache
from warp.fem.geometry import DeformedGeometry, Geometry
from warp.fem.types import ElementIndex
class SpaceTopology:
"""
Interface class for defining the topology of a function space.
The topology only considers the indices of the nodes in each element, and as such,
the connectivity pattern of the function space.
It does not specify the actual location of the nodes within the elements, or the valuation function.
"""
dimension: int
"""Embedding dimension of the function space"""
NODES_PER_ELEMENT: int
"""Number of interpolation nodes per element of the geometry.
.. note:: This will change to be defined per-element in future versions
"""
@wp.struct
class TopologyArg:
"""Structure containing arguments to be passed to device functions"""
pass
def __init__(self, geometry: Geometry, nodes_per_element: int):
self._geometry = geometry
self.dimension = geometry.dimension
self.NODES_PER_ELEMENT = wp.constant(nodes_per_element)
self.ElementArg = geometry.CellArg
@property
def geometry(self) -> Geometry:
"""Underlying geometry"""
return self._geometry
def node_count(self) -> int:
"""Number of nodes in the interpolation basis"""
raise NotImplementedError
def topo_arg_value(self, device) -> "TopologyArg":
"""Value of the topology argument structure to be passed to device functions"""
return SpaceTopology.TopologyArg()
@property
def name(self):
return f"{self.__class__.__name__}_{self.NODES_PER_ELEMENT}"
def __str__(self):
return self.name
@staticmethod
def element_node_index(
geo_arg: "ElementArg", # noqa: F821
topo_arg: "TopologyArg",
element_index: ElementIndex,
node_index_in_elt: int,
):
"""Global node index for a given node in a given element"""
raise NotImplementedError
def element_node_indices(self, out: Optional[wp.array] = None) -> wp.array:
"""Returns a temporary array containing the global index for each node of each element"""
NODES_PER_ELEMENT = self.NODES_PER_ELEMENT
@cache.dynamic_kernel(suffix=self.name)
def fill_element_node_indices(
geo_cell_arg: self.geometry.CellArg,
topo_arg: self.TopologyArg,
element_node_indices: wp.array2d(dtype=int),
):
element_index = wp.tid()
for n in range(NODES_PER_ELEMENT):
element_node_indices[element_index, n] = self.element_node_index(
geo_cell_arg, topo_arg, element_index, n
)
shape = (self.geometry.cell_count(), NODES_PER_ELEMENT)
if out is None:
element_node_indices = wp.empty(
shape=shape,
dtype=int,
)
else:
if out.shape != shape or out.dtype != wp.int32:
raise ValueError(f"Out element node indices array must have shape {shape} and data type 'int32'")
element_node_indices = out
wp.launch(
dim=element_node_indices.shape[0],
kernel=fill_element_node_indices,
inputs=[
self.geometry.cell_arg_value(device=element_node_indices.device),
self.topo_arg_value(device=element_node_indices.device),
element_node_indices,
],
device=element_node_indices.device,
)
return element_node_indices
# Interface generating trace space topology
def trace(self) -> "TraceSpaceTopology":
"""Trace of the function space over lower-dimensional elements of the geometry"""
return TraceSpaceTopology(self)
@property
def is_trace(self) -> bool:
"""Whether this topology is defined on the trace of the geometry"""
return self.dimension == self.geometry.dimension - 1
def full_space_topology(self) -> "SpaceTopology":
"""Returns the full space topology from which this topology is derived"""
return self
def __eq__(self, other: "SpaceTopology") -> bool:
"""Checks whether two topologies are compatible"""
return self.geometry == other.geometry and self.name == other.name
def is_derived_from(self, other: "SpaceTopology") -> bool:
"""Checks whether two topologies are equal, or `self` is the trace of `other`"""
if self.dimension == other.dimension:
return self == other
if self.dimension + 1 == other.dimension:
return self.full_space_topology() == other
return False
class TraceSpaceTopology(SpaceTopology):
"""Auto-generated trace topology defining the node indices associated to the geometry sides"""
def __init__(self, topo: SpaceTopology):
super().__init__(topo.geometry, 2 * topo.NODES_PER_ELEMENT)
self._topo = topo
self.dimension = topo.dimension - 1
self.ElementArg = topo.geometry.SideArg
self.TopologyArg = topo.TopologyArg
self.topo_arg_value = topo.topo_arg_value
self.inner_cell_index = self._make_inner_cell_index()
self.outer_cell_index = self._make_outer_cell_index()
self.neighbor_cell_index = self._make_neighbor_cell_index()
self.element_node_index = self._make_element_node_index()
def node_count(self) -> int:
return self._topo.node_count()
@property
def name(self):
return f"{self._topo.name}_Trace"
def _make_inner_cell_index(self):
NODES_PER_ELEMENT = self._topo.NODES_PER_ELEMENT
@cache.dynamic_func(suffix=self.name)
def inner_cell_index(args: self.geometry.SideArg, element_index: ElementIndex, node_index_in_elt: int):
index_in_inner_cell = wp.select(node_index_in_elt < NODES_PER_ELEMENT, -1, node_index_in_elt)
return self.geometry.side_inner_cell_index(args, element_index), index_in_inner_cell
return inner_cell_index
def _make_outer_cell_index(self):
NODES_PER_ELEMENT = self._topo.NODES_PER_ELEMENT
@cache.dynamic_func(suffix=self.name)
def outer_cell_index(args: self.geometry.SideArg, element_index: ElementIndex, node_index_in_elt: int):
return self.geometry.side_outer_cell_index(args, element_index), node_index_in_elt - NODES_PER_ELEMENT
return outer_cell_index
def _make_neighbor_cell_index(self):
NODES_PER_ELEMENT = self._topo.NODES_PER_ELEMENT
@cache.dynamic_func(suffix=self.name)
def neighbor_cell_index(args: self.geometry.SideArg, element_index: ElementIndex, node_index_in_elt: int):
if node_index_in_elt < NODES_PER_ELEMENT:
return self.geometry.side_inner_cell_index(args, element_index), node_index_in_elt
else:
return (
self.geometry.side_outer_cell_index(args, element_index),
node_index_in_elt - NODES_PER_ELEMENT,
)
return neighbor_cell_index
def _make_element_node_index(self):
@cache.dynamic_func(suffix=self.name)
def trace_element_node_index(
geo_side_arg: self.geometry.SideArg,
topo_arg: self._topo.TopologyArg,
element_index: ElementIndex,
node_index_in_elt: int,
):
cell_index, index_in_cell = self.neighbor_cell_index(geo_side_arg, element_index, node_index_in_elt)
geo_cell_arg = self.geometry.side_to_cell_arg(geo_side_arg)
return self._topo.element_node_index(geo_cell_arg, topo_arg, cell_index, index_in_cell)
return trace_element_node_index
def full_space_topology(self) -> SpaceTopology:
"""Returns the full space topology from which this topology is derived"""
return self._topo
def __eq__(self, other: "TraceSpaceTopology") -> bool:
return self._topo == other._topo
class DiscontinuousSpaceTopologyMixin:
"""Helper for defining discontinuous topologies (per-element nodes)"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.element_node_index = self._make_element_node_index()
def node_count(self):
return self.geometry.cell_count() * self.NODES_PER_ELEMENT
@property
def name(self):
return f"{self.geometry.name}_D{self.NODES_PER_ELEMENT}"
def _make_element_node_index(self):
NODES_PER_ELEMENT = self.NODES_PER_ELEMENT
@cache.dynamic_func(suffix=self.name)
def element_node_index(
elt_arg: self.geometry.CellArg,
topo_arg: self.TopologyArg,
element_index: ElementIndex,
node_index_in_elt: int,
):
return NODES_PER_ELEMENT * element_index + node_index_in_elt
return element_node_index
class DiscontinuousSpaceTopology(DiscontinuousSpaceTopologyMixin, SpaceTopology):
"""Topology for generic discontinuous spaces"""
pass
class DeformedGeometrySpaceTopology(SpaceTopology):
def __init__(self, geometry: DeformedGeometry, base_topology: SpaceTopology):
super().__init__(geometry, base_topology.NODES_PER_ELEMENT)
self.base = base_topology
self.node_count = self.base.node_count
self.topo_arg_value = self.base.topo_arg_value
self.TopologyArg = self.base.TopologyArg
self.element_node_index = self._make_element_node_index()
@property
def name(self):
return f"{self.base.name}_{self.geometry.field.name}"
def _make_element_node_index(self):
@cache.dynamic_func(suffix=self.name)
def element_node_index(
elt_arg: self.geometry.CellArg,
topo_arg: self.TopologyArg,
element_index: ElementIndex,
node_index_in_elt: int,
):
return self.base.element_node_index(elt_arg.elt_arg, topo_arg, element_index, node_index_in_elt)
return element_node_index
def forward_base_topology(topology_class: Type[SpaceTopology], geometry: Geometry, *args, **kwargs) -> SpaceTopology:
"""
If `geometry` is *not* a :class:`DeformedGeometry`, constructs a normal instance of `topology_class` over `geometry`, forwarding additional arguments.
    If `geometry` *is* a :class:`DeformedGeometry`, constructs an instance of `topology_class` over the base (undeformed) geometry of `geometry`,
    then wraps it in a :class:`DeformedGeometrySpaceTopology` that forwards calls to the underlying topology.
"""
if isinstance(geometry, DeformedGeometry):
base_topo = topology_class(geometry.base, *args, **kwargs)
return DeformedGeometrySpaceTopology(geometry, base_topo)
return topology_class(geometry, *args, **kwargs)
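# Illustrative sketch (not part of the original source): wrapping a concrete
# topology constructor so that it transparently supports deformed geometries.
# The geometry type and per-element node count below are assumptions.
if __name__ == "__main__":
    from warp.fem.geometry import Grid2D

    geo = Grid2D(res=wp.vec2i(4, 4))
    topo = forward_base_topology(DiscontinuousSpaceTopology, geo, nodes_per_element=3)
    print(topo.node_count())  # == geo.cell_count() * 3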
| 10,898 | Python | 35.573825 | 158 | 0.640852 |
NVIDIA/warp/warp/fem/space/dof_mapper.py | import math
from enum import Enum
from typing import Any
import warp as wp
import warp.types
vec6 = wp.types.vector(length=6, dtype=wp.float32)
_SQRT_2 = wp.constant(math.sqrt(2.0))
_SQRT_3 = wp.constant(math.sqrt(3.0))
_SQRT_1_2 = wp.constant(math.sqrt(1.0 / 2.0))
_SQRT_1_3 = wp.constant(math.sqrt(1.0 / 3.0))
class DofMapper:
"""Base class from mapping node degrees of freedom to function values"""
value_dtype: type
dof_dtype: type
DOF_SIZE: int
@wp.func
def dof_to_value(dof: Any):
raise NotImplementedError
@wp.func
def value_to_dof(val: Any):
raise NotImplementedError
def __str__(self):
return f"{self.value_dtype.__name__}_{self.DOF_SIZE}"
class IdentityMapper(DofMapper):
"""Identity mapper"""
def __init__(self, dtype: type):
if dtype == float:
dtype = wp.float32
self.value_dtype = dtype
self.dof_dtype = dtype
size = warp.types.type_length(dtype)
self.DOF_SIZE = wp.constant(size)
@wp.func
def dof_to_value(dof: Any):
return dof
@wp.func
def value_to_dof(val: Any):
return val
class SymmetricTensorMapper(DofMapper):
"""Orthonormal isomorphism from R^{n (n+1)} to nxn symmetric tensors,
using usual L2 norm for vectors and half Frobenius norm, (tau : tau)/2 for tensors.
"""
class Mapping(Enum):
VOIGT = 0
"""Voigt ordering of vector coefficients:
first the three diagonal terms, then off-diagonal coefficients"""
DB16 = 1
"""Ordering that also separates normal from tangential coefficients:
first trace, then other diagonal terms, then off-diagonal coefficients.
See [Daviet and Bertails-Descoubes 2016]"""
def __init__(self, dtype: type, mapping: Mapping = Mapping.VOIGT):
self.value_dtype = dtype
self.mapping = mapping
if dtype == wp.mat22:
self.dof_dtype = wp.vec3
self.DOF_SIZE = wp.constant(3)
if mapping == SymmetricTensorMapper.Mapping.VOIGT:
self.dof_to_value = SymmetricTensorMapper.dof_to_value_2d_voigt
self.value_to_dof = SymmetricTensorMapper.value_to_dof_2d_voigt
else:
self.dof_to_value = SymmetricTensorMapper.dof_to_value_2d
self.value_to_dof = SymmetricTensorMapper.value_to_dof_2d
elif dtype == wp.mat33:
self.dof_dtype = vec6
self.DOF_SIZE = wp.constant(6)
if mapping == SymmetricTensorMapper.Mapping.VOIGT:
self.dof_to_value = SymmetricTensorMapper.dof_to_value_3d_voigt
self.value_to_dof = SymmetricTensorMapper.value_to_dof_3d_voigt
else:
self.dof_to_value = SymmetricTensorMapper.dof_to_value_3d
self.value_to_dof = SymmetricTensorMapper.value_to_dof_3d
else:
raise ValueError("Unsupported value dtype: ", dtype)
def __str__(self):
return f"{self.mapping}_{self.DOF_SIZE}"
@wp.func
def dof_to_value_2d(dof: wp.vec3):
a = dof[0]
b = dof[1]
c = dof[2]
return wp.mat22(a + b, c, c, a - b)
@wp.func
def value_to_dof_2d(val: wp.mat22):
a = 0.5 * (val[0, 0] + val[1, 1])
b = 0.5 * (val[0, 0] - val[1, 1])
c = 0.5 * (val[0, 1] + val[1, 0])
return wp.vec3(a, b, c)
@wp.func
def dof_to_value_2d_voigt(dof: wp.vec3):
a = _SQRT_2 * dof[0]
b = _SQRT_2 * dof[1]
c = dof[2]
return wp.mat22(a, c, c, b)
@wp.func
def value_to_dof_2d_voigt(val: wp.mat22):
a = _SQRT_1_2 * val[0, 0]
b = _SQRT_1_2 * val[1, 1]
c = 0.5 * (val[0, 1] + val[1, 0])
return wp.vec3(a, b, c)
@wp.func
def dof_to_value_3d(dof: vec6):
a = dof[0] * _SQRT_2 * _SQRT_1_3
b = dof[1]
c = dof[2] * _SQRT_1_3
d = dof[3]
e = dof[4]
f = dof[5]
return wp.mat33(
a + b - c,
f,
e,
f,
a - b - c,
d,
e,
d,
a + 2.0 * c,
)
@wp.func
def value_to_dof_3d(val: wp.mat33):
a = (val[0, 0] + val[1, 1] + val[2, 2]) * _SQRT_1_3 * _SQRT_1_2
b = 0.5 * (val[0, 0] - val[1, 1])
c = 0.5 * (val[2, 2] - (val[0, 0] + val[1, 1] + val[2, 2]) / 3.0) * _SQRT_3
d = 0.5 * (val[2, 1] + val[1, 2])
e = 0.5 * (val[0, 2] + val[2, 0])
f = 0.5 * (val[1, 0] + val[0, 1])
return vec6(a, b, c, d, e, f)
@wp.func
def dof_to_value_3d_voigt(dof: vec6):
a = _SQRT_2 * dof[0]
b = _SQRT_2 * dof[1]
c = _SQRT_2 * dof[2]
d = dof[3]
e = dof[4]
f = dof[5]
return wp.mat33(
a,
f,
e,
f,
b,
d,
e,
d,
c,
)
@wp.func
def value_to_dof_3d_voigt(val: wp.mat33):
a = _SQRT_1_2 * val[0, 0]
b = _SQRT_1_2 * val[1, 1]
c = _SQRT_1_2 * val[2, 2]
d = 0.5 * (val[2, 1] + val[1, 2])
e = 0.5 * (val[0, 2] + val[2, 0])
f = 0.5 * (val[1, 0] + val[0, 1])
return vec6(a, b, c, d, e, f)
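# Illustrative sketch (not part of the original source): exercising the 2D
# Voigt mapping from inside a kernel; the round trip reproduces the input dof.
@wp.kernel
def _example_symmetric_voigt_roundtrip(out: wp.array(dtype=wp.vec3)):
    dof = wp.vec3(1.0, 2.0, 3.0)
    tau = SymmetricTensorMapper.dof_to_value_2d_voigt(dof)  # symmetric wp.mat22
    out[0] = SymmetricTensorMapper.value_to_dof_2d_voigt(tau)  # == dof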
class SkewSymmetricTensorMapper(DofMapper):
"""Orthonormal isomorphism from R^{n (n-1)} to nxn skew-symmetric tensors,
using usual L2 norm for vectors and half Frobenius norm, (tau : tau)/2 for tensors.
"""
def __init__(self, dtype: type):
self.value_dtype = dtype
if dtype == wp.mat22:
self.dof_dtype = float
self.DOF_SIZE = wp.constant(1)
self.dof_to_value = SkewSymmetricTensorMapper.dof_to_value_2d
self.value_to_dof = SkewSymmetricTensorMapper.value_to_dof_2d
elif dtype == wp.mat33:
self.dof_dtype = wp.vec3
self.DOF_SIZE = wp.constant(3)
self.dof_to_value = SkewSymmetricTensorMapper.dof_to_value_3d
self.value_to_dof = SkewSymmetricTensorMapper.value_to_dof_3d
else:
raise ValueError("Unsupported value dtype: ", dtype)
def __str__(self):
return f"{self.__class__.__name__}_{self.DOF_SIZE}"
@wp.func
def dof_to_value_2d(dof: float):
return wp.mat22(0.0, -dof, dof, 0.0)
@wp.func
def value_to_dof_2d(val: wp.mat22):
return 0.5 * (val[1, 0] - val[0, 1])
@wp.func
def dof_to_value_3d(dof: wp.vec3):
a = dof[0]
b = dof[1]
c = dof[2]
return wp.mat33(0.0, -c, b, c, 0.0, -a, -b, a, 0.0)
@wp.func
def value_to_dof_3d(val: wp.mat33):
a = 0.5 * (val[2, 1] - val[1, 2])
b = 0.5 * (val[0, 2] - val[2, 0])
c = 0.5 * (val[1, 0] - val[0, 1])
return wp.vec3(a, b, c)
| 6,859 | Python | 27.945148 | 87 | 0.516402 |
NVIDIA/warp/warp/fem/space/hexmesh_function_space.py | import warp as wp
from warp.fem import cache
from warp.fem.geometry import Hexmesh
from warp.fem.geometry.hexmesh import (
EDGE_VERTEX_INDICES,
FACE_ORIENTATION,
FACE_TRANSLATION,
)
from warp.fem.polynomial import is_closed
from warp.fem.types import ElementIndex
from .shape import (
CubeSerendipityShapeFunctions,
CubeTripolynomialShapeFunctions,
ShapeFunction,
)
from .topology import SpaceTopology, forward_base_topology
_FACE_ORIENTATION_I = wp.constant(wp.mat(shape=(16, 2), dtype=int)(FACE_ORIENTATION))
_FACE_TRANSLATION_I = wp.constant(wp.mat(shape=(4, 2), dtype=int)(FACE_TRANSLATION))
# map from shape function vertex indexing to hexmesh vertex indexing
_CUBE_TO_HEX_VERTEX = wp.constant(wp.vec(length=8, dtype=int)([0, 4, 3, 7, 1, 5, 2, 6]))
# map from shape function edge indexing to hexmesh edge indexing
_CUBE_TO_HEX_EDGE = wp.constant(wp.vec(length=12, dtype=int)([0, 4, 2, 6, 3, 1, 7, 5, 8, 11, 9, 10]))
@wp.struct
class HexmeshTopologyArg:
hex_edge_indices: wp.array2d(dtype=int)
hex_face_indices: wp.array2d(dtype=wp.vec2i)
vertex_count: int
edge_count: int
face_count: int
class HexmeshSpaceTopology(SpaceTopology):
TopologyArg = HexmeshTopologyArg
def __init__(
self,
mesh: Hexmesh,
shape: ShapeFunction,
need_hex_edge_indices: bool = True,
need_hex_face_indices: bool = True,
):
if not is_closed(shape.family):
raise ValueError("A closed polynomial family is required to define a continuous function space")
super().__init__(mesh, shape.NODES_PER_ELEMENT)
self._mesh = mesh
self.shape = shape
if need_hex_edge_indices:
self._hex_edge_indices = self._mesh.hex_edge_indices
self._edge_count = self._mesh.edge_count()
else:
self._hex_edge_indices = wp.empty(shape=(0, 0), dtype=int)
self._edge_count = 0
        if need_hex_face_indices:
            self._compute_hex_face_indices()
        else:
            self._hex_face_indices = wp.empty(shape=(0, 0), dtype=wp.vec2i)
@cache.cached_arg_value
def topo_arg_value(self, device):
arg = HexmeshTopologyArg()
arg.hex_edge_indices = self._hex_edge_indices.to(device)
arg.hex_face_indices = self._hex_face_indices.to(device)
arg.vertex_count = self._mesh.vertex_count()
arg.face_count = self._mesh.side_count()
arg.edge_count = self._edge_count
return arg
def _compute_hex_face_indices(self):
self._hex_face_indices = wp.empty(
dtype=wp.vec2i, device=self._mesh.hex_vertex_indices.device, shape=(self._mesh.cell_count(), 6)
)
wp.launch(
kernel=HexmeshSpaceTopology._compute_hex_face_indices_kernel,
dim=self._mesh.side_count(),
device=self._mesh.hex_vertex_indices.device,
inputs=[
self._mesh.face_hex_indices,
self._mesh._face_hex_face_orientation,
self._hex_face_indices,
],
)
@wp.kernel
def _compute_hex_face_indices_kernel(
face_hex_indices: wp.array(dtype=wp.vec2i),
face_hex_face_ori: wp.array(dtype=wp.vec4i),
hex_face_indices: wp.array2d(dtype=wp.vec2i),
):
f = wp.tid()
hx0 = face_hex_indices[f][0]
local_face_0 = face_hex_face_ori[f][0]
ori_0 = face_hex_face_ori[f][1]
hex_face_indices[hx0, local_face_0] = wp.vec2i(f, ori_0)
hx1 = face_hex_indices[f][1]
local_face_1 = face_hex_face_ori[f][2]
ori_1 = face_hex_face_ori[f][3]
hex_face_indices[hx1, local_face_1] = wp.vec2i(f, ori_1)
class HexmeshTripolynomialSpaceTopology(HexmeshSpaceTopology):
def __init__(self, mesh: Hexmesh, shape: CubeTripolynomialShapeFunctions):
super().__init__(mesh, shape, need_hex_edge_indices=shape.ORDER >= 2, need_hex_face_indices=shape.ORDER >= 2)
self.element_node_index = self._make_element_node_index()
def node_count(self) -> int:
ORDER = self.shape.ORDER
INTERIOR_NODES_PER_EDGE = max(0, ORDER - 1)
INTERIOR_NODES_PER_FACE = INTERIOR_NODES_PER_EDGE**2
INTERIOR_NODES_PER_CELL = INTERIOR_NODES_PER_EDGE**3
return (
self._mesh.vertex_count()
+ self._mesh.edge_count() * INTERIOR_NODES_PER_EDGE
+ self._mesh.side_count() * INTERIOR_NODES_PER_FACE
+ self._mesh.cell_count() * INTERIOR_NODES_PER_CELL
)
@wp.func
def _rotate_face_index(type_index: int, ori: int, size: int):
i = type_index // size
j = type_index - i * size
coords = wp.vec2i(i, j)
fv = ori // 2
rot_i = wp.dot(_FACE_ORIENTATION_I[2 * ori], coords) + _FACE_TRANSLATION_I[fv, 0]
rot_j = wp.dot(_FACE_ORIENTATION_I[2 * ori + 1], coords) + _FACE_TRANSLATION_I[fv, 1]
return rot_i * size + rot_j
def _make_element_node_index(self):
ORDER = self.shape.ORDER
INTERIOR_NODES_PER_EDGE = wp.constant(max(0, ORDER - 1))
INTERIOR_NODES_PER_FACE = wp.constant(INTERIOR_NODES_PER_EDGE**2)
INTERIOR_NODES_PER_CELL = wp.constant(INTERIOR_NODES_PER_EDGE**3)
@cache.dynamic_func(suffix=self.name)
def element_node_index(
geo_arg: Hexmesh.CellArg,
topo_arg: HexmeshTopologyArg,
element_index: ElementIndex,
node_index_in_elt: int,
):
node_type, type_instance, type_index = self.shape.node_type_and_type_index(node_index_in_elt)
if node_type == CubeTripolynomialShapeFunctions.VERTEX:
return geo_arg.hex_vertex_indices[element_index, _CUBE_TO_HEX_VERTEX[type_instance]]
offset = topo_arg.vertex_count
if node_type == CubeTripolynomialShapeFunctions.EDGE:
hex_edge = _CUBE_TO_HEX_EDGE[type_instance]
edge_index = topo_arg.hex_edge_indices[element_index, hex_edge]
v0 = geo_arg.hex_vertex_indices[element_index, EDGE_VERTEX_INDICES[hex_edge, 0]]
v1 = geo_arg.hex_vertex_indices[element_index, EDGE_VERTEX_INDICES[hex_edge, 1]]
if v0 > v1:
type_index = ORDER - 1 - type_index
return offset + INTERIOR_NODES_PER_EDGE * edge_index + type_index
offset += INTERIOR_NODES_PER_EDGE * topo_arg.edge_count
if node_type == CubeTripolynomialShapeFunctions.FACE:
face_index_and_ori = topo_arg.hex_face_indices[element_index, type_instance]
face_index = face_index_and_ori[0]
face_orientation = face_index_and_ori[1]
type_index = HexmeshTripolynomialSpaceTopology._rotate_face_index(
type_index, face_orientation, ORDER - 1
)
return offset + INTERIOR_NODES_PER_FACE * face_index + type_index
offset += INTERIOR_NODES_PER_FACE * topo_arg.face_count
return offset + INTERIOR_NODES_PER_CELL * element_index + type_index
return element_node_index
class HexmeshSerendipitySpaceTopology(HexmeshSpaceTopology):
def __init__(
self,
grid: Hexmesh,
shape: CubeSerendipityShapeFunctions,
):
super().__init__(grid, shape, need_hex_edge_indices=True, need_hex_face_indices=False)
self.element_node_index = self._make_element_node_index()
def node_count(self) -> int:
return self.geometry.vertex_count() + (self.shape.ORDER - 1) * self.geometry.edge_count()
def _make_element_node_index(self):
ORDER = self.shape.ORDER
@cache.dynamic_func(suffix=self.name)
def element_node_index(
cell_arg: Hexmesh.CellArg,
topo_arg: HexmeshSpaceTopology.TopologyArg,
element_index: ElementIndex,
node_index_in_elt: int,
):
node_type, type_index = self.shape.node_type_and_type_index(node_index_in_elt)
if node_type == CubeSerendipityShapeFunctions.VERTEX:
return cell_arg.hex_vertex_indices[element_index, _CUBE_TO_HEX_VERTEX[type_index]]
type_instance, index_in_edge = CubeSerendipityShapeFunctions._cube_edge_index(node_type, type_index)
hex_edge = _CUBE_TO_HEX_EDGE[type_instance]
edge_index = topo_arg.hex_edge_indices[element_index, hex_edge]
v0 = cell_arg.hex_vertex_indices[element_index, EDGE_VERTEX_INDICES[hex_edge, 0]]
v1 = cell_arg.hex_vertex_indices[element_index, EDGE_VERTEX_INDICES[hex_edge, 1]]
if v0 > v1:
index_in_edge = ORDER - 1 - index_in_edge
return topo_arg.vertex_count + (ORDER - 1) * edge_index + index_in_edge
return element_node_index
def make_hexmesh_space_topology(mesh: Hexmesh, shape: ShapeFunction):
if isinstance(shape, CubeSerendipityShapeFunctions):
return forward_base_topology(HexmeshSerendipitySpaceTopology, mesh, shape)
if isinstance(shape, CubeTripolynomialShapeFunctions):
return forward_base_topology(HexmeshTripolynomialSpaceTopology, mesh, shape)
raise ValueError(f"Unsupported shape function {shape.name}")
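# Illustrative worked example (not part of the original source): for a
# tripolynomial space of ORDER = 2, each edge, face and cell carries exactly
# one interior node, so HexmeshTripolynomialSpaceTopology.node_count() reduces to
#   vertex_count + edge_count + face_count + cell_count,
# the classical count for a continuous tri-quadratic hexahedral basis.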
| 9,317 | Python | 35.685039 | 117 | 0.622089 |
NVIDIA/warp/warp/fem/space/trimesh_2d_function_space.py | import warp as wp
from warp.fem import cache
from warp.fem.geometry import Trimesh2D
from warp.fem.types import ElementIndex
from .shape import (
ShapeFunction,
Triangle2DPolynomialShapeFunctions,
)
from .topology import SpaceTopology, forward_base_topology
@wp.struct
class Trimesh2DTopologyArg:
edge_vertex_indices: wp.array(dtype=wp.vec2i)
tri_edge_indices: wp.array2d(dtype=int)
vertex_count: int
edge_count: int
class Trimesh2DSpaceTopology(SpaceTopology):
TopologyArg = Trimesh2DTopologyArg
def __init__(self, mesh: Trimesh2D, shape: ShapeFunction):
super().__init__(mesh, shape.NODES_PER_ELEMENT)
self._mesh = mesh
self._shape = shape
self._compute_tri_edge_indices()
@cache.cached_arg_value
def topo_arg_value(self, device):
arg = Trimesh2DTopologyArg()
arg.tri_edge_indices = self._tri_edge_indices.to(device)
arg.edge_vertex_indices = self._mesh.edge_vertex_indices.to(device)
arg.vertex_count = self._mesh.vertex_count()
arg.edge_count = self._mesh.side_count()
return arg
def _compute_tri_edge_indices(self):
self._tri_edge_indices = wp.empty(
dtype=int, device=self._mesh.tri_vertex_indices.device, shape=(self._mesh.cell_count(), 3)
)
wp.launch(
kernel=Trimesh2DSpaceTopology._compute_tri_edge_indices_kernel,
dim=self._mesh.edge_tri_indices.shape,
device=self._mesh.tri_vertex_indices.device,
inputs=[
self._mesh.edge_tri_indices,
self._mesh.edge_vertex_indices,
self._mesh.tri_vertex_indices,
self._tri_edge_indices,
],
)
@wp.func
def _find_edge_index_in_tri(
edge_vtx: wp.vec2i,
tri_vtx: wp.vec3i,
):
for k in range(2):
if (edge_vtx[0] == tri_vtx[k] and edge_vtx[1] == tri_vtx[k + 1]) or (
edge_vtx[1] == tri_vtx[k] and edge_vtx[0] == tri_vtx[k + 1]
):
return k
return 2
@wp.kernel
def _compute_tri_edge_indices_kernel(
edge_tri_indices: wp.array(dtype=wp.vec2i),
edge_vertex_indices: wp.array(dtype=wp.vec2i),
tri_vertex_indices: wp.array2d(dtype=int),
tri_edge_indices: wp.array2d(dtype=int),
):
e = wp.tid()
edge_vtx = edge_vertex_indices[e]
edge_tris = edge_tri_indices[e]
t0 = edge_tris[0]
t0_vtx = wp.vec3i(tri_vertex_indices[t0, 0], tri_vertex_indices[t0, 1], tri_vertex_indices[t0, 2])
t0_edge = Trimesh2DSpaceTopology._find_edge_index_in_tri(edge_vtx, t0_vtx)
tri_edge_indices[t0, t0_edge] = e
t1 = edge_tris[1]
if t1 != t0:
t1_vtx = wp.vec3i(tri_vertex_indices[t1, 0], tri_vertex_indices[t1, 1], tri_vertex_indices[t1, 2])
t1_edge = Trimesh2DSpaceTopology._find_edge_index_in_tri(edge_vtx, t1_vtx)
tri_edge_indices[t1, t1_edge] = e
class Trimesh2DPolynomialSpaceTopology(Trimesh2DSpaceTopology):
def __init__(self, mesh: Trimesh2D, shape: Triangle2DPolynomialShapeFunctions):
super().__init__(mesh, shape)
self.element_node_index = self._make_element_node_index()
def node_count(self) -> int:
INTERIOR_NODES_PER_SIDE = max(0, self._shape.ORDER - 1)
INTERIOR_NODES_PER_CELL = max(0, self._shape.ORDER - 2) * max(0, self._shape.ORDER - 1) // 2
return (
self._mesh.vertex_count()
+ self._mesh.side_count() * INTERIOR_NODES_PER_SIDE
+ self._mesh.cell_count() * INTERIOR_NODES_PER_CELL
)
def _make_element_node_index(self):
INTERIOR_NODES_PER_SIDE = wp.constant(max(0, self._shape.ORDER - 1))
INTERIOR_NODES_PER_CELL = wp.constant(max(0, self._shape.ORDER - 2) * max(0, self._shape.ORDER - 1) // 2)
@cache.dynamic_func(suffix=self.name)
def element_node_index(
geo_arg: Trimesh2D.CellArg,
topo_arg: Trimesh2DTopologyArg,
element_index: ElementIndex,
node_index_in_elt: int,
):
node_type, type_index = self._shape.node_type_and_type_index(node_index_in_elt)
if node_type == Triangle2DPolynomialShapeFunctions.VERTEX:
return geo_arg.tri_vertex_indices[element_index][type_index]
global_offset = topo_arg.vertex_count
if node_type == Triangle2DPolynomialShapeFunctions.EDGE:
edge = type_index // INTERIOR_NODES_PER_SIDE
edge_node = type_index - INTERIOR_NODES_PER_SIDE * edge
global_edge_index = topo_arg.tri_edge_indices[element_index][edge]
if (
topo_arg.edge_vertex_indices[global_edge_index][0]
!= geo_arg.tri_vertex_indices[element_index][edge]
):
edge_node = INTERIOR_NODES_PER_SIDE - 1 - edge_node
return global_offset + INTERIOR_NODES_PER_SIDE * global_edge_index + edge_node
global_offset += INTERIOR_NODES_PER_SIDE * topo_arg.edge_count
return global_offset + INTERIOR_NODES_PER_CELL * element_index + type_index
return element_node_index
def make_trimesh_2d_space_topology(mesh: Trimesh2D, shape: ShapeFunction):
if isinstance(shape, Triangle2DPolynomialShapeFunctions):
return forward_base_topology(Trimesh2DPolynomialSpaceTopology, mesh, shape)
raise ValueError(f"Unsupported shape function {shape.name}")
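# Illustrative worked example (not part of the original source): for a
# polynomial space of ORDER = 2 on a triangle mesh, each edge carries one
# interior node and cells carry none, so node_count() reduces to
#   vertex_count + edge_count,
# the classical count for continuous P2 elements.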
| 5,569 | Python | 35.168831 | 113 | 0.610882 |
NVIDIA/warp/warp/fem/space/collocated_function_space.py | from typing import Optional
import warp as wp
from warp.fem import cache, utils
from warp.fem.types import DofIndex, get_node_coord
from .basis_space import BasisSpace
from .dof_mapper import DofMapper, IdentityMapper
from .function_space import FunctionSpace
from .partition import SpacePartition, make_space_partition
class CollocatedFunctionSpace(FunctionSpace):
"""Function space where values are collocated at nodes"""
def __init__(self, basis: BasisSpace, dtype: type = float, dof_mapper: DofMapper = None):
super().__init__(topology=basis.topology)
self.dof_mapper = IdentityMapper(dtype) if dof_mapper is None else dof_mapper
self.dtype = self.dof_mapper.value_dtype
self.dof_dtype = self.dof_mapper.dof_dtype
self.VALUE_DOF_COUNT = self.dof_mapper.DOF_SIZE
self._basis = basis
self.SpaceArg = self._basis.BasisArg
self.ORDER = self._basis.ORDER
self.unit_dof_value = self._make_unit_dof_value(self.dof_mapper)
self.node_coords_in_element = self._basis.make_node_coords_in_element()
self.node_quadrature_weight = self._basis.make_node_quadrature_weight()
self.element_inner_weight = self._basis.make_element_inner_weight()
self.element_inner_weight_gradient = self._basis.make_element_inner_weight_gradient()
self.element_outer_weight = self._basis.make_element_outer_weight()
self.element_outer_weight_gradient = self._basis.make_element_outer_weight_gradient()
# For backward compatibility
if hasattr(basis.topology, "node_grid"):
self.node_grid = basis.node_grid
if hasattr(basis, "node_triangulation"):
self.node_triangulation = basis.node_triangulation
if hasattr(basis, "node_tets"):
self.node_tets = basis.node_tets
if hasattr(basis, "node_hexes"):
self.node_hexes = basis.node_hexes
def space_arg_value(self, device):
return self._basis.basis_arg_value(device)
@property
def name(self):
return f"{self._basis.name}_{self.dof_mapper}".replace(".", "_")
@property
def degree(self):
"""Maximum polynomial degree of the underlying basis"""
return self.ORDER
def make_field(
self,
space_partition: Optional[SpacePartition] = None,
) -> "wp.fem.field.NodalField":
from warp.fem.field import NodalField
if space_partition is None:
space_partition = make_space_partition(space_topology=self.topology)
return NodalField(space=self, space_partition=space_partition)
def _make_unit_dof_value(self, dof_mapper: DofMapper):
@cache.dynamic_func(suffix=self.name)
def unit_dof_value(geo_arg: self.topology.ElementArg, space_arg: self.SpaceArg, dof: DofIndex):
return dof_mapper.dof_to_value(utils.unit_element(dof_mapper.dof_dtype(0.0), get_node_coord(dof)))
return unit_dof_value
def node_count(self):
return self.topology.node_count()
def node_positions(self, out: Optional[wp.array] = None) -> wp.array:
return self._basis.node_positions(out=out)
def trace(self) -> "CollocatedFunctionSpace":
return CollocatedFunctionSpaceTrace(self)
class CollocatedFunctionSpaceTrace(CollocatedFunctionSpace):
"""Trace of a :class:`CollocatedFunctionSpace`"""
def __init__(self, space: CollocatedFunctionSpace):
self._space = space
super().__init__(space._basis.trace(), space.dtype, space.dof_mapper)
@property
def name(self):
return f"{self._space.name}_Trace"
def __eq__(self, other: "CollocatedFunctionSpaceTrace") -> bool:
return self._space == other._space
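# Illustrative sketch (not part of the original source): a symmetric-tensor
# valued space built from a scalar basis; geometry and degree are assumptions.
if __name__ == "__main__":
    import warp.fem as fem

    geo = fem.Grid2D(res=wp.vec2i(4, 4))
    basis = fem.make_polynomial_basis_space(geo, degree=1)
    space = CollocatedFunctionSpace(basis, dof_mapper=fem.SymmetricTensorMapper(wp.mat22))
    print(space.VALUE_DOF_COUNT)  # 3 dofs per node for 2x2 symmetric tensors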
| 3,736 | Python | 36 | 110 | 0.672109 |
NVIDIA/warp/warp/fem/space/grid_2d_function_space.py | import numpy as np
import warp as wp
from warp.fem import cache
from warp.fem.geometry import Grid2D
from warp.fem.polynomial import is_closed
from warp.fem.types import ElementIndex
from .shape import (
ShapeFunction,
SquareBipolynomialShapeFunctions,
SquareSerendipityShapeFunctions,
)
from .topology import SpaceTopology, forward_base_topology
class Grid2DSpaceTopology(SpaceTopology):
def __init__(self, grid: Grid2D, shape: ShapeFunction):
if not is_closed(shape.family):
raise ValueError("A closed polynomial family is required to define a continuous function space")
super().__init__(grid, shape.NODES_PER_ELEMENT)
self._shape = shape
@wp.func
def _vertex_coords(vidx_in_cell: int):
x = vidx_in_cell // 2
y = vidx_in_cell - 2 * x
return wp.vec2i(x, y)
@wp.func
def _vertex_index(cell_arg: Grid2D.CellArg, cell_index: ElementIndex, vidx_in_cell: int):
res = cell_arg.res
x_stride = res[1] + 1
corner = Grid2D.get_cell(res, cell_index) + Grid2DSpaceTopology._vertex_coords(vidx_in_cell)
return Grid2D._from_2d_index(x_stride, corner)
class GridBipolynomialSpaceTopology(Grid2DSpaceTopology):
def __init__(self, grid: Grid2D, shape: SquareBipolynomialShapeFunctions):
super().__init__(grid, shape)
self.element_node_index = self._make_element_node_index()
def node_count(self) -> int:
return (self.geometry.res[0] * self._shape.ORDER + 1) * (self.geometry.res[1] * self._shape.ORDER + 1)
def _make_element_node_index(self):
ORDER = self._shape.ORDER
@cache.dynamic_func(suffix=self.name)
def element_node_index(
cell_arg: Grid2D.CellArg,
topo_arg: Grid2DSpaceTopology.TopologyArg,
element_index: ElementIndex,
node_index_in_elt: int,
):
res = cell_arg.res
cell = Grid2D.get_cell(res, element_index)
node_i = node_index_in_elt // (ORDER + 1)
node_j = node_index_in_elt - (ORDER + 1) * node_i
node_x = ORDER * cell[0] + node_i
node_y = ORDER * cell[1] + node_j
node_pitch = (res[1] * ORDER) + 1
node_index = node_pitch * node_x + node_y
return node_index
return element_node_index
def _node_grid(self):
res = self.geometry.res
cell_coords = np.array(self._shape.LOBATTO_COORDS)[:-1]
grid_coords_x = np.repeat(np.arange(0, res[0], dtype=float), len(cell_coords)) + np.tile(
cell_coords, reps=res[0]
)
grid_coords_x = np.append(grid_coords_x, res[0])
        X = grid_coords_x * self.geometry.cell_size[0] + self.geometry.origin[0]
grid_coords_y = np.repeat(np.arange(0, res[1], dtype=float), len(cell_coords)) + np.tile(
cell_coords, reps=res[1]
)
grid_coords_y = np.append(grid_coords_y, res[1])
        Y = grid_coords_y * self.geometry.cell_size[1] + self.geometry.origin[1]
return np.meshgrid(X, Y, indexing="ij")
class GridSerendipitySpaceTopology(Grid2DSpaceTopology):
def __init__(self, grid: Grid2D, shape: SquareSerendipityShapeFunctions):
super().__init__(grid, shape)
self.element_node_index = self._make_element_node_index()
TopologyArg = Grid2D.SideArg
def topo_arg_value(self, device):
return self.geometry.side_arg_value(device)
def node_count(self) -> int:
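        # Serendipity spaces have no interior nodes: vertices come first in the global
        # numbering, followed by ORDER - 1 nodes per grid side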
return self.geometry.vertex_count() + (self._shape.ORDER - 1) * self.geometry.side_count()
def _make_element_node_index(self):
ORDER = self._shape.ORDER
@cache.dynamic_func(suffix=self.name)
def element_node_index(
cell_arg: Grid2D.CellArg,
topo_arg: Grid2D.SideArg,
element_index: ElementIndex,
node_index_in_elt: int,
):
node_type, type_index = self._shape.node_type_and_type_index(node_index_in_elt)
if node_type == SquareSerendipityShapeFunctions.VERTEX:
return Grid2DSpaceTopology._vertex_index(cell_arg, element_index, type_index)
side_offset, index_in_side = SquareSerendipityShapeFunctions.side_offset_and_index(type_index)
axis = 1 - (node_type - SquareSerendipityShapeFunctions.EDGE_X)
cell = Grid2D.get_cell(cell_arg.res, element_index)
origin = wp.vec2i(cell[Grid2D.ROTATION[axis, 0]] + side_offset, cell[Grid2D.ROTATION[axis, 1]])
side = Grid2D.Side(axis, origin)
side_index = Grid2D.side_index(topo_arg, side)
res = cell_arg.res
vertex_count = (res[0] + 1) * (res[1] + 1)
return vertex_count + (ORDER - 1) * side_index + index_in_side
return element_node_index
def make_grid_2d_space_topology(grid: Grid2D, shape: ShapeFunction):
if isinstance(shape, SquareSerendipityShapeFunctions):
return forward_base_topology(GridSerendipitySpaceTopology, grid, shape)
if isinstance(shape, SquareBipolynomialShapeFunctions):
return forward_base_topology(GridBipolynomialSpaceTopology, grid, shape)
raise ValueError(f"Unsupported shape function {shape.name}")
| 5,238 | Python | 34.161074 | 110 | 0.62963 |
NVIDIA/warp/warp/fem/space/tetmesh_function_space.py | import warp as wp
from warp.fem import cache
from warp.fem.geometry import Tetmesh
from warp.fem.types import ElementIndex
from .shape import (
ShapeFunction,
TetrahedronPolynomialShapeFunctions,
)
from .topology import SpaceTopology, forward_base_topology
@wp.struct
class TetmeshTopologyArg:
tet_edge_indices: wp.array2d(dtype=int)
tet_face_indices: wp.array2d(dtype=int)
face_vertex_indices: wp.array(dtype=wp.vec3i)
vertex_count: int
edge_count: int
face_count: int
class TetmeshSpaceTopology(SpaceTopology):
TopologyArg = TetmeshTopologyArg
def __init__(
self,
mesh: Tetmesh,
shape: ShapeFunction,
need_tet_edge_indices: bool = True,
need_tet_face_indices: bool = True,
):
super().__init__(mesh, shape.NODES_PER_ELEMENT)
self._mesh = mesh
self._shape = shape
if need_tet_edge_indices:
self._tet_edge_indices = self._mesh.tet_edge_indices
self._edge_count = self._mesh.edge_count()
else:
self._tet_edge_indices = wp.empty(shape=(0, 0), dtype=int)
self._edge_count = 0
if need_tet_face_indices:
self._compute_tet_face_indices()
else:
self._tet_face_indices = wp.empty(shape=(0, 0), dtype=int)
@cache.cached_arg_value
def topo_arg_value(self, device):
arg = TetmeshTopologyArg()
arg.tet_face_indices = self._tet_face_indices.to(device)
arg.tet_edge_indices = self._tet_edge_indices.to(device)
arg.face_vertex_indices = self._mesh.face_vertex_indices.to(device)
arg.vertex_count = self._mesh.vertex_count()
arg.face_count = self._mesh.side_count()
arg.edge_count = self._edge_count
return arg
def _compute_tet_face_indices(self):
self._tet_face_indices = wp.empty(
dtype=int, device=self._mesh.tet_vertex_indices.device, shape=(self._mesh.cell_count(), 4)
)
wp.launch(
kernel=TetmeshSpaceTopology._compute_tet_face_indices_kernel,
dim=self._mesh._face_tet_indices.shape,
device=self._mesh.tet_vertex_indices.device,
inputs=[
self._mesh.face_tet_indices,
self._mesh.face_vertex_indices,
self._mesh.tet_vertex_indices,
self._tet_face_indices,
],
)
@wp.func
def _find_face_index_in_tet(
face_vtx: wp.vec3i,
tet_vtx: wp.vec4i,
):
for k in range(3):
tvk = wp.vec3i(tet_vtx[k], tet_vtx[(k + 1) % 4], tet_vtx[(k + 2) % 4])
            # Use the fact that face vertex lists always start with their min vertex
min_t = wp.min(tvk)
max_t = wp.max(tvk)
mid_t = tvk[0] + tvk[1] + tvk[2] - min_t - max_t
if min_t == face_vtx[0] and (
(face_vtx[2] == max_t and face_vtx[1] == mid_t) or (face_vtx[1] == max_t and face_vtx[2] == mid_t)
):
return k
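        # Faces 0-2 did not match, so by elimination it is local face 3 (vertices 3, 0, 1)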
return 3
@wp.kernel
def _compute_tet_face_indices_kernel(
face_tet_indices: wp.array(dtype=wp.vec2i),
face_vertex_indices: wp.array(dtype=wp.vec3i),
tet_vertex_indices: wp.array2d(dtype=int),
tet_face_indices: wp.array2d(dtype=int),
):
e = wp.tid()
face_vtx = face_vertex_indices[e]
face_tets = face_tet_indices[e]
t0 = face_tets[0]
t0_vtx = wp.vec4i(
tet_vertex_indices[t0, 0], tet_vertex_indices[t0, 1], tet_vertex_indices[t0, 2], tet_vertex_indices[t0, 3]
)
t0_face = TetmeshSpaceTopology._find_face_index_in_tet(face_vtx, t0_vtx)
tet_face_indices[t0, t0_face] = e
t1 = face_tets[1]
if t1 != t0:
t1_vtx = wp.vec4i(
tet_vertex_indices[t1, 0],
tet_vertex_indices[t1, 1],
tet_vertex_indices[t1, 2],
tet_vertex_indices[t1, 3],
)
t1_face = TetmeshSpaceTopology._find_face_index_in_tet(face_vtx, t1_vtx)
tet_face_indices[t1, t1_face] = e
class TetmeshPolynomialSpaceTopology(TetmeshSpaceTopology):
def __init__(self, mesh: Tetmesh, shape: TetrahedronPolynomialShapeFunctions):
super().__init__(mesh, shape, need_tet_edge_indices=shape.ORDER >= 2, need_tet_face_indices=shape.ORDER >= 3)
self.element_node_index = self._make_element_node_index()
def node_count(self) -> int:
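        # Interior node counts follow the standard simplex pattern: ORDER-1 per edge,
        # (ORDER-1)(ORDER-2)/2 per face, (ORDER-1)(ORDER-2)(ORDER-3)/6 per cell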
ORDER = self._shape.ORDER
INTERIOR_NODES_PER_EDGE = max(0, ORDER - 1)
INTERIOR_NODES_PER_FACE = max(0, ORDER - 2) * max(0, ORDER - 1) // 2
INTERIOR_NODES_PER_CELL = max(0, ORDER - 3) * max(0, ORDER - 2) * max(0, ORDER - 1) // 6
return (
self._mesh.vertex_count()
+ self._mesh.edge_count() * INTERIOR_NODES_PER_EDGE
+ self._mesh.side_count() * INTERIOR_NODES_PER_FACE
+ self._mesh.cell_count() * INTERIOR_NODES_PER_CELL
)
def _make_element_node_index(self):
ORDER = self._shape.ORDER
INTERIOR_NODES_PER_EDGE = wp.constant(max(0, ORDER - 1))
INTERIOR_NODES_PER_FACE = wp.constant(max(0, ORDER - 2) * max(0, ORDER - 1) // 2)
INTERIOR_NODES_PER_CELL = wp.constant(max(0, ORDER - 3) * max(0, ORDER - 2) * max(0, ORDER - 1) // 6)
@cache.dynamic_func(suffix=self.name)
def element_node_index(
geo_arg: Tetmesh.CellArg,
topo_arg: TetmeshTopologyArg,
element_index: ElementIndex,
node_index_in_elt: int,
):
node_type, type_index = self._shape.node_type_and_type_index(node_index_in_elt)
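            # Global numbering: all vertices first, then edge-interior, face-interior,
            # and finally cell-interior nodes; global_offset accumulates in that order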
if node_type == TetrahedronPolynomialShapeFunctions.VERTEX:
return geo_arg.tet_vertex_indices[element_index][type_index]
global_offset = topo_arg.vertex_count
if node_type == TetrahedronPolynomialShapeFunctions.EDGE:
edge = type_index // INTERIOR_NODES_PER_EDGE
edge_node = type_index - INTERIOR_NODES_PER_EDGE * edge
global_edge_index = topo_arg.tet_edge_indices[element_index][edge]
# Test if we need to swap edge direction
if INTERIOR_NODES_PER_EDGE > 1:
if edge < 3:
c1 = edge
c2 = (edge + 1) % 3
else:
c1 = edge - 3
c2 = 3
if geo_arg.tet_vertex_indices[element_index][c1] > geo_arg.tet_vertex_indices[element_index][c2]:
edge_node = INTERIOR_NODES_PER_EDGE - 1 - edge_node
return global_offset + INTERIOR_NODES_PER_EDGE * global_edge_index + edge_node
global_offset += INTERIOR_NODES_PER_EDGE * topo_arg.edge_count
if node_type == TetrahedronPolynomialShapeFunctions.FACE:
face = type_index // INTERIOR_NODES_PER_FACE
face_node = type_index - INTERIOR_NODES_PER_FACE * face
global_face_index = topo_arg.tet_face_indices[element_index][face]
if INTERIOR_NODES_PER_FACE == 3:
                    # Hard-coded for the P4 case: 3 nodes per face
                    # Higher orders would require rotating triangle coordinates; this is not supported yet
vidx = geo_arg.tet_vertex_indices[element_index][(face + face_node) % 4]
fvi = topo_arg.face_vertex_indices[global_face_index]
if vidx == fvi[0]:
face_node = 0
elif vidx == fvi[1]:
face_node = 1
else:
face_node = 2
return global_offset + INTERIOR_NODES_PER_FACE * global_face_index + face_node
global_offset += INTERIOR_NODES_PER_FACE * topo_arg.face_count
return global_offset + INTERIOR_NODES_PER_CELL * element_index + type_index
return element_node_index
def make_tetmesh_space_topology(mesh: Tetmesh, shape: ShapeFunction):
if isinstance(shape, TetrahedronPolynomialShapeFunctions):
return forward_base_topology(TetmeshPolynomialSpaceTopology, mesh, shape)
raise ValueError(f"Unsupported shape function {shape.name}")
| 8,321 | Python | 35.986667 | 118 | 0.569403 |
NVIDIA/warp/warp/fem/space/shape/cube_shape_function.py | import math
import numpy as np
import warp as wp
from warp.fem import cache
from warp.fem.geometry import Grid3D
from warp.fem.polynomial import Polynomial, is_closed, lagrange_scales, quadrature_1d
from warp.fem.types import Coords
from .tet_shape_function import TetrahedronPolynomialShapeFunctions
class CubeTripolynomialShapeFunctions:
VERTEX = 0
EDGE = 1
FACE = 2
INTERIOR = 3
def __init__(self, degree: int, family: Polynomial):
self.family = family
self.ORDER = wp.constant(degree)
self.NODES_PER_ELEMENT = wp.constant((degree + 1) ** 3)
self.NODES_PER_EDGE = wp.constant(degree + 1)
lobatto_coords, lobatto_weight = quadrature_1d(point_count=degree + 1, family=family)
lagrange_scale = lagrange_scales(lobatto_coords)
NodeVec = wp.types.vector(length=degree + 1, dtype=wp.float32)
self.LOBATTO_COORDS = wp.constant(NodeVec(lobatto_coords))
self.LOBATTO_WEIGHT = wp.constant(NodeVec(lobatto_weight))
self.LAGRANGE_SCALE = wp.constant(NodeVec(lagrange_scale))
self.ORDER_PLUS_ONE = wp.constant(self.ORDER + 1)
self._node_ijk = self._make_node_ijk()
self.node_type_and_type_index = self._make_node_type_and_type_index()
@property
def name(self) -> str:
return f"Cube_Q{self.ORDER}_{self.family}"
@wp.func
def _vertex_coords_f(vidx_in_cell: int):
x = vidx_in_cell // 4
y = (vidx_in_cell - 4 * x) // 2
z = vidx_in_cell - 4 * x - 2 * y
return wp.vec3(float(x), float(y), float(z))
def _make_node_ijk(self):
ORDER_PLUS_ONE = self.ORDER_PLUS_ONE
def node_ijk(
node_index_in_elt: int,
):
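            # Lexicographic decomposition of the local node index:
            # node_index_in_elt == (node_i * (ORDER + 1) + node_j) * (ORDER + 1) + node_k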
node_i = node_index_in_elt // (ORDER_PLUS_ONE * ORDER_PLUS_ONE)
node_jk = node_index_in_elt - ORDER_PLUS_ONE * ORDER_PLUS_ONE * node_i
node_j = node_jk // ORDER_PLUS_ONE
node_k = node_jk - ORDER_PLUS_ONE * node_j
return node_i, node_j, node_k
return cache.get_func(node_ijk, self.name)
def _make_node_type_and_type_index(self):
ORDER = self.ORDER
@cache.dynamic_func(suffix=self.name)
def node_type_and_type_index(
node_index_in_elt: int,
):
i, j, k = self._node_ijk(node_index_in_elt)
zi = wp.select(i == 0, 0, 1)
zj = wp.select(j == 0, 0, 1)
zk = wp.select(k == 0, 0, 1)
mi = wp.select(i == ORDER, 0, 1)
mj = wp.select(j == ORDER, 0, 1)
mk = wp.select(k == ORDER, 0, 1)
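            # zi/zj/zk (resp. mi/mj/mk) are 1 exactly when the index is at the low (resp. high)
            # end, so e.g. zi + mi == 1 means index i lies on the element boundary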
if zi + mi == 1:
if zj + mj == 1:
if zk + mk == 1:
# vertex
type_instance = mi * 4 + mj * 2 + mk
return CubeTripolynomialShapeFunctions.VERTEX, type_instance, 0
# z edge
type_instance = 8 + mi * 2 + mj
type_index = k - 1
return CubeTripolynomialShapeFunctions.EDGE, type_instance, type_index
if zk + mk == 1:
# y edge
type_instance = 4 + mk * 2 + mi
type_index = j - 1
return CubeTripolynomialShapeFunctions.EDGE, type_instance, type_index
# x face
type_instance = mi
type_index = wp.select(mi == 1, (j - 1) * (ORDER - 1) + k - 1, (k - 1) * (ORDER - 1) + j - 1)
return CubeTripolynomialShapeFunctions.FACE, type_instance, type_index
if zj + mj == 1:
if zk + mk == 1:
# x edge
type_instance = mj * 2 + mk
type_index = i - 1
return CubeTripolynomialShapeFunctions.EDGE, type_instance, type_index
# y face
type_instance = 2 + mj
type_index = wp.select(mj == 1, (i - 1) * (ORDER - 1) + k - 1, (k - 1) * (ORDER - 1) + i - 1)
return CubeTripolynomialShapeFunctions.FACE, type_instance, type_index
if zk + mk == 1:
# z face
type_instance = 4 + mk
type_index = wp.select(mk == 1, (j - 1) * (ORDER - 1) + i - 1, (i - 1) * (ORDER - 1) + j - 1)
return CubeTripolynomialShapeFunctions.FACE, type_instance, type_index
type_index = ((i - 1) * (ORDER - 1) + (j - 1)) * (ORDER - 1) + k - 1
return CubeTripolynomialShapeFunctions.INTERIOR, 0, type_index
return node_type_and_type_index
def make_node_coords_in_element(self):
LOBATTO_COORDS = self.LOBATTO_COORDS
@cache.dynamic_func(suffix=self.name)
def node_coords_in_element(
node_index_in_elt: int,
):
node_i, node_j, node_k = self._node_ijk(node_index_in_elt)
return Coords(LOBATTO_COORDS[node_i], LOBATTO_COORDS[node_j], LOBATTO_COORDS[node_k])
return node_coords_in_element
def make_node_quadrature_weight(self):
ORDER = self.ORDER
LOBATTO_WEIGHT = self.LOBATTO_WEIGHT
def node_quadrature_weight(
node_index_in_elt: int,
):
node_i, node_j, node_k = self._node_ijk(node_index_in_elt)
return LOBATTO_WEIGHT[node_i] * LOBATTO_WEIGHT[node_j] * LOBATTO_WEIGHT[node_k]
def node_quadrature_weight_linear(
node_index_in_elt: int,
):
return 0.125
if ORDER == 1:
return cache.get_func(node_quadrature_weight_linear, self.name)
return cache.get_func(node_quadrature_weight, self.name)
def make_trace_node_quadrature_weight(self):
ORDER = self.ORDER
LOBATTO_WEIGHT = self.LOBATTO_WEIGHT
def trace_node_quadrature_weight(
node_index_in_elt: int,
):
            # We're either on a side interior or at a vertex
            # If we find one index at an extremum, use the two others
node_i, node_j, node_k = self._node_ijk(node_index_in_elt)
if node_i == 0 or node_i == ORDER:
return LOBATTO_WEIGHT[node_j] * LOBATTO_WEIGHT[node_k]
if node_j == 0 or node_j == ORDER:
return LOBATTO_WEIGHT[node_i] * LOBATTO_WEIGHT[node_k]
return LOBATTO_WEIGHT[node_i] * LOBATTO_WEIGHT[node_j]
def trace_node_quadrature_weight_linear(
node_index_in_elt: int,
):
return 0.25
def trace_node_quadrature_weight_open(
node_index_in_elt: int,
):
return 0.0
if not is_closed(self.family):
return cache.get_func(trace_node_quadrature_weight_open, self.name)
if ORDER == 1:
return cache.get_func(trace_node_quadrature_weight_linear, self.name)
return cache.get_func(trace_node_quadrature_weight, self.name)
def make_element_inner_weight(self):
ORDER_PLUS_ONE = self.ORDER_PLUS_ONE
LOBATTO_COORDS = self.LOBATTO_COORDS
LAGRANGE_SCALE = self.LAGRANGE_SCALE
def element_inner_weight(
coords: Coords,
node_index_in_elt: int,
):
node_i, node_j, node_k = self._node_ijk(node_index_in_elt)
w = float(1.0)
for k in range(ORDER_PLUS_ONE):
if k != node_i:
w *= coords[0] - LOBATTO_COORDS[k]
if k != node_j:
w *= coords[1] - LOBATTO_COORDS[k]
if k != node_k:
w *= coords[2] - LOBATTO_COORDS[k]
w *= LAGRANGE_SCALE[node_i] * LAGRANGE_SCALE[node_j] * LAGRANGE_SCALE[node_k]
return w
def element_inner_weight_linear(
coords: Coords,
node_index_in_elt: int,
):
v = CubeTripolynomialShapeFunctions._vertex_coords_f(node_index_in_elt)
wx = (1.0 - coords[0]) * (1.0 - v[0]) + v[0] * coords[0]
wy = (1.0 - coords[1]) * (1.0 - v[1]) + v[1] * coords[1]
wz = (1.0 - coords[2]) * (1.0 - v[2]) + v[2] * coords[2]
return wx * wy * wz
if self.ORDER == 1 and is_closed(self.family):
return cache.get_func(element_inner_weight_linear, self.name)
return cache.get_func(element_inner_weight, self.name)
def make_element_inner_weight_gradient(self):
ORDER_PLUS_ONE = self.ORDER_PLUS_ONE
LOBATTO_COORDS = self.LOBATTO_COORDS
LAGRANGE_SCALE = self.LAGRANGE_SCALE
def element_inner_weight_gradient(
coords: Coords,
node_index_in_elt: int,
):
node_i, node_j, node_k = self._node_ijk(node_index_in_elt)
prefix_xy = float(1.0)
prefix_yz = float(1.0)
prefix_zx = float(1.0)
for k in range(ORDER_PLUS_ONE):
if k != node_i:
prefix_yz *= coords[0] - LOBATTO_COORDS[k]
if k != node_j:
prefix_zx *= coords[1] - LOBATTO_COORDS[k]
if k != node_k:
prefix_xy *= coords[2] - LOBATTO_COORDS[k]
prefix_x = prefix_zx * prefix_xy
prefix_y = prefix_yz * prefix_xy
prefix_z = prefix_zx * prefix_yz
grad_x = float(0.0)
grad_y = float(0.0)
grad_z = float(0.0)
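            # Single-pass product-rule accumulation: after each factor, grad_* holds the
            # derivative of the partial Lagrange product and prefix_* the product itself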
for k in range(ORDER_PLUS_ONE):
if k != node_i:
delta_x = coords[0] - LOBATTO_COORDS[k]
grad_x = grad_x * delta_x + prefix_x
prefix_x *= delta_x
if k != node_j:
delta_y = coords[1] - LOBATTO_COORDS[k]
grad_y = grad_y * delta_y + prefix_y
prefix_y *= delta_y
if k != node_k:
delta_z = coords[2] - LOBATTO_COORDS[k]
grad_z = grad_z * delta_z + prefix_z
prefix_z *= delta_z
grad = (
LAGRANGE_SCALE[node_i]
* LAGRANGE_SCALE[node_j]
* LAGRANGE_SCALE[node_k]
* wp.vec3(
grad_x,
grad_y,
grad_z,
)
)
return grad
def element_inner_weight_gradient_linear(
coords: Coords,
node_index_in_elt: int,
):
v = CubeTripolynomialShapeFunctions._vertex_coords_f(node_index_in_elt)
wx = (1.0 - coords[0]) * (1.0 - v[0]) + v[0] * coords[0]
wy = (1.0 - coords[1]) * (1.0 - v[1]) + v[1] * coords[1]
wz = (1.0 - coords[2]) * (1.0 - v[2]) + v[2] * coords[2]
dx = 2.0 * v[0] - 1.0
dy = 2.0 * v[1] - 1.0
dz = 2.0 * v[2] - 1.0
return wp.vec3(dx * wy * wz, dy * wz * wx, dz * wx * wy)
if self.ORDER == 1 and is_closed(self.family):
return cache.get_func(element_inner_weight_gradient_linear, self.name)
return cache.get_func(element_inner_weight_gradient, self.name)
def element_node_hexes(self):
from warp.fem.utils import grid_to_hexes
return grid_to_hexes(self.ORDER, self.ORDER, self.ORDER)
def element_node_tets(self):
from warp.fem.utils import grid_to_tets
return grid_to_tets(self.ORDER, self.ORDER, self.ORDER)
class CubeSerendipityShapeFunctions:
"""
    Serendipity element ~ tensor-product space without interior nodes
    Edge shape functions are the usual Lagrange shape functions times a bilinear function in the normal directions
    Corner shape functions are trilinear shape functions times a function of (x^{d-1} + y^{d-1})
"""
# Node categories
VERTEX = wp.constant(0)
EDGE_X = wp.constant(1)
EDGE_Y = wp.constant(2)
def __init__(self, degree: int, family: Polynomial):
if not is_closed(family):
raise ValueError("A closed polynomial family is required to define serendipity elements")
if degree not in [2, 3]:
raise NotImplementedError("Serendipity element only implemented for order 2 or 3")
self.family = family
self.ORDER = wp.constant(degree)
self.NODES_PER_ELEMENT = wp.constant(8 + 12 * (degree - 1))
self.NODES_PER_EDGE = wp.constant(degree + 1)
lobatto_coords, lobatto_weight = quadrature_1d(point_count=degree + 1, family=family)
lagrange_scale = lagrange_scales(lobatto_coords)
NodeVec = wp.types.vector(length=degree + 1, dtype=wp.float32)
self.LOBATTO_COORDS = wp.constant(NodeVec(lobatto_coords))
self.LOBATTO_WEIGHT = wp.constant(NodeVec(lobatto_weight))
self.LAGRANGE_SCALE = wp.constant(NodeVec(lagrange_scale))
self.ORDER_PLUS_ONE = wp.constant(self.ORDER + 1)
self.node_type_and_type_index = self._get_node_type_and_type_index()
self._node_lobatto_indices = self._get_node_lobatto_indices()
@property
def name(self) -> str:
return f"Cube_S{self.ORDER}_{self.family}"
def _get_node_type_and_type_index(self):
@cache.dynamic_func(suffix=self.name)
def node_type_and_index(
node_index_in_elt: int,
):
if node_index_in_elt < 8:
return CubeSerendipityShapeFunctions.VERTEX, node_index_in_elt
type_index = (node_index_in_elt - 8) // 3
side = node_index_in_elt - 8 - 3 * type_index
return CubeSerendipityShapeFunctions.EDGE_X + side, type_index
return node_type_and_index
@wp.func
def _vertex_coords(vidx_in_cell: int):
x = vidx_in_cell // 4
y = (vidx_in_cell - 4 * x) // 2
z = vidx_in_cell - 4 * x - 2 * y
return wp.vec3i(x, y, z)
@wp.func
def _edge_coords(type_index: int):
index_in_side = type_index // 4
side_offset = type_index - 4 * index_in_side
return wp.vec3i(index_in_side + 1, side_offset // 2, side_offset & 1)
@wp.func
def _edge_axis(node_type: int):
return node_type - CubeSerendipityShapeFunctions.EDGE_X
@wp.func
def _cube_edge_index(node_type: int, type_index: int):
index_in_side = type_index // 4
side_offset = type_index - 4 * index_in_side
return 4 * (node_type - CubeSerendipityShapeFunctions.EDGE_X) + side_offset, index_in_side
def _get_node_lobatto_indices(self):
ORDER = self.ORDER
@cache.dynamic_func(suffix=self.name)
def node_lobatto_indices(node_type: int, type_index: int):
if node_type == CubeSerendipityShapeFunctions.VERTEX:
return CubeSerendipityShapeFunctions._vertex_coords(type_index) * ORDER
axis = CubeSerendipityShapeFunctions._edge_axis(node_type)
local_coords = CubeSerendipityShapeFunctions._edge_coords(type_index)
local_indices = wp.vec3i(local_coords[0], local_coords[1] * ORDER, local_coords[2] * ORDER)
return Grid3D._local_to_world(axis, local_indices)
return node_lobatto_indices
def make_node_coords_in_element(self):
LOBATTO_COORDS = self.LOBATTO_COORDS
@cache.dynamic_func(suffix=self.name)
def node_coords_in_element(
node_index_in_elt: int,
):
node_type, type_index = self.node_type_and_type_index(node_index_in_elt)
node_coords = self._node_lobatto_indices(node_type, type_index)
return Coords(
LOBATTO_COORDS[node_coords[0]], LOBATTO_COORDS[node_coords[1]], LOBATTO_COORDS[node_coords[2]]
)
return node_coords_in_element
def make_node_quadrature_weight(self):
ORDER = self.ORDER
@cache.dynamic_func(suffix=self.name)
def node_quadrature_weight(
node_index_in_elt: int,
):
node_type, type_index = self.node_type_and_type_index(node_index_in_elt)
if node_type == CubeSerendipityShapeFunctions.VERTEX:
return 1.0 / float(8 * ORDER * ORDER * ORDER)
return (1.0 - 1.0 / float(ORDER * ORDER * ORDER)) / float(12 * (ORDER - 1))
return node_quadrature_weight
def make_trace_node_quadrature_weight(self):
ORDER = self.ORDER
@cache.dynamic_func(suffix=self.name)
def trace_node_quadrature_weight(
node_index_in_elt: int,
):
node_type, type_index = self.node_type_and_type_index(node_index_in_elt)
if node_type == CubeSerendipityShapeFunctions.VERTEX:
return 0.25 / float(ORDER * ORDER)
return (0.25 - 0.25 / float(ORDER * ORDER)) / float(ORDER - 1)
return trace_node_quadrature_weight
def make_element_inner_weight(self):
ORDER = self.ORDER
ORDER_PLUS_ONE = self.ORDER_PLUS_ONE
LOBATTO_COORDS = self.LOBATTO_COORDS
LAGRANGE_SCALE = self.LAGRANGE_SCALE
DEGREE_3_SPHERE_RAD = wp.constant(2 * 0.5**2 + (0.5 - LOBATTO_COORDS[1]) ** 2)
DEGREE_3_SPHERE_SCALE = 1.0 / (0.75 - DEGREE_3_SPHERE_RAD)
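        # The degree-3 vertex functions multiply the trilinear hat by a sphere equation;
        # the radius appears chosen so the sphere passes through the edge nodes adjacent
        # to the vertices, and the scale normalizes the value to 1 at the vertex itself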
@cache.dynamic_func(suffix=self.name)
def element_inner_weight(
coords: Coords,
node_index_in_elt: int,
):
node_type, type_index = self.node_type_and_type_index(node_index_in_elt)
if node_type == CubeSerendipityShapeFunctions.VERTEX:
node_ijk = CubeSerendipityShapeFunctions._vertex_coords(type_index)
cx = wp.select(node_ijk[0] == 0, coords[0], 1.0 - coords[0])
cy = wp.select(node_ijk[1] == 0, coords[1], 1.0 - coords[1])
cz = wp.select(node_ijk[2] == 0, coords[2], 1.0 - coords[2])
w = cx * cy * cz
if ORDER == 2:
w *= cx + cy + cz - 3.0 + LOBATTO_COORDS[1]
return w * LAGRANGE_SCALE[0]
if ORDER == 3:
w *= (
(cx - 0.5) * (cx - 0.5)
+ (cy - 0.5) * (cy - 0.5)
+ (cz - 0.5) * (cz - 0.5)
- DEGREE_3_SPHERE_RAD
)
return w * DEGREE_3_SPHERE_SCALE
axis = CubeSerendipityShapeFunctions._edge_axis(node_type)
node_all = CubeSerendipityShapeFunctions._edge_coords(type_index)
local_coords = Grid3D._world_to_local(axis, coords)
w = float(1.0)
w *= wp.select(node_all[1] == 0, local_coords[1], 1.0 - local_coords[1])
w *= wp.select(node_all[2] == 0, local_coords[2], 1.0 - local_coords[2])
for k in range(ORDER_PLUS_ONE):
if k != node_all[0]:
w *= local_coords[0] - LOBATTO_COORDS[k]
w *= LAGRANGE_SCALE[node_all[0]]
return w
return element_inner_weight
def make_element_inner_weight_gradient(self):
ORDER = self.ORDER
ORDER_PLUS_ONE = self.ORDER_PLUS_ONE
LOBATTO_COORDS = self.LOBATTO_COORDS
LAGRANGE_SCALE = self.LAGRANGE_SCALE
DEGREE_3_SPHERE_RAD = wp.constant(2 * 0.5**2 + (0.5 - LOBATTO_COORDS[1]) ** 2)
DEGREE_3_SPHERE_SCALE = 1.0 / (0.75 - DEGREE_3_SPHERE_RAD)
@cache.dynamic_func(suffix=self.name)
def element_inner_weight_gradient(
coords: Coords,
node_index_in_elt: int,
):
node_type, type_index = self.node_type_and_type_index(node_index_in_elt)
if node_type == CubeSerendipityShapeFunctions.VERTEX:
node_ijk = CubeSerendipityShapeFunctions._vertex_coords(type_index)
cx = wp.select(node_ijk[0] == 0, coords[0], 1.0 - coords[0])
cy = wp.select(node_ijk[1] == 0, coords[1], 1.0 - coords[1])
cz = wp.select(node_ijk[2] == 0, coords[2], 1.0 - coords[2])
gx = wp.select(node_ijk[0] == 0, 1.0, -1.0)
gy = wp.select(node_ijk[1] == 0, 1.0, -1.0)
gz = wp.select(node_ijk[2] == 0, 1.0, -1.0)
if ORDER == 2:
w = cx + cy + cz - 3.0 + LOBATTO_COORDS[1]
grad_x = cy * cz * gx * (w + cx)
grad_y = cz * cx * gy * (w + cy)
grad_z = cx * cy * gz * (w + cz)
return wp.vec3(grad_x, grad_y, grad_z) * LAGRANGE_SCALE[0]
if ORDER == 3:
w = (
(cx - 0.5) * (cx - 0.5)
+ (cy - 0.5) * (cy - 0.5)
+ (cz - 0.5) * (cz - 0.5)
- DEGREE_3_SPHERE_RAD
)
dw_dcx = 2.0 * cx - 1.0
dw_dcy = 2.0 * cy - 1.0
dw_dcz = 2.0 * cz - 1.0
grad_x = cy * cz * gx * (w + dw_dcx * cx)
grad_y = cz * cx * gy * (w + dw_dcy * cy)
grad_z = cx * cy * gz * (w + dw_dcz * cz)
return wp.vec3(grad_x, grad_y, grad_z) * DEGREE_3_SPHERE_SCALE
axis = CubeSerendipityShapeFunctions._edge_axis(node_type)
node_all = CubeSerendipityShapeFunctions._edge_coords(type_index)
local_coords = Grid3D._world_to_local(axis, coords)
w_long = wp.select(node_all[1] == 0, local_coords[1], 1.0 - local_coords[1])
w_lat = wp.select(node_all[2] == 0, local_coords[2], 1.0 - local_coords[2])
g_long = wp.select(node_all[1] == 0, 1.0, -1.0)
g_lat = wp.select(node_all[2] == 0, 1.0, -1.0)
w_alt = LAGRANGE_SCALE[node_all[0]]
g_alt = float(0.0)
prefix_alt = LAGRANGE_SCALE[node_all[0]]
for k in range(ORDER_PLUS_ONE):
if k != node_all[0]:
delta_alt = local_coords[0] - LOBATTO_COORDS[k]
w_alt *= delta_alt
g_alt = g_alt * delta_alt + prefix_alt
prefix_alt *= delta_alt
local_grad = wp.vec3(g_alt * w_long * w_lat, w_alt * g_long * w_lat, w_alt * w_long * g_lat)
return Grid3D._local_to_world(axis, local_grad)
return element_inner_weight_gradient
def element_node_tets(self):
from warp.fem.utils import grid_to_tets
if self.ORDER == 2:
element_tets = np.array(
[
[0, 8, 9, 10],
[1, 11, 10, 15],
[2, 9, 14, 13],
[3, 15, 13, 17],
[4, 12, 8, 16],
[5, 18, 16, 11],
[6, 14, 12, 19],
[7, 19, 18, 17],
[16, 12, 18, 11],
[8, 16, 12, 11],
[12, 19, 18, 14],
[14, 19, 17, 18],
[10, 9, 15, 8],
[10, 8, 11, 15],
[9, 13, 15, 14],
[13, 14, 17, 15],
]
)
middle_hex = np.array([8, 11, 9, 15, 12, 18, 14, 17])
middle_tets = middle_hex[grid_to_tets(1, 1, 1)]
return np.concatenate((element_tets, middle_tets))
raise NotImplementedError()
class CubeNonConformingPolynomialShapeFunctions:
# embeds the largest regular tet centered at (0.5, 0.5, 0.5) into the reference cube
_tet_height = 2.0 / 3.0
_tet_side = math.sqrt(3.0 / 2.0) * _tet_height
_tet_face_height = math.sqrt(3.0) / 2.0 * _tet_side
_tet_to_cube = np.array(
[
[_tet_side, _tet_side / 2.0, _tet_side / 2.0],
[0.0, _tet_face_height, _tet_face_height / 3.0],
[0.0, 0.0, _tet_height],
]
)
_TET_OFFSET = wp.constant(wp.vec3(0.5 - 0.5 * _tet_side, 0.5 - _tet_face_height / 3.0, 0.5 - 0.25 * _tet_height))
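    # _tet_to_cube maps reference-tet coordinates into the cube; its inverse pulls cube
    # coordinates back to the tet, and its inverse-transpose pushes gradients forward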
def __init__(self, degree: int):
self._tet_shape = TetrahedronPolynomialShapeFunctions(degree=degree)
self.ORDER = self._tet_shape.ORDER
self.NODES_PER_ELEMENT = self._tet_shape.NODES_PER_ELEMENT
self.element_node_tets = self._tet_shape.element_node_tets
@property
def name(self) -> str:
return f"Cube_P{self.ORDER}d"
def make_node_coords_in_element(self):
node_coords_in_tet = self._tet_shape.make_node_coords_in_element()
TET_TO_CUBE = wp.constant(wp.mat33(self._tet_to_cube))
@cache.dynamic_func(suffix=self.name)
def node_coords_in_element(
node_index_in_elt: int,
):
tet_coords = node_coords_in_tet(node_index_in_elt)
return TET_TO_CUBE * tet_coords + CubeNonConformingPolynomialShapeFunctions._TET_OFFSET
return node_coords_in_element
def make_node_quadrature_weight(self):
NODES_PER_ELEMENT = self.NODES_PER_ELEMENT
@cache.dynamic_func(suffix=self.name)
def node_uniform_quadrature_weight(
node_index_in_elt: int,
):
return 1.0 / float(NODES_PER_ELEMENT)
return node_uniform_quadrature_weight
def make_trace_node_quadrature_weight(self):
# Non-conforming, zero measure on sides
@wp.func
def zero(node_index_in_elt: int):
return 0.0
return zero
def make_element_inner_weight(self):
tet_inner_weight = self._tet_shape.make_element_inner_weight()
CUBE_TO_TET = wp.constant(wp.mat33(np.linalg.inv(self._tet_to_cube)))
@cache.dynamic_func(suffix=self.name)
def element_inner_weight(
coords: Coords,
node_index_in_elt: int,
):
tet_coords = CUBE_TO_TET * (coords - CubeNonConformingPolynomialShapeFunctions._TET_OFFSET)
return tet_inner_weight(tet_coords, node_index_in_elt)
return element_inner_weight
def make_element_inner_weight_gradient(self):
tet_inner_weight_gradient = self._tet_shape.make_element_inner_weight_gradient()
CUBE_TO_TET = wp.constant(wp.mat33(np.linalg.inv(self._tet_to_cube)))
@cache.dynamic_func(suffix=self.name)
def element_inner_weight_gradient(
coords: Coords,
node_index_in_elt: int,
):
tet_coords = CUBE_TO_TET * (coords - CubeNonConformingPolynomialShapeFunctions._TET_OFFSET)
grad = tet_inner_weight_gradient(tet_coords, node_index_in_elt)
return wp.transpose(CUBE_TO_TET) * grad
return element_inner_weight_gradient
| 26,552 | Python | 35.423868 | 117 | 0.532239 |
NVIDIA/warp/warp/fem/space/shape/__init__.py | from enum import Enum
from typing import Optional
from warp.fem.geometry import element as _element
from warp.fem.polynomial import Polynomial
from .cube_shape_function import (
CubeNonConformingPolynomialShapeFunctions,
CubeSerendipityShapeFunctions,
CubeTripolynomialShapeFunctions,
)
from .shape_function import ConstantShapeFunction, ShapeFunction
from .square_shape_function import (
SquareBipolynomialShapeFunctions,
SquareNonConformingPolynomialShapeFunctions,
SquareSerendipityShapeFunctions,
)
from .tet_shape_function import TetrahedronNonConformingPolynomialShapeFunctions, TetrahedronPolynomialShapeFunctions
from .triangle_shape_function import Triangle2DNonConformingPolynomialShapeFunctions, Triangle2DPolynomialShapeFunctions
class ElementBasis(Enum):
"""Choice of basis function to equip individual elements"""
LAGRANGE = 0
"""Lagrange basis functions :math:`P_k` for simplices, tensor products :math:`Q_k` for squares and cubes"""
SERENDIPITY = 1
"""Serendipity elements :math:`S_k`, corresponding to Lagrange nodes with interior points removed (for degree <= 3)"""
NONCONFORMING_POLYNOMIAL = 2
"""Simplex Lagrange basis functions :math:`P_{kd}` embedded into non conforming reference elements (e.g. squares or cubes). Discontinuous only."""
def get_shape_function(
element: _element.Element,
space_dimension: int,
degree: int,
element_basis: ElementBasis,
family: Optional[Polynomial] = None,
):
"""
Equips a reference element with a shape function basis.
Args:
element: the reference element on which to build the shape function
space_dimension: the dimension of the embedding space
degree: polynomial degree of the per-element shape functions
element_basis: type of basis function for the individual elements
family: Polynomial family used to generate the shape function basis. If not provided, a reasonable basis is chosen.
Returns:
the corresponding shape function
"""
if degree == 0:
return ConstantShapeFunction(element, space_dimension)
if family is None:
family = Polynomial.LOBATTO_GAUSS_LEGENDRE
if isinstance(element, _element.Square):
if element_basis == ElementBasis.NONCONFORMING_POLYNOMIAL:
return SquareNonConformingPolynomialShapeFunctions(degree=degree)
if element_basis == ElementBasis.SERENDIPITY and degree > 1:
return SquareSerendipityShapeFunctions(degree=degree, family=family)
return SquareBipolynomialShapeFunctions(degree=degree, family=family)
if isinstance(element, _element.Triangle):
if element_basis == ElementBasis.NONCONFORMING_POLYNOMIAL:
return Triangle2DNonConformingPolynomialShapeFunctions(degree=degree)
if element_basis == ElementBasis.SERENDIPITY and degree > 2:
raise NotImplementedError("Serendipity variant not implemented yet for Triangle elements")
return Triangle2DPolynomialShapeFunctions(degree=degree)
if isinstance(element, _element.Cube):
if element_basis == ElementBasis.NONCONFORMING_POLYNOMIAL:
return CubeNonConformingPolynomialShapeFunctions(degree=degree)
if element_basis == ElementBasis.SERENDIPITY and degree > 1:
return CubeSerendipityShapeFunctions(degree=degree, family=family)
return CubeTripolynomialShapeFunctions(degree=degree, family=family)
if isinstance(element, _element.Tetrahedron):
if element_basis == ElementBasis.NONCONFORMING_POLYNOMIAL:
return TetrahedronNonConformingPolynomialShapeFunctions(degree=degree)
if element_basis == ElementBasis.SERENDIPITY and degree > 2:
raise NotImplementedError("Serendipity variant not implemented yet for Tet elements")
return TetrahedronPolynomialShapeFunctions(degree=degree)
    raise NotImplementedError("Unrecognized element type")
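# Usage sketch (hedged; assumes _element.Square() takes no constructor arguments):
#   shape = get_shape_function(_element.Square(), space_dimension=2, degree=2,
#                              element_basis=ElementBasis.SERENDIPITY)
#   shape.NODES_PER_ELEMENT  # -> 8 nodes for the quadratic serendipity quad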
| 3,962 | Python | 42.54945 | 150 | 0.751388 |
NVIDIA/warp/warp/fem/space/shape/square_shape_function.py | import math
import numpy as np
import warp as wp
from warp.fem import cache
from warp.fem.polynomial import Polynomial, is_closed, lagrange_scales, quadrature_1d
from warp.fem.types import Coords
from .triangle_shape_function import Triangle2DPolynomialShapeFunctions
class SquareBipolynomialShapeFunctions:
def __init__(self, degree: int, family: Polynomial):
self.family = family
self.ORDER = wp.constant(degree)
self.NODES_PER_ELEMENT = wp.constant((degree + 1) * (degree + 1))
self.NODES_PER_SIDE = wp.constant(degree + 1)
lobatto_coords, lobatto_weight = quadrature_1d(point_count=degree + 1, family=family)
lagrange_scale = lagrange_scales(lobatto_coords)
NodeVec = wp.types.vector(length=degree + 1, dtype=wp.float32)
self.LOBATTO_COORDS = wp.constant(NodeVec(lobatto_coords))
self.LOBATTO_WEIGHT = wp.constant(NodeVec(lobatto_weight))
self.LAGRANGE_SCALE = wp.constant(NodeVec(lagrange_scale))
self.ORDER_PLUS_ONE = wp.constant(self.ORDER + 1)
@property
def name(self) -> str:
return f"Square_Q{self.ORDER}_{self.family}"
def make_node_coords_in_element(self):
ORDER = self.ORDER
LOBATTO_COORDS = self.LOBATTO_COORDS
@cache.dynamic_func(suffix=self.name)
def node_coords_in_element(
node_index_in_elt: int,
):
node_i = node_index_in_elt // (ORDER + 1)
node_j = node_index_in_elt - (ORDER + 1) * node_i
return Coords(LOBATTO_COORDS[node_i], LOBATTO_COORDS[node_j], 0.0)
return node_coords_in_element
def make_node_quadrature_weight(self):
ORDER = self.ORDER
LOBATTO_WEIGHT = self.LOBATTO_WEIGHT
def node_quadrature_weight(
node_index_in_elt: int,
):
node_i = node_index_in_elt // (ORDER + 1)
node_j = node_index_in_elt - (ORDER + 1) * node_i
return LOBATTO_WEIGHT[node_i] * LOBATTO_WEIGHT[node_j]
def node_quadrature_weight_linear(
node_index_in_elt: int,
):
return 0.25
if ORDER == 1:
return cache.get_func(node_quadrature_weight_linear, self.name)
return cache.get_func(node_quadrature_weight, self.name)
@wp.func
def _vertex_coords_f(vidx_in_cell: int):
x = vidx_in_cell // 2
y = vidx_in_cell - 2 * x
return wp.vec2(float(x), float(y))
def make_trace_node_quadrature_weight(self):
ORDER = self.ORDER
LOBATTO_WEIGHT = self.LOBATTO_WEIGHT
def trace_node_quadrature_weight(
node_index_in_elt: int,
):
            # We're either on a side interior or at a vertex
            # I.e., either both indices are at extrema, or only one is
            # Pick the interior one if possible; if both are at extrema, pick either
node_i = node_index_in_elt // (ORDER + 1)
if node_i > 0 and node_i < ORDER:
return LOBATTO_WEIGHT[node_i]
node_j = node_index_in_elt - (ORDER + 1) * node_i
return LOBATTO_WEIGHT[node_j]
def trace_node_quadrature_weight_linear(
node_index_in_elt: int,
):
return 0.5
def trace_node_quadrature_weight_open(
node_index_in_elt: int,
):
return 0.0
if not is_closed(self.family):
return cache.get_func(trace_node_quadrature_weight_open, self.name)
if ORDER == 1:
return cache.get_func(trace_node_quadrature_weight_linear, self.name)
return cache.get_func(trace_node_quadrature_weight, self.name)
def make_element_inner_weight(self):
ORDER_PLUS_ONE = self.ORDER_PLUS_ONE
LOBATTO_COORDS = self.LOBATTO_COORDS
LAGRANGE_SCALE = self.LAGRANGE_SCALE
def element_inner_weight(
coords: Coords,
node_index_in_elt: int,
):
node_i = node_index_in_elt // ORDER_PLUS_ONE
node_j = node_index_in_elt - ORDER_PLUS_ONE * node_i
w = float(1.0)
for k in range(ORDER_PLUS_ONE):
if k != node_i:
w *= coords[0] - LOBATTO_COORDS[k]
if k != node_j:
w *= coords[1] - LOBATTO_COORDS[k]
w *= LAGRANGE_SCALE[node_i] * LAGRANGE_SCALE[node_j]
return w
def element_inner_weight_linear(
coords: Coords,
node_index_in_elt: int,
):
v = SquareBipolynomialShapeFunctions._vertex_coords_f(node_index_in_elt)
wx = (1.0 - coords[0]) * (1.0 - v[0]) + v[0] * coords[0]
wy = (1.0 - coords[1]) * (1.0 - v[1]) + v[1] * coords[1]
return wx * wy
if self.ORDER == 1 and is_closed(self.family):
return cache.get_func(element_inner_weight_linear, self.name)
return cache.get_func(element_inner_weight, self.name)
def make_element_inner_weight_gradient(self):
ORDER_PLUS_ONE = self.ORDER_PLUS_ONE
LOBATTO_COORDS = self.LOBATTO_COORDS
LAGRANGE_SCALE = self.LAGRANGE_SCALE
def element_inner_weight_gradient(
coords: Coords,
node_index_in_elt: int,
):
node_i = node_index_in_elt // ORDER_PLUS_ONE
node_j = node_index_in_elt - ORDER_PLUS_ONE * node_i
prefix_x = float(1.0)
prefix_y = float(1.0)
for k in range(ORDER_PLUS_ONE):
if k != node_i:
prefix_y *= coords[0] - LOBATTO_COORDS[k]
if k != node_j:
prefix_x *= coords[1] - LOBATTO_COORDS[k]
grad_x = float(0.0)
grad_y = float(0.0)
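            # Single-pass product-rule accumulation, as in the 3D case: grad_* tracks the
            # derivative of the running Lagrange product while prefix_* tracks the product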
for k in range(ORDER_PLUS_ONE):
if k != node_i:
delta_x = coords[0] - LOBATTO_COORDS[k]
grad_x = grad_x * delta_x + prefix_x
prefix_x *= delta_x
if k != node_j:
delta_y = coords[1] - LOBATTO_COORDS[k]
grad_y = grad_y * delta_y + prefix_y
prefix_y *= delta_y
grad = LAGRANGE_SCALE[node_i] * LAGRANGE_SCALE[node_j] * wp.vec2(grad_x, grad_y)
return grad
def element_inner_weight_gradient_linear(
coords: Coords,
node_index_in_elt: int,
):
v = SquareBipolynomialShapeFunctions._vertex_coords_f(node_index_in_elt)
wx = (1.0 - coords[0]) * (1.0 - v[0]) + v[0] * coords[0]
wy = (1.0 - coords[1]) * (1.0 - v[1]) + v[1] * coords[1]
dx = 2.0 * v[0] - 1.0
dy = 2.0 * v[1] - 1.0
return wp.vec2(dx * wy, dy * wx)
if self.ORDER == 1 and is_closed(self.family):
return cache.get_func(element_inner_weight_gradient_linear, self.name)
return cache.get_func(element_inner_weight_gradient, self.name)
def element_node_triangulation(self):
from warp.fem.utils import grid_to_tris
return grid_to_tris(self.ORDER, self.ORDER)
class SquareSerendipityShapeFunctions:
"""
    Serendipity element ~ tensor-product space without interior nodes
    Side shape functions are the usual Lagrange shape functions times a linear function in the normal direction
    Corner shape functions are bilinear shape functions times a function of (x^{d-1} + y^{d-1})
"""
# Node categories
VERTEX = wp.constant(0)
EDGE_X = wp.constant(1)
EDGE_Y = wp.constant(2)
def __init__(self, degree: int, family: Polynomial):
if not is_closed(family):
raise ValueError("A closed polynomial family is required to define serendipity elements")
if degree not in [2, 3]:
raise NotImplementedError("Serendipity element only implemented for order 2 or 3")
self.family = family
self.ORDER = wp.constant(degree)
self.NODES_PER_ELEMENT = wp.constant(4 * degree)
self.NODES_PER_SIDE = wp.constant(degree + 1)
lobatto_coords, lobatto_weight = quadrature_1d(point_count=degree + 1, family=family)
lagrange_scale = lagrange_scales(lobatto_coords)
NodeVec = wp.types.vector(length=degree + 1, dtype=wp.float32)
self.LOBATTO_COORDS = wp.constant(NodeVec(lobatto_coords))
self.LOBATTO_WEIGHT = wp.constant(NodeVec(lobatto_weight))
self.LAGRANGE_SCALE = wp.constant(NodeVec(lagrange_scale))
self.ORDER_PLUS_ONE = wp.constant(self.ORDER + 1)
self.node_type_and_type_index = self._get_node_type_and_type_index()
self._node_lobatto_indices = self._get_node_lobatto_indices()
@property
def name(self) -> str:
return f"Square_S{self.ORDER}_{self.family}"
def _get_node_type_and_type_index(self):
@cache.dynamic_func(suffix=self.name)
def node_type_and_index(
node_index_in_elt: int,
):
if node_index_in_elt < 4:
return SquareSerendipityShapeFunctions.VERTEX, node_index_in_elt
type_index = (node_index_in_elt - 4) // 2
side = node_index_in_elt - 4 - 2 * type_index
return SquareSerendipityShapeFunctions.EDGE_X + side, type_index
return node_type_and_index
@wp.func
def side_offset_and_index(type_index: int):
index_in_side = type_index // 2
side_offset = type_index - 2 * index_in_side
return side_offset, index_in_side
def _get_node_lobatto_indices(self):
ORDER = self.ORDER
@cache.dynamic_func(suffix=self.name)
def node_lobatto_indices(node_type: int, type_index: int):
if node_type == SquareSerendipityShapeFunctions.VERTEX:
node_i = type_index // 2
node_j = type_index - 2 * node_i
return node_i * ORDER, node_j * ORDER
side_offset, index_in_side = SquareSerendipityShapeFunctions.side_offset_and_index(type_index)
if node_type == SquareSerendipityShapeFunctions.EDGE_X:
node_i = 1 + index_in_side
node_j = side_offset * ORDER
else:
node_j = 1 + index_in_side
node_i = side_offset * ORDER
return node_i, node_j
return node_lobatto_indices
def make_node_coords_in_element(self):
LOBATTO_COORDS = self.LOBATTO_COORDS
@cache.dynamic_func(suffix=self.name)
def node_coords_in_element(
node_index_in_elt: int,
):
node_type, type_index = self.node_type_and_type_index(node_index_in_elt)
node_i, node_j = self._node_lobatto_indices(node_type, type_index)
return Coords(LOBATTO_COORDS[node_i], LOBATTO_COORDS[node_j], 0.0)
return node_coords_in_element
def make_node_quadrature_weight(self):
ORDER = self.ORDER
@cache.dynamic_func(suffix=self.name)
def node_quadrature_weight(
node_index_in_elt: int,
):
node_type, type_index = self.node_type_and_type_index(node_index_in_elt)
if node_type == SquareSerendipityShapeFunctions.VERTEX:
return 0.25 / float(ORDER * ORDER)
return (0.25 - 0.25 / float(ORDER * ORDER)) / float(ORDER - 1)
return node_quadrature_weight
def make_trace_node_quadrature_weight(self):
LOBATTO_WEIGHT = self.LOBATTO_WEIGHT
@cache.dynamic_func(suffix=self.name)
def trace_node_quadrature_weight(
node_index_in_elt: int,
):
node_type, type_index = self.node_type_and_type_index(node_index_in_elt)
if node_type == SquareSerendipityShapeFunctions.VERTEX:
return LOBATTO_WEIGHT[0]
side_offset, index_in_side = SquareSerendipityShapeFunctions.side_offset_and_index(type_index)
return LOBATTO_WEIGHT[1 + index_in_side]
return trace_node_quadrature_weight
def make_element_inner_weight(self):
ORDER = self.ORDER
ORDER_PLUS_ONE = self.ORDER_PLUS_ONE
LOBATTO_COORDS = self.LOBATTO_COORDS
LAGRANGE_SCALE = self.LAGRANGE_SCALE
DEGREE_3_CIRCLE_RAD = wp.constant(0.5**2 + (0.5 - LOBATTO_COORDS[1]) ** 2)
DEGREE_3_CIRCLE_SCALE = 1.0 / (0.5 - DEGREE_3_CIRCLE_RAD)
@cache.dynamic_func(suffix=self.name)
def element_inner_weight(
coords: Coords,
node_index_in_elt: int,
):
node_type, type_index = self.node_type_and_type_index(node_index_in_elt)
node_i, node_j = self._node_lobatto_indices(node_type, type_index)
if node_type == SquareSerendipityShapeFunctions.VERTEX:
cx = wp.select(node_i == 0, coords[0], 1.0 - coords[0])
cy = wp.select(node_j == 0, coords[1], 1.0 - coords[1])
w = cx * cy
if ORDER == 2:
w *= cx + cy - 2.0 + LOBATTO_COORDS[1]
return w * LAGRANGE_SCALE[0]
if ORDER == 3:
w *= (cx - 0.5) * (cx - 0.5) + (cy - 0.5) * (cy - 0.5) - DEGREE_3_CIRCLE_RAD
return w * DEGREE_3_CIRCLE_SCALE
w = float(1.0)
if node_type == SquareSerendipityShapeFunctions.EDGE_Y:
w *= wp.select(node_i == 0, coords[0], 1.0 - coords[0])
else:
for k in range(ORDER_PLUS_ONE):
if k != node_i:
w *= coords[0] - LOBATTO_COORDS[k]
w *= LAGRANGE_SCALE[node_i]
if node_type == SquareSerendipityShapeFunctions.EDGE_X:
w *= wp.select(node_j == 0, coords[1], 1.0 - coords[1])
else:
for k in range(ORDER_PLUS_ONE):
if k != node_j:
w *= coords[1] - LOBATTO_COORDS[k]
w *= LAGRANGE_SCALE[node_j]
return w
return element_inner_weight
def make_element_inner_weight_gradient(self):
ORDER = self.ORDER
ORDER_PLUS_ONE = self.ORDER_PLUS_ONE
LOBATTO_COORDS = self.LOBATTO_COORDS
LAGRANGE_SCALE = self.LAGRANGE_SCALE
DEGREE_3_CIRCLE_RAD = wp.constant(0.5**2 + (0.5 - LOBATTO_COORDS[1]) ** 2)
DEGREE_3_CIRCLE_SCALE = 1.0 / (0.5 - DEGREE_3_CIRCLE_RAD)
@cache.dynamic_func(suffix=self.name)
def element_inner_weight_gradient(
coords: Coords,
node_index_in_elt: int,
):
node_type, type_index = self.node_type_and_type_index(node_index_in_elt)
node_i, node_j = self._node_lobatto_indices(node_type, type_index)
if node_type == SquareSerendipityShapeFunctions.VERTEX:
cx = wp.select(node_i == 0, coords[0], 1.0 - coords[0])
cy = wp.select(node_j == 0, coords[1], 1.0 - coords[1])
gx = wp.select(node_i == 0, 1.0, -1.0)
gy = wp.select(node_j == 0, 1.0, -1.0)
if ORDER == 2:
w = cx + cy - 2.0 + LOBATTO_COORDS[1]
grad_x = cy * gx * (w + cx)
grad_y = cx * gy * (w + cy)
return wp.vec2(grad_x, grad_y) * LAGRANGE_SCALE[0]
if ORDER == 3:
w = (cx - 0.5) * (cx - 0.5) + (cy - 0.5) * (cy - 0.5) - DEGREE_3_CIRCLE_RAD
dw_dcx = 2.0 * cx - 1.0
dw_dcy = 2.0 * cy - 1.0
grad_x = cy * gx * (w + cx * dw_dcx)
grad_y = cx * gy * (w + cy * dw_dcy)
return wp.vec2(grad_x, grad_y) * DEGREE_3_CIRCLE_SCALE
if node_type == SquareSerendipityShapeFunctions.EDGE_X:
prefix_x = wp.select(node_j == 0, coords[1], 1.0 - coords[1])
else:
prefix_x = LAGRANGE_SCALE[node_j]
for k in range(ORDER_PLUS_ONE):
if k != node_j:
prefix_x *= coords[1] - LOBATTO_COORDS[k]
if node_type == SquareSerendipityShapeFunctions.EDGE_Y:
prefix_y = wp.select(node_i == 0, coords[0], 1.0 - coords[0])
else:
prefix_y = LAGRANGE_SCALE[node_i]
for k in range(ORDER_PLUS_ONE):
if k != node_i:
prefix_y *= coords[0] - LOBATTO_COORDS[k]
if node_type == SquareSerendipityShapeFunctions.EDGE_X:
grad_y = wp.select(node_j == 0, 1.0, -1.0) * prefix_y
else:
prefix_y *= LAGRANGE_SCALE[node_j]
grad_y = float(0.0)
for k in range(ORDER_PLUS_ONE):
if k != node_j:
delta_y = coords[1] - LOBATTO_COORDS[k]
grad_y = grad_y * delta_y + prefix_y
prefix_y *= delta_y
if node_type == SquareSerendipityShapeFunctions.EDGE_Y:
grad_x = wp.select(node_i == 0, 1.0, -1.0) * prefix_x
else:
prefix_x *= LAGRANGE_SCALE[node_i]
grad_x = float(0.0)
for k in range(ORDER_PLUS_ONE):
if k != node_i:
delta_x = coords[0] - LOBATTO_COORDS[k]
grad_x = grad_x * delta_x + prefix_x
prefix_x *= delta_x
grad = wp.vec2(grad_x, grad_y)
return grad
return element_inner_weight_gradient
def element_node_triangulation(self):
if self.ORDER == 2:
element_triangles = [
[0, 4, 5],
[5, 4, 6],
[5, 6, 1],
[4, 2, 7],
[4, 7, 6],
[6, 7, 3],
]
else:
element_triangles = [
[0, 4, 5],
[2, 7, 8],
[3, 10, 11],
[1, 9, 6],
[5, 6, 9],
[5, 4, 6],
[8, 11, 10],
[8, 7, 11],
[4, 8, 10],
[4, 10, 6],
]
return element_triangles
class SquareNonConformingPolynomialShapeFunctions:
# embeds the largest equilateral triangle centered at (0.5, 0.5) into the reference square
_tri_height = 0.75
_tri_side = 2.0 / math.sqrt(3.0) * _tri_height
_tri_to_square = np.array([[_tri_side, _tri_side / 2.0], [0.0, _tri_height]])
_TRI_OFFSET = wp.constant(wp.vec2(0.5 - 0.5 * _tri_side, 0.5 - _tri_height / 3.0))
def __init__(self, degree: int):
self._tri_shape = Triangle2DPolynomialShapeFunctions(degree=degree)
self.ORDER = self._tri_shape.ORDER
self.NODES_PER_ELEMENT = self._tri_shape.NODES_PER_ELEMENT
self.element_node_triangulation = self._tri_shape.element_node_triangulation
@property
def name(self) -> str:
return f"Square_P{self.ORDER}d"
def make_node_coords_in_element(self):
node_coords_in_tet = self._tri_shape.make_node_coords_in_element()
TRI_TO_SQUARE = wp.constant(wp.mat22(self._tri_to_square))
@cache.dynamic_func(suffix=self.name)
def node_coords_in_element(
node_index_in_elt: int,
):
tri_coords = node_coords_in_tet(node_index_in_elt)
coords = (
TRI_TO_SQUARE * wp.vec2(tri_coords[1], tri_coords[2])
) + SquareNonConformingPolynomialShapeFunctions._TRI_OFFSET
return Coords(coords[0], coords[1], 0.0)
return node_coords_in_element
def make_node_quadrature_weight(self):
NODES_PER_ELEMENT = self.NODES_PER_ELEMENT
if self.ORDER == 2:
# Intrinsic quadrature (order 2)
@cache.dynamic_func(suffix=self.name)
def node_quadrature_weight_quadratic(
node_index_in_elt: int,
):
node_type, type_index = self._tri_shape.node_type_and_type_index(node_index_in_elt)
if node_type == Triangle2DPolynomialShapeFunctions.VERTEX:
return 0.18518521
return 0.14814811
return node_quadrature_weight_quadratic
@cache.dynamic_func(suffix=self.name)
def node_uniform_quadrature_weight(
node_index_in_elt: int,
):
return 1.0 / float(NODES_PER_ELEMENT)
return node_uniform_quadrature_weight
def make_trace_node_quadrature_weight(self):
# Non-conforming, zero measure on sides
@wp.func
def zero(node_index_in_elt: int):
return 0.0
return zero
def make_element_inner_weight(self):
tri_inner_weight = self._tri_shape.make_element_inner_weight()
SQUARE_TO_TRI = wp.constant(wp.mat22(np.linalg.inv(self._tri_to_square)))
@cache.dynamic_func(suffix=self.name)
def element_inner_weight(
coords: Coords,
node_index_in_elt: int,
):
tri_param = SQUARE_TO_TRI * (
wp.vec2(coords[0], coords[1]) - SquareNonConformingPolynomialShapeFunctions._TRI_OFFSET
)
tri_coords = Coords(1.0 - tri_param[0] - tri_param[1], tri_param[0], tri_param[1])
return tri_inner_weight(tri_coords, node_index_in_elt)
return element_inner_weight
def make_element_inner_weight_gradient(self):
tri_inner_weight_gradient = self._tri_shape.make_element_inner_weight_gradient()
SQUARE_TO_TRI = wp.constant(wp.mat22(np.linalg.inv(self._tri_to_square)))
@cache.dynamic_func(suffix=self.name)
def element_inner_weight_gradient(
coords: Coords,
node_index_in_elt: int,
):
tri_param = SQUARE_TO_TRI * (
wp.vec2(coords[0], coords[1]) - SquareNonConformingPolynomialShapeFunctions._TRI_OFFSET
)
tri_coords = Coords(1.0 - tri_param[0] - tri_param[1], tri_param[0], tri_param[1])
grad = tri_inner_weight_gradient(tri_coords, node_index_in_elt)
return wp.transpose(SQUARE_TO_TRI) * grad
return element_inner_weight_gradient
| 22,214 | Python | 35.29902 | 107 | 0.547673 |
NVIDIA/warp/warp/fem/space/shape/triangle_shape_function.py | import numpy as np
import warp as wp
from warp.fem import cache
from warp.fem.types import Coords
def _triangle_node_index(tx: int, ty: int, degree: int):
VERTEX_NODE_COUNT = 3
SIDE_INTERIOR_NODE_COUNT = degree - 1
    # Indexed in a similar order to e.g. VTK:
    # first vertices, then edges (counterclockwise), then interior points (recursively)
if tx == 0:
if ty == 0:
return 0
elif ty == degree:
return 2
else:
edge_index = 2
return VERTEX_NODE_COUNT + SIDE_INTERIOR_NODE_COUNT * edge_index + (SIDE_INTERIOR_NODE_COUNT - ty)
elif ty == 0:
if tx == degree:
return 1
else:
edge_index = 0
return VERTEX_NODE_COUNT + SIDE_INTERIOR_NODE_COUNT * edge_index + tx - 1
elif tx + ty == degree:
edge_index = 1
return VERTEX_NODE_COUNT + SIDE_INTERIOR_NODE_COUNT * edge_index + ty - 1
vertex_edge_node_count = 3 * degree
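    # The remaining nodes are strictly interior: they form a degree (degree - 3) triangle
    # (one layer removed from each side) and are numbered recursively with the same scheme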
return vertex_edge_node_count + _triangle_node_index(tx - 1, ty - 1, degree - 3)
class Triangle2DPolynomialShapeFunctions:
VERTEX = wp.constant(0)
EDGE = wp.constant(1)
INTERIOR = wp.constant(2)
def __init__(self, degree: int):
self.ORDER = wp.constant(degree)
self.NODES_PER_ELEMENT = wp.constant((degree + 1) * (degree + 2) // 2)
self.NODES_PER_SIDE = wp.constant(degree + 1)
triangle_coords = np.empty((self.NODES_PER_ELEMENT, 2), dtype=int)
for tx in range(degree + 1):
for ty in range(degree + 1 - tx):
index = _triangle_node_index(tx, ty, degree)
triangle_coords[index] = [tx, ty]
CoordTypeVec = wp.mat(dtype=int, shape=(self.NODES_PER_ELEMENT, 2))
self.NODE_TRIANGLE_COORDS = wp.constant(CoordTypeVec(triangle_coords))
self.node_type_and_type_index = self._get_node_type_and_type_index()
self._node_triangle_coordinates = self._get_node_triangle_coordinates()
@property
def name(self) -> str:
return f"Tri_P{self.ORDER}"
def _get_node_triangle_coordinates(self):
NODE_TRIANGLE_COORDS = self.NODE_TRIANGLE_COORDS
def node_triangle_coordinates(
node_index_in_elt: int,
):
return wp.vec2i(NODE_TRIANGLE_COORDS[node_index_in_elt, 0], NODE_TRIANGLE_COORDS[node_index_in_elt, 1])
return cache.get_func(node_triangle_coordinates, self.name)
def _get_node_type_and_type_index(self):
ORDER = self.ORDER
def node_type_and_index(
node_index_in_elt: int,
):
if node_index_in_elt < 3:
return Triangle2DPolynomialShapeFunctions.VERTEX, node_index_in_elt
if node_index_in_elt < 3 * ORDER:
return Triangle2DPolynomialShapeFunctions.EDGE, (node_index_in_elt - 3)
return Triangle2DPolynomialShapeFunctions.INTERIOR, (node_index_in_elt - 3 * ORDER)
return cache.get_func(node_type_and_index, self.name)
def make_node_coords_in_element(self):
ORDER = self.ORDER
def node_coords_in_element(
node_index_in_elt: int,
):
tri_coords = self._node_triangle_coordinates(node_index_in_elt)
cx = float(tri_coords[0]) / float(ORDER)
cy = float(tri_coords[1]) / float(ORDER)
return Coords(1.0 - cx - cy, cx, cy)
return cache.get_func(node_coords_in_element, self.name)
def make_node_quadrature_weight(self):
if self.ORDER == 3:
            # P3 intrinsic quadrature
vertex_weight = 1.0 / 30
edge_weight = 0.075
interior_weight = 0.45
elif self.ORDER == 2:
            # Formally order 1, but with quadrature weights optimized for monomials of degree <= 4
vertex_weight = 0.022335964126
edge_weight = 0.310997369207
interior_weight = 0.0
else:
vertex_weight = 1.0 / self.NODES_PER_ELEMENT
edge_weight = 1.0 / self.NODES_PER_ELEMENT
interior_weight = 1.0 / self.NODES_PER_ELEMENT
VERTEX_WEIGHT = wp.constant(vertex_weight)
EDGE_WEIGHT = wp.constant(edge_weight)
INTERIOR_WEIGHT = wp.constant(interior_weight)
@cache.dynamic_func(suffix=self.name)
def node_quadrature_weight(node_index_in_element: int):
node_type, type_index = self.node_type_and_type_index(node_index_in_element)
if node_type == Triangle2DPolynomialShapeFunctions.VERTEX:
return VERTEX_WEIGHT
elif node_type == Triangle2DPolynomialShapeFunctions.EDGE:
return EDGE_WEIGHT
return INTERIOR_WEIGHT
return node_quadrature_weight
def make_trace_node_quadrature_weight(self):
# Closed Newton-Cotes
if self.ORDER == 3:
vertex_weight = 1.0 / 8.0
edge_weight = 3.0 / 8.0
elif self.ORDER == 2:
vertex_weight = 1.0 / 6.0
edge_weight = 2.0 / 3.0
else:
vertex_weight = 1.0 / self.NODES_PER_SIDE
edge_weight = 1.0 / self.NODES_PER_SIDE
VERTEX_WEIGHT = wp.constant(vertex_weight)
EDGE_WEIGHT = wp.constant(edge_weight)
@cache.dynamic_func(suffix=self.name)
def trace_node_quadrature_weight(node_index_in_element: int):
node_type, type_index = self.node_type_and_type_index(node_index_in_element)
return wp.select(node_type == Triangle2DPolynomialShapeFunctions.VERTEX, EDGE_WEIGHT, VERTEX_WEIGHT)
return trace_node_quadrature_weight
def make_element_inner_weight(self):
ORDER = self.ORDER
def element_inner_weight_linear(
coords: Coords,
node_index_in_elt: int,
):
return coords[node_index_in_elt]
def element_inner_weight_quadratic(
coords: Coords,
node_index_in_elt: int,
):
node_type, type_index = self.node_type_and_type_index(node_index_in_elt)
if node_type == Triangle2DPolynomialShapeFunctions.VERTEX:
# Vertex
return coords[type_index] * (2.0 * coords[type_index] - 1.0)
# Edge
c1 = type_index
c2 = (type_index + 1) % 3
return 4.0 * coords[c1] * coords[c2]
def element_inner_weight_cubic(
coords: Coords,
node_index_in_elt: int,
):
node_type, type_index = self.node_type_and_type_index(node_index_in_elt)
if node_type == Triangle2DPolynomialShapeFunctions.VERTEX:
# Vertex
return 0.5 * coords[type_index] * (3.0 * coords[type_index] - 1.0) * (3.0 * coords[type_index] - 2.0)
elif node_type == Triangle2DPolynomialShapeFunctions.EDGE:
# Edge
edge = type_index // 2
k = type_index - 2 * edge
c1 = (edge + k) % 3
c2 = (edge + 1 - k) % 3
return 4.5 * coords[c1] * coords[c2] * (3.0 * coords[c1] - 1.0)
# Interior
return 27.0 * coords[0] * coords[1] * coords[2]
if ORDER == 1:
return cache.get_func(element_inner_weight_linear, self.name)
elif ORDER == 2:
return cache.get_func(element_inner_weight_quadratic, self.name)
elif ORDER == 3:
return cache.get_func(element_inner_weight_cubic, self.name)
return None
def make_element_inner_weight_gradient(self):
ORDER = self.ORDER
def element_inner_weight_gradient_linear(
coords: Coords,
node_index_in_elt: int,
):
dw_dc = wp.vec3(0.0)
dw_dc[node_index_in_elt] = 1.0
dw_du = wp.vec2(dw_dc[1] - dw_dc[0], dw_dc[2] - dw_dc[0])
return dw_du
def element_inner_weight_gradient_quadratic(
coords: Coords,
node_index_in_elt: int,
):
node_type, type_index = self.node_type_and_type_index(node_index_in_elt)
dw_dc = wp.vec3(0.0)
if node_type == Triangle2DPolynomialShapeFunctions.VERTEX:
# Vertex
dw_dc[type_index] = 4.0 * coords[type_index] - 1.0
else:
# Edge
c1 = type_index
c2 = (type_index + 1) % 3
dw_dc[c1] = 4.0 * coords[c2]
dw_dc[c2] = 4.0 * coords[c1]
dw_du = wp.vec2(dw_dc[1] - dw_dc[0], dw_dc[2] - dw_dc[0])
return dw_du
def element_inner_weight_gradient_cubic(
coords: Coords,
node_index_in_elt: int,
):
node_type, type_index = self.node_type_and_type_index(node_index_in_elt)
dw_dc = wp.vec3(0.0)
if node_type == Triangle2DPolynomialShapeFunctions.VERTEX:
# Vertex
dw_dc[type_index] = (
0.5 * 27.0 * coords[type_index] * coords[type_index] - 9.0 * coords[type_index] + 1.0
)
elif node_type == Triangle2DPolynomialShapeFunctions.EDGE:
# Edge
edge = type_index // 2
k = type_index - 2 * edge
c1 = (edge + k) % 3
c2 = (edge + 1 - k) % 3
dw_dc[c1] = 4.5 * coords[c2] * (6.0 * coords[c1] - 1.0)
dw_dc[c2] = 4.5 * coords[c1] * (3.0 * coords[c1] - 1.0)
else:
# Interior
dw_dc = wp.vec3(
27.0 * coords[1] * coords[2], 27.0 * coords[2] * coords[0], 27.0 * coords[0] * coords[1]
)
dw_du = wp.vec2(dw_dc[1] - dw_dc[0], dw_dc[2] - dw_dc[0])
return dw_du
if ORDER == 1:
return cache.get_func(element_inner_weight_gradient_linear, self.name)
elif ORDER == 2:
return cache.get_func(element_inner_weight_gradient_quadratic, self.name)
elif ORDER == 3:
return cache.get_func(element_inner_weight_gradient_cubic, self.name)
return None
def element_node_triangulation(self):
if self.ORDER == 1:
element_triangles = [[0, 1, 2]]
        elif self.ORDER == 2:
element_triangles = [[0, 3, 5], [3, 1, 4], [2, 5, 4], [3, 4, 5]]
elif self.ORDER == 3:
element_triangles = [
[0, 3, 8],
[3, 4, 9],
[4, 1, 5],
[8, 3, 9],
[4, 5, 9],
[8, 9, 7],
[9, 5, 6],
[6, 7, 9],
[7, 6, 2],
]
return np.array(element_triangles)
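# Example (sketch, not part of the original module): `element_node_triangulation`
# returns per-element sub-triangles in local node numbering; combined with a
# (num_cells, NODES_PER_ELEMENT) numpy array of global node indices, fancy
# indexing yields a flat triangle list suitable for visualization.
def _example_triangulate_cells(element_node_indices):
    shape = Triangle2DPolynomialShapeFunctions(degree=2)
    local_tris = shape.element_node_triangulation()  # (4, 3) sub-triangles for P2
    # (num_cells, 4, 3) -> (num_cells * 4, 3) global vertex triples
    return element_node_indices[:, local_tris].reshape(-1, 3)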
class Triangle2DNonConformingPolynomialShapeFunctions:
def __init__(self, degree: int):
self._tri_shape = Triangle2DPolynomialShapeFunctions(degree=degree)
self.ORDER = self._tri_shape.ORDER
self.NODES_PER_ELEMENT = self._tri_shape.NODES_PER_ELEMENT
self.element_node_triangulation = self._tri_shape.element_node_triangulation
# Coordinates (a, b, b) of embedded triangle
if self.ORDER == 1:
            # Quadrature order 2
a = 2.0 / 3.0
elif self.ORDER == 2:
# Order 2, optimized for small intrinsic quadrature error up to degree 4
a = 0.7790771484375001
elif self.ORDER == 3:
# Order 3, optimized for small intrinsic quadrature error up to degree 6
a = 0.8429443359375002
else:
a = 1.0
b = 0.5 * (1.0 - a)
self._small_to_big = np.full((3, 3), b) + (a - b) * np.eye(3)
self._tri_scale = a - b
@property
def name(self) -> str:
return f"Tri_P{self.ORDER}d"
def make_node_quadrature_weight(self):
# Intrinsic quadrature -- precomputed integral of node shape functions
        # over the element. Order equal to self.ORDER
if self.ORDER == 2:
vertex_weight = 0.13743348
edge_weight = 0.19589985
interior_weight = 0.0
elif self.ORDER == 3:
vertex_weight = 0.07462578
edge_weight = 0.1019807
interior_weight = 0.16423881
else:
vertex_weight = 1.0 / self.NODES_PER_ELEMENT
edge_weight = 1.0 / self.NODES_PER_ELEMENT
interior_weight = 1.0 / self.NODES_PER_ELEMENT
VERTEX_WEIGHT = wp.constant(vertex_weight)
EDGE_WEIGHT = wp.constant(edge_weight)
INTERIOR_WEIGHT = wp.constant(interior_weight)
@cache.dynamic_func(suffix=self.name)
def node_quadrature_weight(node_index_in_element: int):
node_type, type_index = self._tri_shape.node_type_and_type_index(node_index_in_element)
if node_type == Triangle2DPolynomialShapeFunctions.VERTEX:
return VERTEX_WEIGHT
elif node_type == Triangle2DPolynomialShapeFunctions.EDGE:
return EDGE_WEIGHT
return INTERIOR_WEIGHT
return node_quadrature_weight
def make_trace_node_quadrature_weight(self):
# Non-conforming, zero measure on sides
@wp.func
def zero(node_index_in_elt: int):
return 0.0
return zero
def make_node_coords_in_element(self):
        node_coords_in_tri = self._tri_shape.make_node_coords_in_element()
SMALL_TO_BIG = wp.constant(wp.mat33(self._small_to_big))
@cache.dynamic_func(suffix=self.name)
def node_coords_in_element(
node_index_in_elt: int,
):
            tri_coords = node_coords_in_tri(node_index_in_elt)
return SMALL_TO_BIG * tri_coords
return node_coords_in_element
def make_element_inner_weight(self):
tri_inner_weight = self._tri_shape.make_element_inner_weight()
BIG_TO_SMALL = wp.constant(wp.mat33(np.linalg.inv(self._small_to_big)))
@cache.dynamic_func(suffix=self.name)
def element_inner_weight(
coords: Coords,
node_index_in_elt: int,
):
tri_coords = BIG_TO_SMALL * coords
return tri_inner_weight(tri_coords, node_index_in_elt)
return element_inner_weight
def make_element_inner_weight_gradient(self):
tri_inner_weight_gradient = self._tri_shape.make_element_inner_weight_gradient()
BIG_TO_SMALL = wp.constant(wp.mat33(np.linalg.inv(self._small_to_big)))
INV_TRI_SCALE = wp.constant(1.0 / self._tri_scale)
@cache.dynamic_func(suffix=self.name)
def element_inner_weight_gradient(
coords: Coords,
node_index_in_elt: int,
):
tri_coords = BIG_TO_SMALL * coords
grad = tri_inner_weight_gradient(tri_coords, node_index_in_elt)
return INV_TRI_SCALE * grad
return element_inner_weight_gradient
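# Sanity sketch (hypothetical helper, not part of the original module): the
# embedding matrix built in Triangle2DNonConformingPolynomialShapeFunctions maps
# barycentric coordinates to barycentric coordinates -- each column holds one `a`
# and two `b`, so column sums are a + 2b = 1 and coordinate sums are preserved.
def _check_embedding_is_barycentric(a: float = 2.0 / 3.0):
    b = 0.5 * (1.0 - a)
    small_to_big = np.full((3, 3), b) + (a - b) * np.eye(3)
    assert np.allclose(small_to_big.sum(axis=0), 1.0)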
| 14,886 | Python | 33.62093 | 117 | 0.555757 |
NVIDIA/warp/warp/fem/space/shape/tet_shape_function.py | import numpy as np
import warp as wp
from warp.fem import cache
from warp.fem.types import Coords
def _tet_node_index(tx: int, ty: int, tz: int, degree: int):
from .triangle_shape_function import _triangle_node_index
VERTEX_NODE_COUNT = 4
EDGE_INTERIOR_NODE_COUNT = degree - 1
VERTEX_EDGE_NODE_COUNT = VERTEX_NODE_COUNT + 6 * EDGE_INTERIOR_NODE_COUNT
FACE_INTERIOR_NODE_COUNT = (degree - 1) * (degree - 2) // 2
VERTEX_EDGE_FACE_NODE_COUNT = VERTEX_EDGE_NODE_COUNT + 4 * FACE_INTERIOR_NODE_COUNT
# Index in similar order to e.g. VTK
# First vertices, then edges (counterclockwise), then faces, then interior points (recursively)
if tx == 0:
if ty == 0:
if tz == 0:
return 0
elif tz == degree:
return 3
else:
# 0-3 edge
edge_index = 3
return VERTEX_NODE_COUNT + EDGE_INTERIOR_NODE_COUNT * edge_index + (tz - 1)
elif tz == 0:
if ty == degree:
return 2
else:
# 2-0 edge
edge_index = 2
return VERTEX_NODE_COUNT + EDGE_INTERIOR_NODE_COUNT * edge_index + (EDGE_INTERIOR_NODE_COUNT - ty)
elif tz + ty == degree:
# 2-3 edge
edge_index = 5
return VERTEX_NODE_COUNT + EDGE_INTERIOR_NODE_COUNT * edge_index + (tz - 1)
else:
# 2-3-0 face
face_index = 2
return (
VERTEX_EDGE_NODE_COUNT
+ FACE_INTERIOR_NODE_COUNT * face_index
+ _triangle_node_index(degree - 1 - ty - tz, tz - 1, degree - 3)
)
elif ty == 0:
if tz == 0:
if tx == degree:
return 1
else:
# 0-1 edge
edge_index = 0
return VERTEX_NODE_COUNT + EDGE_INTERIOR_NODE_COUNT * edge_index + (tx - 1)
elif tz + tx == degree:
# 1-3 edge
edge_index = 4
return VERTEX_NODE_COUNT + EDGE_INTERIOR_NODE_COUNT * edge_index + (tz - 1)
else:
# 3-0-1 face
face_index = 3
return (
VERTEX_EDGE_NODE_COUNT
+ FACE_INTERIOR_NODE_COUNT * face_index
+ _triangle_node_index(tx - 1, tz - 1, degree - 3)
)
elif tz == 0:
if tx + ty == degree:
# 1-2 edge
edge_index = 1
return VERTEX_NODE_COUNT + EDGE_INTERIOR_NODE_COUNT * edge_index + (ty - 1)
else:
# 0-1-2 face
face_index = 0
return (
VERTEX_EDGE_NODE_COUNT
+ FACE_INTERIOR_NODE_COUNT * face_index
+ _triangle_node_index(tx - 1, ty - 1, degree - 3)
)
elif tx + ty + tz == degree:
# 1-2-3 face
face_index = 1
return (
VERTEX_EDGE_NODE_COUNT
+ FACE_INTERIOR_NODE_COUNT * face_index
+ _triangle_node_index(tx - 1, tz - 1, degree - 3)
)
return VERTEX_EDGE_FACE_NODE_COUNT + _tet_node_index(tx - 1, ty - 1, tz - 1, degree - 4)
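# Example (sketch, not part of the original module): enumerating the node
# numbering produced by `_tet_node_index` for a quadratic tet -- 4 vertices
# followed by 6 edge midpoints, covering all 10 nodes exactly once.
def _example_tet_node_ordering(degree: int = 2):
    nodes = {}
    for tx in range(degree + 1):
        for ty in range(degree + 1 - tx):
            for tz in range(degree + 1 - tx - ty):
                nodes[_tet_node_index(tx, ty, tz, degree)] = (tx, ty, tz)
    assert sorted(nodes) == list(range((degree + 1) * (degree + 2) * (degree + 3) // 6))
    return nodes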
class TetrahedronPolynomialShapeFunctions:
INVALID = wp.constant(-1)
VERTEX = wp.constant(0)
EDGE = wp.constant(1)
FACE = wp.constant(2)
INTERIOR = wp.constant(3)
def __init__(self, degree: int):
self.ORDER = wp.constant(degree)
self.NODES_PER_ELEMENT = wp.constant((degree + 1) * (degree + 2) * (degree + 3) // 6)
self.NODES_PER_SIDE = wp.constant((degree + 1) * (degree + 2) // 2)
tet_coords = np.empty((self.NODES_PER_ELEMENT, 3), dtype=int)
for tx in range(degree + 1):
for ty in range(degree + 1 - tx):
for tz in range(degree + 1 - tx - ty):
index = _tet_node_index(tx, ty, tz, degree)
tet_coords[index] = [tx, ty, tz]
CoordTypeVec = wp.mat(dtype=int, shape=(self.NODES_PER_ELEMENT, 3))
self.NODE_TET_COORDS = wp.constant(CoordTypeVec(tet_coords))
self.node_type_and_type_index = self._get_node_type_and_type_index()
self._node_tet_coordinates = self._get_node_tet_coordinates()
@property
def name(self) -> str:
return f"Tet_P{self.ORDER}"
def _get_node_tet_coordinates(self):
NODE_TET_COORDS = self.NODE_TET_COORDS
def node_tet_coordinates(
node_index_in_elt: int,
):
return wp.vec3i(
NODE_TET_COORDS[node_index_in_elt, 0],
NODE_TET_COORDS[node_index_in_elt, 1],
NODE_TET_COORDS[node_index_in_elt, 2],
)
return cache.get_func(node_tet_coordinates, self.name)
def _get_node_type_and_type_index(self):
ORDER = self.ORDER
NODES_PER_ELEMENT = self.NODES_PER_ELEMENT
def node_type_and_index(
node_index_in_elt: int,
):
if node_index_in_elt < 0 or node_index_in_elt >= NODES_PER_ELEMENT:
return TetrahedronPolynomialShapeFunctions.INVALID, TetrahedronPolynomialShapeFunctions.INVALID
if node_index_in_elt < 4:
return TetrahedronPolynomialShapeFunctions.VERTEX, node_index_in_elt
if node_index_in_elt < (6 * ORDER - 2):
return TetrahedronPolynomialShapeFunctions.EDGE, (node_index_in_elt - 4)
if node_index_in_elt < (2 * ORDER * ORDER + 2):
return TetrahedronPolynomialShapeFunctions.FACE, (node_index_in_elt - (6 * ORDER - 2))
return TetrahedronPolynomialShapeFunctions.INTERIOR, (node_index_in_elt - (2 * ORDER * ORDER + 2))
return cache.get_func(node_type_and_index, self.name)
def make_node_coords_in_element(self):
ORDER = self.ORDER
def node_coords_in_element(
node_index_in_elt: int,
):
tet_coords = self._node_tet_coordinates(node_index_in_elt)
cx = float(tet_coords[0]) / float(ORDER)
cy = float(tet_coords[1]) / float(ORDER)
cz = float(tet_coords[2]) / float(ORDER)
return Coords(cx, cy, cz)
return cache.get_func(node_coords_in_element, self.name)
def make_node_quadrature_weight(self):
if self.ORDER == 3:
            # Formally order 1, but with quadrature weights optimized for monomials of degree <= 6
vertex_weight = 0.007348845656
edge_weight = 0.020688129855
face_weight = 0.180586764778
interior_weight = 0.0
else:
vertex_weight = 1.0 / self.NODES_PER_ELEMENT
edge_weight = 1.0 / self.NODES_PER_ELEMENT
face_weight = 1.0 / self.NODES_PER_ELEMENT
interior_weight = 1.0 / self.NODES_PER_ELEMENT
VERTEX_WEIGHT = wp.constant(vertex_weight)
EDGE_WEIGHT = wp.constant(edge_weight)
FACE_WEIGHT = wp.constant(face_weight)
INTERIOR_WEIGHT = wp.constant(interior_weight)
@cache.dynamic_func(suffix=self.name)
def node_quadrature_weight(node_index_in_element: int):
node_type, type_index = self.node_type_and_type_index(node_index_in_element)
if node_type == TetrahedronPolynomialShapeFunctions.VERTEX:
return VERTEX_WEIGHT
elif node_type == TetrahedronPolynomialShapeFunctions.EDGE:
return EDGE_WEIGHT
elif node_type == TetrahedronPolynomialShapeFunctions.FACE:
return FACE_WEIGHT
return INTERIOR_WEIGHT
return node_quadrature_weight
def make_trace_node_quadrature_weight(self):
if self.ORDER == 3:
            # P3 intrinsic quadrature
vertex_weight = 1.0 / 30
edge_weight = 0.075
interior_weight = 0.45
elif self.ORDER == 2:
            # Formally order 1, but with quadrature weights optimized for monomials of degree <= 4
vertex_weight = 0.022335964126
edge_weight = 0.310997369207
interior_weight = 0.0
else:
vertex_weight = 1.0 / self.NODES_PER_SIDE
edge_weight = 1.0 / self.NODES_PER_SIDE
interior_weight = 1.0 / self.NODES_PER_SIDE
VERTEX_WEIGHT = wp.constant(vertex_weight)
EDGE_WEIGHT = wp.constant(edge_weight)
FACE_INTERIOR_WEIGHT = wp.constant(interior_weight)
@cache.dynamic_func(suffix=self.name)
def trace_node_quadrature_weight(node_index_in_element: int):
node_type, type_index = self.node_type_and_type_index(node_index_in_element)
if node_type == TetrahedronPolynomialShapeFunctions.VERTEX:
return VERTEX_WEIGHT
elif node_type == TetrahedronPolynomialShapeFunctions.EDGE:
return EDGE_WEIGHT
return FACE_INTERIOR_WEIGHT
return trace_node_quadrature_weight
def make_element_inner_weight(self):
ORDER = self.ORDER
def element_inner_weight_linear(
coords: Coords,
node_index_in_elt: int,
):
if node_index_in_elt < 0 or node_index_in_elt >= 4:
return 0.0
tet_coords = wp.vec4(1.0 - coords[0] - coords[1] - coords[2], coords[0], coords[1], coords[2])
return tet_coords[node_index_in_elt]
def element_inner_weight_quadratic(
coords: Coords,
node_index_in_elt: int,
):
node_type, type_index = self.node_type_and_type_index(node_index_in_elt)
tet_coords = wp.vec4(1.0 - coords[0] - coords[1] - coords[2], coords[0], coords[1], coords[2])
if node_type == TetrahedronPolynomialShapeFunctions.VERTEX:
# Vertex
return tet_coords[type_index] * (2.0 * tet_coords[type_index] - 1.0)
elif node_type == TetrahedronPolynomialShapeFunctions.EDGE:
# Edge
if type_index < 3:
c1 = type_index
c2 = (type_index + 1) % 3
else:
c1 = type_index - 3
c2 = 3
return 4.0 * tet_coords[c1] * tet_coords[c2]
return 0.0
def element_inner_weight_cubic(
coords: Coords,
node_index_in_elt: int,
):
node_type, type_index = self.node_type_and_type_index(node_index_in_elt)
tet_coords = wp.vec4(1.0 - coords[0] - coords[1] - coords[2], coords[0], coords[1], coords[2])
if node_type == TetrahedronPolynomialShapeFunctions.VERTEX:
# Vertex
return (
0.5
* tet_coords[type_index]
* (3.0 * tet_coords[type_index] - 1.0)
* (3.0 * tet_coords[type_index] - 2.0)
)
elif node_type == TetrahedronPolynomialShapeFunctions.EDGE:
# Edge
edge = type_index // 2
edge_node = type_index - 2 * edge
if edge < 3:
c1 = (edge + edge_node) % 3
c2 = (edge + 1 - edge_node) % 3
elif edge_node == 0:
c1 = edge - 3
c2 = 3
else:
c1 = 3
c2 = edge - 3
return 4.5 * tet_coords[c1] * tet_coords[c2] * (3.0 * tet_coords[c1] - 1.0)
elif node_type == TetrahedronPolynomialShapeFunctions.FACE:
                # Face
c1 = type_index
c2 = (c1 + 1) % 4
c3 = (c1 + 2) % 4
return 27.0 * tet_coords[c1] * tet_coords[c2] * tet_coords[c3]
return 0.0
if ORDER == 1:
return cache.get_func(element_inner_weight_linear, self.name)
elif ORDER == 2:
return cache.get_func(element_inner_weight_quadratic, self.name)
elif ORDER == 3:
return cache.get_func(element_inner_weight_cubic, self.name)
return None
def make_element_inner_weight_gradient(self):
ORDER = self.ORDER
def element_inner_weight_gradient_linear(
coords: Coords,
node_index_in_elt: int,
):
if node_index_in_elt < 0 or node_index_in_elt >= 4:
return wp.vec3(0.0)
dw_dc = wp.vec4(0.0)
dw_dc[node_index_in_elt] = 1.0
dw_du = wp.vec3(dw_dc[1] - dw_dc[0], dw_dc[2] - dw_dc[0], dw_dc[3] - dw_dc[0])
return dw_du
def element_inner_weight_gradient_quadratic(
coords: Coords,
node_index_in_elt: int,
):
node_type, type_index = self.node_type_and_type_index(node_index_in_elt)
tet_coords = wp.vec4(1.0 - coords[0] - coords[1] - coords[2], coords[0], coords[1], coords[2])
dw_dc = wp.vec4(0.0)
if node_type == TetrahedronPolynomialShapeFunctions.VERTEX:
# Vertex
dw_dc[type_index] = 4.0 * tet_coords[type_index] - 1.0
elif node_type == TetrahedronPolynomialShapeFunctions.EDGE:
# Edge
if type_index < 3:
c1 = type_index
c2 = (type_index + 1) % 3
else:
c1 = type_index - 3
c2 = 3
dw_dc[c1] = 4.0 * tet_coords[c2]
dw_dc[c2] = 4.0 * tet_coords[c1]
dw_du = wp.vec3(dw_dc[1] - dw_dc[0], dw_dc[2] - dw_dc[0], dw_dc[3] - dw_dc[0])
return dw_du
def element_inner_weight_gradient_cubic(
coords: Coords,
node_index_in_elt: int,
):
node_type, type_index = self.node_type_and_type_index(node_index_in_elt)
tet_coords = wp.vec4(1.0 - coords[0] - coords[1] - coords[2], coords[0], coords[1], coords[2])
dw_dc = wp.vec4(0.0)
if node_type == TetrahedronPolynomialShapeFunctions.VERTEX:
# Vertex
dw_dc[type_index] = (
0.5 * 27.0 * tet_coords[type_index] * tet_coords[type_index] - 9.0 * tet_coords[type_index] + 1.0
)
elif node_type == TetrahedronPolynomialShapeFunctions.EDGE:
# Edge
edge = type_index // 2
edge_node = type_index - 2 * edge
if edge < 3:
c1 = (edge + edge_node) % 3
c2 = (edge + 1 - edge_node) % 3
elif edge_node == 0:
c1 = edge - 3
c2 = 3
else:
c1 = 3
c2 = edge - 3
dw_dc[c1] = 4.5 * tet_coords[c2] * (6.0 * tet_coords[c1] - 1.0)
dw_dc[c2] = 4.5 * tet_coords[c1] * (3.0 * tet_coords[c1] - 1.0)
elif node_type == TetrahedronPolynomialShapeFunctions.FACE:
                # Face
c1 = type_index
c2 = (c1 + 1) % 4
c3 = (c1 + 2) % 4
dw_dc[c1] = 27.0 * tet_coords[c2] * tet_coords[c3]
dw_dc[c2] = 27.0 * tet_coords[c3] * tet_coords[c1]
dw_dc[c3] = 27.0 * tet_coords[c1] * tet_coords[c2]
dw_du = wp.vec3(dw_dc[1] - dw_dc[0], dw_dc[2] - dw_dc[0], dw_dc[3] - dw_dc[0])
return dw_du
if ORDER == 1:
return cache.get_func(element_inner_weight_gradient_linear, self.name)
elif ORDER == 2:
return cache.get_func(element_inner_weight_gradient_quadratic, self.name)
elif ORDER == 3:
return cache.get_func(element_inner_weight_gradient_cubic, self.name)
return None
def element_node_tets(self):
if self.ORDER == 1:
element_tets = [[0, 1, 2, 3]]
        elif self.ORDER == 2:
element_tets = [
[0, 4, 6, 7],
[1, 5, 4, 8],
[2, 6, 5, 9],
[3, 7, 8, 9],
[4, 5, 6, 8],
[8, 7, 9, 6],
[6, 5, 9, 8],
[6, 8, 7, 4],
]
elif self.ORDER == 3:
raise NotImplementedError()
return np.array(element_tets)
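# Sketch (hypothetical check, mirroring the dynamic-kernel pattern used
# throughout warp.fem): Lagrange shape functions form a partition of unity, so
# summing `element_inner_weight` over all nodes at any element coordinate
# should yield 1.
def _check_partition_of_unity(degree: int = 2, device=None):
    shape = TetrahedronPolynomialShapeFunctions(degree)
    inner_weight = shape.make_element_inner_weight()
    NODES_PER_ELEMENT = shape.NODES_PER_ELEMENT

    @cache.dynamic_kernel(suffix=shape.name)
    def sum_weights(coords: wp.array(dtype=Coords), totals: wp.array(dtype=float)):
        i = wp.tid()
        total = float(0.0)
        for n in range(NODES_PER_ELEMENT):
            total += inner_weight(coords[i], n)
        totals[i] = total

    coords = wp.array(np.array([[0.2, 0.3, 0.4]], dtype=np.float32), dtype=Coords, device=device)
    totals = wp.zeros(1, dtype=float, device=device)
    wp.launch(sum_weights, dim=1, inputs=[coords, totals], device=device)
    return totals.numpy()[0]  # expected ~1.0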
class TetrahedronNonConformingPolynomialShapeFunctions:
def __init__(self, degree: int):
self._tet_shape = TetrahedronPolynomialShapeFunctions(degree=degree)
self.ORDER = self._tet_shape.ORDER
self.NODES_PER_ELEMENT = self._tet_shape.NODES_PER_ELEMENT
self.element_node_tets = self._tet_shape.element_node_tets
if self.ORDER == 1:
            self._TET_SCALE = 0.4472135955  # places vertex nodes at 0.5854101966249680 (the order-2 Gauss point)
elif self.ORDER == 2:
self._TET_SCALE = 0.6123779296874996 # optimized for low intrinsic quadrature error of deg 4
elif self.ORDER == 3:
self._TET_SCALE = 0.7153564453124999 # optimized for low intrinsic quadrature error of deg 6
else:
self._TET_SCALE = 1.0
self._TET_SCALE = wp.constant(self._TET_SCALE)
self._TET_OFFSET = wp.constant((1.0 - self._TET_SCALE) * wp.vec3(0.25, 0.25, 0.25))
@property
def name(self) -> str:
return f"Tet_P{self.ORDER}d"
def make_node_coords_in_element(self):
node_coords_in_tet = self._tet_shape.make_node_coords_in_element()
TET_SCALE = self._TET_SCALE
TET_OFFSET = self._TET_OFFSET
@cache.dynamic_func(suffix=self.name)
def node_coords_in_element(
node_index_in_elt: int,
):
tet_coords = node_coords_in_tet(node_index_in_elt)
return TET_SCALE * tet_coords + TET_OFFSET
return node_coords_in_element
def make_node_quadrature_weight(self):
# Intrinsic quadrature -- precomputed integral of node shape functions
        # over the element. Order equal to self.ORDER
if self.ORDER == 2:
vertex_weight = 0.07499641
edge_weight = 0.11666908
face_interior_weight = 0.0
elif self.ORDER == 3:
vertex_weight = 0.03345134
edge_weight = 0.04521887
face_interior_weight = 0.08089206
else:
vertex_weight = 1.0 / self.NODES_PER_ELEMENT
edge_weight = 1.0 / self.NODES_PER_ELEMENT
face_interior_weight = 1.0 / self.NODES_PER_ELEMENT
VERTEX_WEIGHT = wp.constant(vertex_weight)
EDGE_WEIGHT = wp.constant(edge_weight)
FACE_INTERIOR_WEIGHT = wp.constant(face_interior_weight)
@cache.dynamic_func(suffix=self.name)
def node_quadrature_weight(node_index_in_element: int):
node_type, type_index = self._tet_shape.node_type_and_type_index(node_index_in_element)
if node_type == TetrahedronPolynomialShapeFunctions.VERTEX:
return VERTEX_WEIGHT
elif node_type == TetrahedronPolynomialShapeFunctions.EDGE:
return EDGE_WEIGHT
return FACE_INTERIOR_WEIGHT
return node_quadrature_weight
def make_trace_node_quadrature_weight(self):
# Non-conforming, zero measure on sides
@wp.func
def zero(node_index_in_elt: int):
return 0.0
return zero
def make_element_inner_weight(self):
tet_inner_weight = self._tet_shape.make_element_inner_weight()
TET_SCALE = self._TET_SCALE
TET_OFFSET = self._TET_OFFSET
@cache.dynamic_func(suffix=self.name)
def element_inner_weight(
coords: Coords,
node_index_in_elt: int,
):
tet_coords = (coords - TET_OFFSET) / TET_SCALE
return tet_inner_weight(tet_coords, node_index_in_elt)
return element_inner_weight
def make_element_inner_weight_gradient(self):
tet_inner_weight_gradient = self._tet_shape.make_element_inner_weight_gradient()
TET_SCALE = self._TET_SCALE
TET_OFFSET = self._TET_OFFSET
@cache.dynamic_func(suffix=self.name)
def element_inner_weight_gradient(
coords: Coords,
node_index_in_elt: int,
):
tet_coords = (coords - TET_OFFSET) / TET_SCALE
grad = tet_inner_weight_gradient(tet_coords, node_index_in_elt)
return grad / TET_SCALE
return element_inner_weight_gradient
| 20,400 | Python | 35.04417 | 117 | 0.531667 |
NVIDIA/warp/warp/fem/space/shape/shape_function.py | import warp as wp
from warp.fem import cache
from warp.fem.geometry import Element
from warp.fem.types import Coords
class ShapeFunction:
"""Interface class for defining scalar-valued shape functions over a single element"""
ORDER: int
"""Maximum degree of the polynomials used to define the shape function"""
NODES_PER_ELEMENT: int
"""Number of shape function nodes"""
@property
def name(self) -> str:
"""Unique name encoding all parameters defining the shape function"""
raise NotImplementedError()
def make_node_coords_in_element(self):
"""Creates a device function returning the coordinates of each node"""
raise NotImplementedError()
def make_node_quadrature_weight(self):
"""Creates a device function returning the weight of each node when use as a quadrature point over the element"""
raise NotImplementedError()
def make_trace_node_quadrature_weight(self):
"""Creates a device function returning the weight of each node when use as a quadrature point over the element boundary"""
raise NotImplementedError()
def make_element_inner_weight(self):
"""Creates a device function returning the value of the shape function associated to a given node at given coordinates"""
raise NotImplementedError()
def make_element_inner_weight_gradient(self):
"""Creates a device function returning the gradient of the shape function associated to a given node at given coordinates"""
raise NotImplementedError()
class ConstantShapeFunction:
"""Shape function that is constant over the element"""
def __init__(self, element: Element, space_dimension: int):
self._element = element
self._dimension = space_dimension
self.ORDER = wp.constant(0)
self.NODES_PER_ELEMENT = wp.constant(1)
coords, _ = element.instantiate_quadrature(order=0, family=None)
self.COORDS = wp.constant(coords[0])
@property
def name(self) -> str:
return f"{self._element.__class__.__name__}{self._dimension}"
def make_node_coords_in_element(self):
COORDS = self.COORDS
@cache.dynamic_func(suffix=self.name)
def node_coords_in_element(
node_index_in_elt: int,
):
return COORDS
return node_coords_in_element
@wp.func
def _node_quadrature_weight(
node_index_in_elt: int,
):
return 1.0
def make_node_quadrature_weight(self):
return ConstantShapeFunction._node_quadrature_weight
def make_trace_node_quadrature_weight(self):
return ConstantShapeFunction._node_quadrature_weight
@wp.func
def _element_inner_weight(
coords: Coords,
node_index_in_elt: int,
):
return 1.0
def make_element_inner_weight(self):
return ConstantShapeFunction._element_inner_weight
def make_element_inner_weight_gradient(self):
grad_type = wp.vec(length=self._dimension, dtype=float)
@cache.dynamic_func(suffix=self.name)
def element_inner_weight_gradient(
coords: Coords,
node_index_in_elt: int,
):
return grad_type(0.0)
return element_inner_weight_gradient
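# Example (sketch; assumes `Square` is importable from warp.fem.geometry.element):
# a piecewise-constant shape function over a 2D reference square -- one node per
# element, with weight identically 1 and gradient identically zero.
def _example_constant_shape():
    from warp.fem.geometry.element import Square  # assumed import path

    sf = ConstantShapeFunction(Square(), space_dimension=2)
    return sf.make_element_inner_weight(), sf.make_element_inner_weight_gradient()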
| 3,278 | Python | 30.834951 | 132 | 0.664124 |
NVIDIA/warp/warp/fem/geometry/geometry.py | from typing import Any
import warp as wp
from warp.fem.types import Coords, ElementIndex, Sample
from .element import Element
class Geometry:
"""
Interface class for discrete geometries
A geometry is composed of cells and sides. Sides may be boundary or interior (between cells).
"""
dimension: int = 0
def cell_count(self):
"""Number of cells in the geometry"""
raise NotImplementedError
def side_count(self):
"""Number of sides in the geometry"""
raise NotImplementedError
def boundary_side_count(self):
"""Number of boundary sides (sides with a single neighbour cell) in the geometry"""
raise NotImplementedError
def reference_cell(self) -> Element:
"""Prototypical element for a cell"""
raise NotImplementedError
def reference_side(self) -> Element:
"""Prototypical element for a side"""
raise NotImplementedError
@property
def name(self) -> str:
return self.__class__.__name__
def __str__(self) -> str:
return self.name
CellArg: wp.codegen.Struct
"""Structure containing arguments to be passed to device functions evaluating cell-related quantities"""
SideArg: wp.codegen.Struct
"""Structure containing arguments to be passed to device functions evaluating side-related quantities"""
SideIndexArg: wp.codegen.Struct
"""Structure containing arguments to be passed to device functions for indexing sides"""
    def cell_arg_value(self, device) -> "Geometry.CellArg":
"""Value of the arguments to be passed to cell-related device functions"""
raise NotImplementedError
@staticmethod
def cell_position(args: "Geometry.CellArg", s: "Sample"):
"""Device function returning the world position of a cell sample point"""
raise NotImplementedError
@staticmethod
def cell_deformation_gradient(args: "Geometry.CellArg", s: "Sample"):
"""Device function returning the transpose of the gradient of world position with respect to reference cell"""
raise NotImplementedError
@staticmethod
def cell_inverse_deformation_gradient(args: "Geometry.CellArg", cell_index: ElementIndex, coords: Coords):
"""Device function returning the matrix right-transforming a gradient w.r.t. cell space to a gradient w.r.t. world space
(i.e. the inverse deformation gradient)
"""
raise NotImplementedError
@staticmethod
def cell_lookup(args: "Geometry.CellArg", pos: Any):
"""Device function returning the cell sample point corresponding to a world position"""
raise NotImplementedError
@staticmethod
def cell_lookup(args: "Geometry.CellArg", pos: Any, guess: "Sample"):
"""Device function returning the cell sample point corresponding to a world position. Can use guess for faster lookup"""
raise NotImplementedError
@staticmethod
def cell_measure(args: "Geometry.CellArg", s: "Sample"):
"""Device function returning the measure determinant (e.g. volume, area) at a given point"""
raise NotImplementedError
@wp.func
def cell_measure_ratio(args: Any, s: Sample):
return 1.0
@staticmethod
def cell_normal(args: "Geometry.CellArg", s: "Sample"):
"""Device function returning the element normal at a sample point.
For elements with the same dimension as the embedding space, this will be zero."""
raise NotImplementedError
    def side_arg_value(self, device) -> "Geometry.SideArg":
"""Value of the arguments to be passed to side-related device functions"""
raise NotImplementedError
@staticmethod
def boundary_side_index(args: "Geometry.SideIndexArg", boundary_side_index: int):
"""Device function returning the side index corresponding to a boundary side"""
raise NotImplementedError
@staticmethod
def side_position(args: "Geometry.SideArg", s: "Sample"):
"""Device function returning the side position at a sample point"""
raise NotImplementedError
@staticmethod
    def side_deformation_gradient(args: "Geometry.SideArg", s: "Sample"):
        """Device function returning the gradient of world position with respect to the reference side"""
raise NotImplementedError
@staticmethod
    def side_inner_inverse_deformation_gradient(args: "Geometry.SideArg", side_index: ElementIndex, coords: Coords):
"""Device function returning the matrix right-transforming a gradient w.r.t. inner cell space to a gradient w.r.t. world space
(i.e. the inverse deformation gradient)
"""
raise NotImplementedError
@staticmethod
    def side_outer_inverse_deformation_gradient(args: "Geometry.SideArg", side_index: ElementIndex, coords: Coords):
"""Device function returning the matrix right-transforming a gradient w.r.t. outer cell space to a gradient w.r.t. world space
(i.e. the inverse deformation gradient)
"""
raise NotImplementedError
@staticmethod
def side_measure(args: "Geometry.SideArg", s: "Sample"):
"""Device function returning the measure determinant (e.g. volume, area) at a given point"""
raise NotImplementedError
@staticmethod
def side_measure_ratio(args: "Geometry.SideArg", s: "Sample"):
"""Device function returning the ratio of the measure of a side to that of its neighbour cells"""
raise NotImplementedError
@staticmethod
def side_normal(args: "Geometry.SideArg", s: "Sample"):
"""Device function returning the element normal at a sample point"""
raise NotImplementedError
@staticmethod
def side_inner_cell_index(args: "Geometry.SideArg", side_index: ElementIndex):
"""Device function returning the inner cell index for a given side"""
raise NotImplementedError
@staticmethod
def side_outer_cell_index(args: "Geometry.SideArg", side_index: ElementIndex):
"""Device function returning the outer cell index for a given side"""
raise NotImplementedError
@staticmethod
def side_inner_cell_coords(args: "Geometry.SideArg", side_index: ElementIndex, side_coords: Coords):
"""Device function returning the coordinates of a point on a side in the inner cell"""
raise NotImplementedError
@staticmethod
def side_outer_cell_coords(args: "Geometry.SideArg", side_index: ElementIndex, side_coords: Coords):
"""Device function returning the coordinates of a point on a side in the outer cell"""
raise NotImplementedError
@staticmethod
def side_from_cell_coords(
args: "Geometry.SideArg",
side_index: ElementIndex,
element_index: ElementIndex,
element_coords: Coords,
):
"""Device function converting coordinates on a cell to coordinates on a side, or ``OUTSIDE``"""
raise NotImplementedError
@staticmethod
def side_to_cell_arg(side_arg: "Geometry.SideArg"):
"""Device function converting a side-related argument value to a cell-related argument value, for promoting trace samples to the full space"""
raise NotImplementedError
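# Sketch (hypothetical helper, mirroring how warp.fem consumes this interface):
# concrete subclasses provide the `CellArg` struct and static device functions,
# which dynamically-generated kernels resolve at code-generation time. The
# `wp.vec3` output assumes a 3D embedding space.
def _make_cell_position_kernel(geo: "Geometry"):
    from warp.fem import cache
    from warp.fem.types import make_free_sample

    @cache.dynamic_kernel(suffix=geo.name)
    def cell_position_kernel(
        cell_arg: geo.CellArg,
        coords: wp.array(dtype=Coords),
        out: wp.array(dtype=wp.vec3),
    ):
        i = wp.tid()
        out[i] = geo.cell_position(cell_arg, make_free_sample(i, coords[i]))

    return cell_position_kernel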
| 7,260 | Python | 38.248648 | 150 | 0.689532 |
NVIDIA/warp/warp/fem/geometry/quadmesh_2d.py | from typing import Optional
import warp as wp
from warp.fem.cache import (
TemporaryStore,
borrow_temporary,
borrow_temporary_like,
cached_arg_value,
)
from warp.fem.types import OUTSIDE, Coords, ElementIndex, Sample, make_free_sample
from .element import LinearEdge, Square
from .geometry import Geometry
@wp.struct
class Quadmesh2DCellArg:
quad_vertex_indices: wp.array2d(dtype=int)
positions: wp.array(dtype=wp.vec2)
# for neighbor cell lookup
vertex_quad_offsets: wp.array(dtype=int)
vertex_quad_indices: wp.array(dtype=int)
@wp.struct
class Quadmesh2DSideArg:
cell_arg: Quadmesh2DCellArg
edge_vertex_indices: wp.array(dtype=wp.vec2i)
edge_quad_indices: wp.array(dtype=wp.vec2i)
class Quadmesh2D(Geometry):
"""Two-dimensional quadrilateral mesh geometry"""
dimension = 2
def __init__(
self, quad_vertex_indices: wp.array, positions: wp.array, temporary_store: Optional[TemporaryStore] = None
):
"""
Constructs a two-dimensional quadrilateral mesh.
Args:
            quad_vertex_indices: warp array of shape (num_quads, 4) containing vertex indices for each quad, in counter-clockwise order
positions: warp array of shape (num_vertices, 2) containing 2d position for each vertex
temporary_store: shared pool from which to allocate temporary arrays
"""
self.quad_vertex_indices = quad_vertex_indices
self.positions = positions
self._edge_vertex_indices: wp.array = None
self._edge_quad_indices: wp.array = None
self._vertex_quad_offsets: wp.array = None
self._vertex_quad_indices: wp.array = None
self._build_topology(temporary_store)
def cell_count(self):
return self.quad_vertex_indices.shape[0]
def vertex_count(self):
return self.positions.shape[0]
def side_count(self):
return self._edge_vertex_indices.shape[0]
def boundary_side_count(self):
return self._boundary_edge_indices.shape[0]
def reference_cell(self) -> Square:
return Square()
def reference_side(self) -> LinearEdge:
return LinearEdge()
@property
def edge_quad_indices(self) -> wp.array:
return self._edge_quad_indices
@property
def edge_vertex_indices(self) -> wp.array:
return self._edge_vertex_indices
CellArg = Quadmesh2DCellArg
SideArg = Quadmesh2DSideArg
@wp.struct
class SideIndexArg:
boundary_edge_indices: wp.array(dtype=int)
# Geometry device interface
@cached_arg_value
def cell_arg_value(self, device) -> CellArg:
args = self.CellArg()
args.quad_vertex_indices = self.quad_vertex_indices.to(device)
args.positions = self.positions.to(device)
args.vertex_quad_offsets = self._vertex_quad_offsets.to(device)
args.vertex_quad_indices = self._vertex_quad_indices.to(device)
return args
@wp.func
def cell_position(args: CellArg, s: Sample):
quad_idx = args.quad_vertex_indices[s.element_index]
w_p = s.element_coords
w_m = Coords(1.0) - s.element_coords
# 0 : m m
# 1 : p m
# 2 : p p
# 3 : m p
return (
w_m[0] * w_m[1] * args.positions[quad_idx[0]]
+ w_p[0] * w_m[1] * args.positions[quad_idx[1]]
+ w_p[0] * w_p[1] * args.positions[quad_idx[2]]
+ w_m[0] * w_p[1] * args.positions[quad_idx[3]]
)
@wp.func
def cell_deformation_gradient(cell_arg: CellArg, s: Sample):
"""Deformation gradient at `coords`"""
quad_idx = cell_arg.quad_vertex_indices[s.element_index]
w_p = s.element_coords
w_m = Coords(1.0) - s.element_coords
return (
wp.outer(cell_arg.positions[quad_idx[0]], wp.vec2(-w_m[1], -w_m[0]))
+ wp.outer(cell_arg.positions[quad_idx[1]], wp.vec2(w_m[1], -w_p[0]))
+ wp.outer(cell_arg.positions[quad_idx[2]], wp.vec2(w_p[1], w_p[0]))
+ wp.outer(cell_arg.positions[quad_idx[3]], wp.vec2(-w_p[1], w_m[0]))
)
@wp.func
def cell_inverse_deformation_gradient(cell_arg: CellArg, s: Sample):
return wp.inverse(Quadmesh2D.cell_deformation_gradient(cell_arg, s))
@wp.func
def cell_measure(args: CellArg, s: Sample):
return wp.abs(wp.determinant(Quadmesh2D.cell_deformation_gradient(args, s)))
@wp.func
def cell_normal(args: CellArg, s: Sample):
return wp.vec2(0.0)
@cached_arg_value
def side_index_arg_value(self, device) -> SideIndexArg:
args = self.SideIndexArg()
args.boundary_edge_indices = self._boundary_edge_indices.to(device)
return args
@wp.func
def boundary_side_index(args: SideIndexArg, boundary_side_index: int):
"""Boundary side to side index"""
return args.boundary_edge_indices[boundary_side_index]
@cached_arg_value
    def side_arg_value(self, device) -> SideArg:
args = self.SideArg()
args.cell_arg = self.cell_arg_value(device)
args.edge_vertex_indices = self._edge_vertex_indices.to(device)
args.edge_quad_indices = self._edge_quad_indices.to(device)
return args
@wp.func
def side_position(args: SideArg, s: Sample):
edge_idx = args.edge_vertex_indices[s.element_index]
return (1.0 - s.element_coords[0]) * args.cell_arg.positions[edge_idx[0]] + s.element_coords[
0
] * args.cell_arg.positions[edge_idx[1]]
@wp.func
def side_deformation_gradient(args: SideArg, s: Sample):
edge_idx = args.edge_vertex_indices[s.element_index]
v0 = args.cell_arg.positions[edge_idx[0]]
v1 = args.cell_arg.positions[edge_idx[1]]
return v1 - v0
@wp.func
def side_inner_inverse_deformation_gradient(args: SideArg, s: Sample):
cell_index = Quadmesh2D.side_inner_cell_index(args, s.element_index)
cell_coords = Quadmesh2D.side_inner_cell_coords(args, s.element_index, s.element_coords)
return Quadmesh2D.cell_inverse_deformation_gradient(args.cell_arg, make_free_sample(cell_index, cell_coords))
@wp.func
def side_outer_inverse_deformation_gradient(args: SideArg, s: Sample):
cell_index = Quadmesh2D.side_outer_cell_index(args, s.element_index)
cell_coords = Quadmesh2D.side_outer_cell_coords(args, s.element_index, s.element_coords)
return Quadmesh2D.cell_inverse_deformation_gradient(args.cell_arg, make_free_sample(cell_index, cell_coords))
@wp.func
def side_measure(args: SideArg, s: Sample):
edge_idx = args.edge_vertex_indices[s.element_index]
v0 = args.cell_arg.positions[edge_idx[0]]
v1 = args.cell_arg.positions[edge_idx[1]]
return wp.length(v1 - v0)
@wp.func
def side_measure_ratio(args: SideArg, s: Sample):
inner = Quadmesh2D.side_inner_cell_index(args, s.element_index)
outer = Quadmesh2D.side_outer_cell_index(args, s.element_index)
inner_coords = Quadmesh2D.side_inner_cell_coords(args, s.element_index, s.element_coords)
outer_coords = Quadmesh2D.side_outer_cell_coords(args, s.element_index, s.element_coords)
return Quadmesh2D.side_measure(args, s) / wp.min(
Quadmesh2D.cell_measure(args.cell_arg, make_free_sample(inner, inner_coords)),
Quadmesh2D.cell_measure(args.cell_arg, make_free_sample(outer, outer_coords)),
)
@wp.func
def side_normal(args: SideArg, s: Sample):
edge_idx = args.edge_vertex_indices[s.element_index]
v0 = args.cell_arg.positions[edge_idx[0]]
v1 = args.cell_arg.positions[edge_idx[1]]
e = v1 - v0
return wp.normalize(wp.vec2(-e[1], e[0]))
@wp.func
def side_inner_cell_index(arg: SideArg, side_index: ElementIndex):
return arg.edge_quad_indices[side_index][0]
@wp.func
def side_outer_cell_index(arg: SideArg, side_index: ElementIndex):
return arg.edge_quad_indices[side_index][1]
@wp.func
def edge_to_quad_coords(args: SideArg, side_index: ElementIndex, quad_index: ElementIndex, side_coords: Coords):
edge_vidx = args.edge_vertex_indices[side_index]
quad_vidx = args.cell_arg.quad_vertex_indices[quad_index]
vs = edge_vidx[0]
ve = edge_vidx[1]
s = side_coords[0]
if vs == quad_vidx[0]:
return wp.select(ve == quad_vidx[1], Coords(0.0, s, 0.0), Coords(s, 0.0, 0.0))
elif vs == quad_vidx[1]:
return wp.select(ve == quad_vidx[2], Coords(1.0 - s, 0.0, 0.0), Coords(1.0, s, 0.0))
elif vs == quad_vidx[2]:
return wp.select(ve == quad_vidx[3], Coords(1.0, 1.0 - s, 0.0), Coords(1.0 - s, 1.0, 0.0))
return wp.select(ve == quad_vidx[0], Coords(s, 1.0, 0.0), Coords(0.0, 1.0 - s, 0.0))
@wp.func
def side_inner_cell_coords(args: SideArg, side_index: ElementIndex, side_coords: Coords):
inner_cell_index = Quadmesh2D.side_inner_cell_index(args, side_index)
return Quadmesh2D.edge_to_quad_coords(args, side_index, inner_cell_index, side_coords)
@wp.func
def side_outer_cell_coords(args: SideArg, side_index: ElementIndex, side_coords: Coords):
outer_cell_index = Quadmesh2D.side_outer_cell_index(args, side_index)
return Quadmesh2D.edge_to_quad_coords(args, side_index, outer_cell_index, side_coords)
@wp.func
def side_from_cell_coords(
args: SideArg,
side_index: ElementIndex,
quad_index: ElementIndex,
quad_coords: Coords,
):
edge_vidx = args.edge_vertex_indices[side_index]
quad_vidx = args.cell_arg.quad_vertex_indices[quad_index]
vs = edge_vidx[0]
ve = edge_vidx[1]
cx = quad_coords[0]
cy = quad_coords[1]
if vs == quad_vidx[0]:
oc = wp.select(ve == quad_vidx[1], cx, cy)
ec = wp.select(ve == quad_vidx[1], cy, cx)
elif vs == quad_vidx[1]:
oc = wp.select(ve == quad_vidx[2], cy, 1.0 - cx)
ec = wp.select(ve == quad_vidx[2], 1.0 - cx, cy)
elif vs == quad_vidx[2]:
oc = wp.select(ve == quad_vidx[3], 1.0 - cx, 1.0 - cy)
ec = wp.select(ve == quad_vidx[3], 1.0 - cy, 1.0 - cx)
else:
oc = wp.select(ve == quad_vidx[0], 1.0 - cy, cx)
ec = wp.select(ve == quad_vidx[0], cx, 1.0 - cy)
return wp.select(oc == 0.0, Coords(OUTSIDE), Coords(ec, 0.0, 0.0))
@wp.func
def side_to_cell_arg(side_arg: SideArg):
return side_arg.cell_arg
def _build_topology(self, temporary_store: TemporaryStore):
from warp.fem.utils import compress_node_indices, masked_indices
from warp.utils import array_scan
device = self.quad_vertex_indices.device
vertex_quad_offsets, vertex_quad_indices, _, __ = compress_node_indices(
self.vertex_count(), self.quad_vertex_indices, temporary_store=temporary_store
)
self._vertex_quad_offsets = vertex_quad_offsets.detach()
self._vertex_quad_indices = vertex_quad_indices.detach()
vertex_start_edge_count = borrow_temporary(temporary_store, dtype=int, device=device, shape=self.vertex_count())
vertex_start_edge_count.array.zero_()
vertex_start_edge_offsets = borrow_temporary_like(vertex_start_edge_count, temporary_store=temporary_store)
vertex_edge_ends = borrow_temporary(temporary_store, dtype=int, device=device, shape=(4 * self.cell_count()))
vertex_edge_quads = borrow_temporary(
temporary_store, dtype=int, device=device, shape=(4 * self.cell_count(), 2)
)
# Count face edges starting at each vertex
wp.launch(
kernel=Quadmesh2D._count_starting_edges_kernel,
device=device,
dim=self.cell_count(),
inputs=[self.quad_vertex_indices, vertex_start_edge_count.array],
)
array_scan(in_array=vertex_start_edge_count.array, out_array=vertex_start_edge_offsets.array, inclusive=False)
# Count number of unique edges (deduplicate across faces)
vertex_unique_edge_count = vertex_start_edge_count
wp.launch(
kernel=Quadmesh2D._count_unique_starting_edges_kernel,
device=device,
dim=self.vertex_count(),
inputs=[
self._vertex_quad_offsets,
self._vertex_quad_indices,
self.quad_vertex_indices,
vertex_start_edge_offsets.array,
vertex_unique_edge_count.array,
vertex_edge_ends.array,
vertex_edge_quads.array,
],
)
vertex_unique_edge_offsets = borrow_temporary_like(vertex_start_edge_offsets, temporary_store=temporary_store)
array_scan(in_array=vertex_start_edge_count.array, out_array=vertex_unique_edge_offsets.array, inclusive=False)
# Get back edge count to host
if device.is_cuda:
edge_count = borrow_temporary(temporary_store, shape=(1,), dtype=int, device="cpu", pinned=True)
# Last vertex will not own any edge, so its count will be zero; just fetching last prefix count is ok
wp.copy(
dest=edge_count.array, src=vertex_unique_edge_offsets.array, src_offset=self.vertex_count() - 1, count=1
)
wp.synchronize_stream(wp.get_stream(device))
edge_count = int(edge_count.array.numpy()[0])
else:
edge_count = int(vertex_unique_edge_offsets.array.numpy()[self.vertex_count() - 1])
self._edge_vertex_indices = wp.empty(shape=(edge_count,), dtype=wp.vec2i, device=device)
self._edge_quad_indices = wp.empty(shape=(edge_count,), dtype=wp.vec2i, device=device)
boundary_mask = borrow_temporary(temporary_store=temporary_store, shape=(edge_count,), dtype=int, device=device)
# Compress edge data
wp.launch(
kernel=Quadmesh2D._compress_edges_kernel,
device=device,
dim=self.vertex_count(),
inputs=[
vertex_start_edge_offsets.array,
vertex_unique_edge_offsets.array,
vertex_unique_edge_count.array,
vertex_edge_ends.array,
vertex_edge_quads.array,
self._edge_vertex_indices,
self._edge_quad_indices,
boundary_mask.array,
],
)
vertex_start_edge_offsets.release()
vertex_unique_edge_offsets.release()
vertex_unique_edge_count.release()
vertex_edge_ends.release()
vertex_edge_quads.release()
# Flip normals if necessary
wp.launch(
kernel=Quadmesh2D._flip_edge_normals,
device=device,
dim=self.side_count(),
inputs=[self._edge_vertex_indices, self._edge_quad_indices, self.quad_vertex_indices, self.positions],
)
boundary_edge_indices, _ = masked_indices(boundary_mask.array, temporary_store=temporary_store)
self._boundary_edge_indices = boundary_edge_indices.detach()
boundary_mask.release()
@wp.kernel
def _count_starting_edges_kernel(
quad_vertex_indices: wp.array2d(dtype=int), vertex_start_edge_count: wp.array(dtype=int)
):
t = wp.tid()
for k in range(4):
v0 = quad_vertex_indices[t, k]
v1 = quad_vertex_indices[t, (k + 1) % 4]
if v0 < v1:
wp.atomic_add(vertex_start_edge_count, v0, 1)
else:
wp.atomic_add(vertex_start_edge_count, v1, 1)
@wp.func
def _find(
needle: int,
values: wp.array(dtype=int),
beg: int,
end: int,
):
for i in range(beg, end):
if values[i] == needle:
return i
return -1
@wp.kernel
def _count_unique_starting_edges_kernel(
vertex_quad_offsets: wp.array(dtype=int),
vertex_quad_indices: wp.array(dtype=int),
quad_vertex_indices: wp.array2d(dtype=int),
vertex_start_edge_offsets: wp.array(dtype=int),
vertex_start_edge_count: wp.array(dtype=int),
edge_ends: wp.array(dtype=int),
edge_quads: wp.array2d(dtype=int),
):
v = wp.tid()
edge_beg = vertex_start_edge_offsets[v]
quad_beg = vertex_quad_offsets[v]
quad_end = vertex_quad_offsets[v + 1]
edge_cur = edge_beg
for quad in range(quad_beg, quad_end):
q = vertex_quad_indices[quad]
for k in range(4):
v0 = quad_vertex_indices[q, k]
v1 = quad_vertex_indices[q, (k + 1) % 4]
if v == wp.min(v0, v1):
other_v = wp.max(v0, v1)
# Check if other_v has been seen
seen_idx = Quadmesh2D._find(other_v, edge_ends, edge_beg, edge_cur)
if seen_idx == -1:
edge_ends[edge_cur] = other_v
edge_quads[edge_cur, 0] = q
edge_quads[edge_cur, 1] = q
edge_cur += 1
else:
edge_quads[seen_idx, 1] = q
vertex_start_edge_count[v] = edge_cur - edge_beg
@wp.kernel
def _compress_edges_kernel(
vertex_start_edge_offsets: wp.array(dtype=int),
vertex_unique_edge_offsets: wp.array(dtype=int),
vertex_unique_edge_count: wp.array(dtype=int),
uncompressed_edge_ends: wp.array(dtype=int),
uncompressed_edge_quads: wp.array2d(dtype=int),
edge_vertex_indices: wp.array(dtype=wp.vec2i),
edge_quad_indices: wp.array(dtype=wp.vec2i),
boundary_mask: wp.array(dtype=int),
):
v = wp.tid()
start_beg = vertex_start_edge_offsets[v]
unique_beg = vertex_unique_edge_offsets[v]
unique_count = vertex_unique_edge_count[v]
for e in range(unique_count):
src_index = start_beg + e
edge_index = unique_beg + e
edge_vertex_indices[edge_index] = wp.vec2i(v, uncompressed_edge_ends[src_index])
q0 = uncompressed_edge_quads[src_index, 0]
q1 = uncompressed_edge_quads[src_index, 1]
edge_quad_indices[edge_index] = wp.vec2i(q0, q1)
if q0 == q1:
boundary_mask[edge_index] = 1
else:
boundary_mask[edge_index] = 0
@wp.kernel
def _flip_edge_normals(
edge_vertex_indices: wp.array(dtype=wp.vec2i),
edge_quad_indices: wp.array(dtype=wp.vec2i),
quad_vertex_indices: wp.array2d(dtype=int),
positions: wp.array(dtype=wp.vec2),
):
e = wp.tid()
        quad = edge_quad_indices[e][0]
        quad_vidx = quad_vertex_indices[quad]
edge_vidx = edge_vertex_indices[e]
quad_centroid = (
positions[quad_vidx[0]] + positions[quad_vidx[1]] + positions[quad_vidx[2]] + positions[quad_vidx[3]]
) / 4.0
v0 = positions[edge_vidx[0]]
v1 = positions[edge_vidx[1]]
edge_center = 0.5 * (v1 + v0)
edge_vec = v1 - v0
edge_normal = wp.vec2(-edge_vec[1], edge_vec[0])
        # if the edge normal points toward the first quad's centroid, flip indices
if wp.dot(quad_centroid - edge_center, edge_normal) > 0.0:
edge_vertex_indices[e] = wp.vec2i(edge_vidx[1], edge_vidx[0])
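# Example (sketch, not part of the original module): a two-quad strip sharing
# one interior edge. With the topology build above this yields 2 cells,
# 7 unique edges, and 6 boundary edges.
def _example_two_quad_mesh():
    import numpy as np

    positions = wp.array(
        np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0], [2.0, 1.0], [1.0, 1.0], [0.0, 1.0]], dtype=np.float32),
        dtype=wp.vec2,
    )
    # counter-clockwise vertex indices for each quad
    quads = wp.array(np.array([[0, 1, 4, 5], [1, 2, 3, 4]], dtype=np.int32), dtype=int)
    mesh = Quadmesh2D(quads, positions)
    return mesh.cell_count(), mesh.side_count(), mesh.boundary_side_count()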
| 19,553 | Python | 35.686679 | 134 | 0.602823 |
NVIDIA/warp/warp/fem/geometry/closest_point.py | from typing import Any
import warp as wp
from warp.fem.types import Coords
@wp.func
def project_on_seg_at_origin(q: Any, seg: Any, len_sq: float):
s = wp.clamp(wp.dot(q, seg) / len_sq, 0.0, 1.0)
return wp.length_sq(q - s * seg), s
@wp.func
def project_on_tri_at_origin(q: Any, e1: Any, e2: Any):
e1e1 = wp.dot(e1, e1)
e1e2 = wp.dot(e1, e2)
e2e2 = wp.dot(e2, e2)
det = e1e1 * e2e2 - e1e2 * e1e2
if det > e1e1 * e2e2 * 1.0e-6:
e1p = wp.dot(e1, q)
e2p = wp.dot(e2, q)
s = (e2e2 * e1p - e1e2 * e2p) / det
t = (e1e1 * e2p - e1e2 * e1p) / det
if s >= 0.0 and t >= 0.0 and s + t <= 1.0:
# point inside triangle (distance can be non-zero in 3D case)
return wp.length_sq(q - s * e1 - t * e2), Coords(1.0 - s - t, s, t)
d1, s1 = project_on_seg_at_origin(q, e1, e1e1)
d2, s2 = project_on_seg_at_origin(q, e2, e2e2)
d12, s12 = project_on_seg_at_origin(q - e1, e2 - e1, wp.length_sq(e2 - e1))
if d1 <= d2:
if d1 <= d12:
return d1, Coords(1.0 - s1, s1, 0.0)
elif d2 <= d12:
return d2, Coords(1.0 - s2, 0.0, s2)
return d12, Coords(0.0, 1.0 - s12, s12)
@wp.func
def project_on_tet_at_origin(q: wp.vec3, e1: wp.vec3, e2: wp.vec3, e3: wp.vec3):
mat = wp.inverse(wp.mat33(e1, e2, e3))
coords = mat * q
if wp.min(coords) >= 0.0 and coords[0] + coords[1] + coords[2] <= 1.0:
return 0.0, coords
# Not inside tet, compare closest point on each tri
d12, s12 = project_on_tri_at_origin(q, e1, e2)
d23, s23 = project_on_tri_at_origin(q, e2, e3)
d31, s31 = project_on_tri_at_origin(q, e3, e1)
d123, s123 = project_on_tri_at_origin(q - e1, e2 - e1, e3 - e1)
dmin = wp.min(wp.vec4(d12, d23, d31, d123))
if dmin == d12:
return dmin, Coords(s12[1], s12[2], 0.0)
elif dmin == d23:
return dmin, Coords(0.0, s23[1], s23[2])
elif dmin == d31:
return dmin, Coords(s31[2], 0.0, s31[1])
else:
return dmin, s123
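# Example (sketch, hypothetical kernel): closest point on a 3D triangle
# (p0, p1, p2) to each query point, by shifting the problem so that p0 sits at
# the origin before calling project_on_tri_at_origin.
@wp.kernel
def _closest_point_on_tri_kernel(
    queries: wp.array(dtype=wp.vec3),
    p0: wp.vec3,
    p1: wp.vec3,
    p2: wp.vec3,
    dist_sq: wp.array(dtype=float),
    bary: wp.array(dtype=Coords),
):
    i = wp.tid()
    d2, coords = project_on_tri_at_origin(queries[i] - p0, p1 - p0, p2 - p0)
    dist_sq[i] = d2
    bary[i] = coords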
| 2,028 | Python | 27.985714 | 80 | 0.54783 |
NVIDIA/warp/warp/fem/geometry/partition.py | from typing import Any
import warp as wp
from warp.fem.cache import TemporaryStore, borrow_temporary, cached_arg_value
from warp.fem.types import NULL_ELEMENT_INDEX, ElementIndex
from warp.fem.utils import masked_indices
from .geometry import Geometry
wp.set_module_options({"enable_backward": False})
class GeometryPartition:
"""Base class for geometry partitions, i.e. subset of cells and sides"""
class CellArg:
pass
class SideArg:
pass
def __init__(self, geometry: Geometry):
self.geometry = geometry
def cell_count(self) -> int:
"""Number of cells that are 'owned' by this partition"""
raise NotImplementedError()
def side_count(self) -> int:
"""Number of sides that are 'owned' by this partition"""
raise NotImplementedError()
def boundary_side_count(self) -> int:
"""Number of geo-boundary sides that are 'owned' by this partition"""
raise NotImplementedError()
def frontier_side_count(self) -> int:
"""Number of sides with neighbors owned by this and another partition"""
raise NotImplementedError()
@property
def name(self) -> str:
return f"{self.geometry.name}_{self.__class__.__name__}"
def __str__(self) -> str:
return self.name
def cell_arg_value(self, device):
raise NotImplementedError()
def side_arg_value(self, device):
raise NotImplementedError()
@staticmethod
def cell_index(args: CellArg, partition_cell_index: int):
"""Index in the geometry of a partition cell"""
raise NotImplementedError()
@staticmethod
def partition_cell_index(args: CellArg, cell_index: int):
"""Index of a geometry cell in the partition (or ``NULL_ELEMENT_INDEX``)"""
raise NotImplementedError()
@staticmethod
def side_index(args: SideArg, partition_side_index: int):
"""Partition side to side index"""
raise NotImplementedError()
@staticmethod
def boundary_side_index(args: SideArg, boundary_side_index: int):
"""Boundary side to side index"""
raise NotImplementedError()
@staticmethod
def frontier_side_index(args: SideArg, frontier_side_index: int):
"""Frontier side to side index"""
raise NotImplementedError()
class WholeGeometryPartition(GeometryPartition):
"""Trivial (NOP) partition"""
def __init__(
self,
geometry: Geometry,
):
super().__init__(geometry)
self.SideArg = geometry.SideIndexArg
self.side_arg_value = geometry.side_index_arg_value
self.cell_index = WholeGeometryPartition._identity_element_index
self.partition_cell_index = WholeGeometryPartition._identity_element_index
self.side_index = WholeGeometryPartition._identity_element_index
self.boundary_side_index = geometry.boundary_side_index
self.frontier_side_index = WholeGeometryPartition._identity_element_index
def __eq__(self, other: GeometryPartition) -> bool:
# Ensures that two whole partition instances of the same geometry are considered equal
return isinstance(other, WholeGeometryPartition) and self.geometry == other.geometry
def cell_count(self) -> int:
return self.geometry.cell_count()
def side_count(self) -> int:
return self.geometry.side_count()
def boundary_side_count(self) -> int:
return self.geometry.boundary_side_count()
def frontier_side_count(self) -> int:
return 0
@wp.struct
class CellArg:
pass
def cell_arg_value(self, device):
arg = WholeGeometryPartition.CellArg()
return arg
@wp.func
def _identity_element_index(args: Any, idx: ElementIndex):
return idx
@property
def name(self) -> str:
return self.geometry.name
class CellBasedGeometryPartition(GeometryPartition):
"""Geometry partition based on a subset of cells. Interior, boundary and frontier sides are automatically categorized."""
def __init__(
self,
geometry: Geometry,
device=None,
):
super().__init__(geometry)
@wp.struct
class SideArg:
partition_side_indices: wp.array(dtype=int)
boundary_side_indices: wp.array(dtype=int)
frontier_side_indices: wp.array(dtype=int)
def side_count(self) -> int:
return self._partition_side_indices.array.shape[0]
def boundary_side_count(self) -> int:
return self._boundary_side_indices.array.shape[0]
def frontier_side_count(self) -> int:
return self._frontier_side_indices.array.shape[0]
@cached_arg_value
def side_arg_value(self, device):
        arg = CellBasedGeometryPartition.SideArg()
arg.partition_side_indices = self._partition_side_indices.array.to(device)
arg.boundary_side_indices = self._boundary_side_indices.array.to(device)
arg.frontier_side_indices = self._frontier_side_indices.array.to(device)
return arg
@wp.func
def side_index(args: SideArg, partition_side_index: int):
"""partition side to side index"""
return args.partition_side_indices[partition_side_index]
@wp.func
def boundary_side_index(args: SideArg, boundary_side_index: int):
"""Boundary side to side index"""
return args.boundary_side_indices[boundary_side_index]
@wp.func
def frontier_side_index(args: SideArg, frontier_side_index: int):
"""Frontier side to side index"""
return args.frontier_side_indices[frontier_side_index]
def compute_side_indices_from_cells(
self, cell_arg_value: Any, cell_inclusion_test_func: wp.Function, device, temporary_store: TemporaryStore = None
):
from warp.fem import cache
cell_arg_type = next(iter(cell_inclusion_test_func.input_types.values()))
@cache.dynamic_kernel(suffix=f"{self.geometry.name}_{cell_inclusion_test_func.key}")
def count_sides(
geo_arg: self.geometry.SideArg,
cell_arg_value: cell_arg_type,
partition_side_mask: wp.array(dtype=int),
boundary_side_mask: wp.array(dtype=int),
frontier_side_mask: wp.array(dtype=int),
):
side_index = wp.tid()
inner_cell_index = self.geometry.side_inner_cell_index(geo_arg, side_index)
outer_cell_index = self.geometry.side_outer_cell_index(geo_arg, side_index)
inner_in = cell_inclusion_test_func(cell_arg_value, inner_cell_index)
outer_in = cell_inclusion_test_func(cell_arg_value, outer_cell_index)
if inner_in:
# Inner neighbor in partition; count as partition side
partition_side_mask[side_index] = 1
                # Inner and outer elements are the same -- this is a boundary side
if inner_cell_index == outer_cell_index:
boundary_side_mask[side_index] = 1
if inner_in != outer_in:
# Exactly one neighbor in partition; count as frontier side
frontier_side_mask[side_index] = 1
partition_side_mask = borrow_temporary(
temporary_store,
shape=(self.geometry.side_count(),),
dtype=int,
device=device,
)
boundary_side_mask = borrow_temporary(
temporary_store,
shape=(self.geometry.side_count(),),
dtype=int,
device=device,
)
frontier_side_mask = borrow_temporary(
temporary_store,
shape=(self.geometry.side_count(),),
dtype=int,
device=device,
)
partition_side_mask.array.zero_()
boundary_side_mask.array.zero_()
frontier_side_mask.array.zero_()
wp.launch(
dim=partition_side_mask.array.shape[0],
kernel=count_sides,
inputs=[
self.geometry.side_arg_value(device),
cell_arg_value,
partition_side_mask.array,
boundary_side_mask.array,
frontier_side_mask.array,
],
device=device,
)
# Convert counts to indices
self._partition_side_indices, _ = masked_indices(partition_side_mask.array, temporary_store=temporary_store)
self._boundary_side_indices, _ = masked_indices(boundary_side_mask.array, temporary_store=temporary_store)
self._frontier_side_indices, _ = masked_indices(frontier_side_mask.array, temporary_store=temporary_store)
partition_side_mask.release()
boundary_side_mask.release()
frontier_side_mask.release()
class LinearGeometryPartition(CellBasedGeometryPartition):
def __init__(
self,
geometry: Geometry,
partition_rank: int,
partition_count: int,
device=None,
temporary_store: TemporaryStore = None,
):
"""Creates a geometry partition by uniformly partionning cell indices
Args:
geometry: the geometry to partition
partition_rank: the index of the partition being created
partition_count: the number of partitions that will be created over the geometry
            device: Warp device on which to perform and store computations
            temporary_store: shared pool from which to allocate temporary arrays
"""
super().__init__(geometry)
total_cell_count = geometry.cell_count()
cells_per_partition = (total_cell_count + partition_count - 1) // partition_count
self.cell_begin = cells_per_partition * partition_rank
self.cell_end = min(self.cell_begin + cells_per_partition, total_cell_count)
super().compute_side_indices_from_cells(
self.cell_arg_value(device),
LinearGeometryPartition._cell_inclusion_test,
device,
temporary_store=temporary_store,
)
def cell_count(self) -> int:
return self.cell_end - self.cell_begin
@wp.struct
class CellArg:
cell_begin: int
cell_end: int
def cell_arg_value(self, device):
arg = LinearGeometryPartition.CellArg()
arg.cell_begin = self.cell_begin
arg.cell_end = self.cell_end
return arg
@wp.func
def cell_index(args: CellArg, partition_cell_index: int):
"""Partition cell to cell index"""
return args.cell_begin + partition_cell_index
@wp.func
def partition_cell_index(args: CellArg, cell_index: int):
"""Partition cell to cell index"""
if cell_index > args.cell_end:
return NULL_ELEMENT_INDEX
partition_cell_index = cell_index - args.cell_begin
if partition_cell_index < 0:
return NULL_ELEMENT_INDEX
return partition_cell_index
@wp.func
def _cell_inclusion_test(arg: CellArg, cell_index: int):
return cell_index >= arg.cell_begin and cell_index < arg.cell_end
class ExplicitGeometryPartition(CellBasedGeometryPartition):
def __init__(self, geometry: Geometry, cell_mask: "wp.array(dtype=int)", temporary_store: TemporaryStore = None):
"""Creates a geometry partition by uniformly partionning cell indices
Args:
geometry: the geometry to partition
            cell_mask: warp array of length ``geometry.cell_count()`` indicating which cells are selected. Array values must be either ``1`` (selected) or ``0`` (not selected).
            temporary_store: shared pool from which to allocate temporary arrays
"""
super().__init__(geometry)
self._cell_mask = cell_mask
self._cells, self._partition_cells = masked_indices(self._cell_mask, temporary_store=temporary_store)
super().compute_side_indices_from_cells(
self._cell_mask,
ExplicitGeometryPartition._cell_inclusion_test,
self._cell_mask.device,
temporary_store=temporary_store,
)
def cell_count(self) -> int:
return self._cells.array.shape[0]
@wp.struct
class CellArg:
cell_index: wp.array(dtype=int)
partition_cell_index: wp.array(dtype=int)
@cached_arg_value
def cell_arg_value(self, device):
arg = ExplicitGeometryPartition.CellArg()
arg.cell_index = self._cells.array.to(device)
arg.partition_cell_index = self._partition_cells.array.to(device)
return arg
@wp.func
def cell_index(args: CellArg, partition_cell_index: int):
return args.cell_index[partition_cell_index]
@wp.func
def partition_cell_index(args: CellArg, cell_index: int):
return args.partition_cell_index[cell_index]
@wp.func
def _cell_inclusion_test(mask: wp.array(dtype=int), cell_index: int):
return mask[cell_index] > 0
| 12,705 | Python | 32.882667 | 176 | 0.632271 |
NVIDIA/warp/warp/fem/geometry/grid_3d.py | from typing import Any, Optional
import warp as wp
from warp.fem.cache import cached_arg_value
from warp.fem.types import OUTSIDE, Coords, ElementIndex, Sample, make_free_sample
from .element import Cube, Square
from .geometry import Geometry
@wp.struct
class Grid3DCellArg:
res: wp.vec3i
cell_size: wp.vec3
origin: wp.vec3
_mat32 = wp.mat(shape=(3, 2), dtype=float)
class Grid3D(Geometry):
"""Three-dimensional regular grid geometry"""
dimension = 3
Permutation = wp.types.matrix(shape=(3, 3), dtype=int)
LOC_TO_WORLD = wp.constant(Permutation(0, 1, 2, 1, 2, 0, 2, 0, 1))
WORLD_TO_LOC = wp.constant(Permutation(0, 1, 2, 2, 0, 1, 1, 2, 0))
def __init__(self, res: wp.vec3i, bounds_lo: Optional[wp.vec3] = None, bounds_hi: Optional[wp.vec3] = None):
"""Constructs a dense 3D grid
Args:
res: Resolution of the grid along each dimension
bounds_lo: Position of the lower bound of the axis-aligned grid
            bounds_hi: Position of the upper bound of the axis-aligned grid
"""
if bounds_lo is None:
bounds_lo = wp.vec3(0.0)
if bounds_hi is None:
bounds_hi = wp.vec3(1.0)
self.bounds_lo = bounds_lo
self.bounds_hi = bounds_hi
self._res = res
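    # Hedged usage sketch (illustrative): an 8x8x8 grid over the default unit cube.
    #
    #   grid = Grid3D(wp.vec3i(8, 8, 8))
    #   grid.cell_count()    # 512 cells
    #   grid.side_count()    # 3 * (9 * 8 * 8) = 1728 faces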
@property
def extents(self) -> wp.vec3:
        # Avoid using native sub due to the higher overhead of calling builtins from Python
return wp.vec3(
self.bounds_hi[0] - self.bounds_lo[0],
self.bounds_hi[1] - self.bounds_lo[1],
self.bounds_hi[2] - self.bounds_lo[2],
)
@property
def cell_size(self) -> wp.vec3:
ex = self.extents
return wp.vec3(
ex[0] / self.res[0],
ex[1] / self.res[1],
ex[2] / self.res[2],
)
def cell_count(self):
return self.res[0] * self.res[1] * self.res[2]
def vertex_count(self):
return (self.res[0] + 1) * (self.res[1] + 1) * (self.res[2] + 1)
def side_count(self):
return (
(self.res[0] + 1) * (self.res[1]) * (self.res[2])
+ (self.res[0]) * (self.res[1] + 1) * (self.res[2])
+ (self.res[0]) * (self.res[1]) * (self.res[2] + 1)
)
def edge_count(self):
return (
(self.res[0] + 1) * (self.res[1] + 1) * (self.res[2])
+ (self.res[0]) * (self.res[1] + 1) * (self.res[2] + 1)
+ (self.res[0] + 1) * (self.res[1]) * (self.res[2] + 1)
)
def boundary_side_count(self):
return 2 * (self.res[1]) * (self.res[2]) + (self.res[0]) * 2 * (self.res[2]) + (self.res[0]) * (self.res[1]) * 2
def reference_cell(self) -> Cube:
return Cube()
def reference_side(self) -> Square:
return Square()
@property
def res(self):
return self._res
@property
def origin(self):
return self.bounds_lo
@property
def strides(self):
return wp.vec3i(self.res[1] * self.res[2], self.res[2], 1)
# Utility device functions
CellArg = Grid3DCellArg
Cell = wp.vec3i
@wp.func
def _to_3d_index(strides: wp.vec2i, index: int):
x = index // strides[0]
y = (index - strides[0] * x) // strides[1]
z = index - strides[0] * x - strides[1] * y
return wp.vec3i(x, y, z)
@wp.func
def _from_3d_index(strides: wp.vec2i, index: wp.vec3i):
return strides[0] * index[0] + strides[1] * index[1] + index[2]
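    # Worked example (illustrative): for res = (2, 3, 4) the strides are (12, 4);
    # cell (1, 2, 3) linearizes to 12 * 1 + 4 * 2 + 3 = 23, and _to_3d_index
    # recovers (1, 2, 3) from 23.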
@wp.func
def cell_index(res: wp.vec3i, cell: Cell):
strides = wp.vec2i(res[1] * res[2], res[2])
return Grid3D._from_3d_index(strides, cell)
@wp.func
def get_cell(res: wp.vec3i, cell_index: ElementIndex):
strides = wp.vec2i(res[1] * res[2], res[2])
return Grid3D._to_3d_index(strides, cell_index)
@wp.struct
class Side:
axis: int # normal
origin: wp.vec3i # index of vertex at corner (0,0,0)
@wp.struct
class SideArg:
cell_count: int
axis_offsets: wp.vec3i
cell_arg: Grid3DCellArg
SideIndexArg = SideArg
@wp.func
def _world_to_local(axis: int, vec: Any):
return type(vec)(
vec[Grid3D.LOC_TO_WORLD[axis, 0]],
vec[Grid3D.LOC_TO_WORLD[axis, 1]],
vec[Grid3D.LOC_TO_WORLD[axis, 2]],
)
@wp.func
def _local_to_world(axis: int, vec: Any):
return type(vec)(
vec[Grid3D.WORLD_TO_LOC[axis, 0]],
vec[Grid3D.WORLD_TO_LOC[axis, 1]],
vec[Grid3D.WORLD_TO_LOC[axis, 2]],
)
@wp.func
def side_index(arg: SideArg, side: Side):
alt_axis = Grid3D.LOC_TO_WORLD[side.axis, 0]
if side.origin[0] == arg.cell_arg.res[alt_axis]:
# Upper-boundary side
longitude = side.origin[1]
latitude = side.origin[2]
latitude_res = arg.cell_arg.res[Grid3D.LOC_TO_WORLD[side.axis, 2]]
lat_long = latitude_res * longitude + latitude
return 3 * arg.cell_count + arg.axis_offsets[side.axis] + lat_long
cell_index = Grid3D.cell_index(arg.cell_arg.res, Grid3D._local_to_world(side.axis, side.origin))
return side.axis * arg.cell_count + cell_index
@wp.func
def get_side(arg: SideArg, side_index: ElementIndex):
if side_index < 3 * arg.cell_count:
axis = side_index // arg.cell_count
cell_index = side_index - axis * arg.cell_count
origin = Grid3D._world_to_local(axis, Grid3D.get_cell(arg.cell_arg.res, cell_index))
return Grid3D.Side(axis, origin)
axis_side_index = side_index - 3 * arg.cell_count
if axis_side_index < arg.axis_offsets[1]:
axis = 0
elif axis_side_index < arg.axis_offsets[2]:
axis = 1
else:
axis = 2
altitude = arg.cell_arg.res[Grid3D.LOC_TO_WORLD[axis, 0]]
lat_long = axis_side_index - arg.axis_offsets[axis]
latitude_res = arg.cell_arg.res[Grid3D.LOC_TO_WORLD[axis, 2]]
longitude = lat_long // latitude_res
latitude = lat_long - longitude * latitude_res
origin_loc = wp.vec3i(altitude, longitude, latitude)
return Grid3D.Side(axis, origin_loc)
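    # Side numbering recap (descriptive note): sides indexed by their lower-altitude
    # cell come first, grouped by normal axis as side_index = axis * cell_count +
    # cell_index; the three upper-boundary slabs follow at 3 * cell_count +
    # axis_offsets[axis]. E.g. for res = (2, 2, 2): 3 * 8 = 24 cell-attached sides
    # plus 3 * 4 = 12 upper-boundary sides, i.e. 36 in total, matching side_count().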
# Geometry device interface
@cached_arg_value
def cell_arg_value(self, device) -> CellArg:
args = self.CellArg()
args.res = self.res
args.origin = self.bounds_lo
args.cell_size = self.cell_size
return args
@wp.func
def cell_position(args: CellArg, s: Sample):
cell = Grid3D.get_cell(args.res, s.element_index)
return (
wp.vec3(
(float(cell[0]) + s.element_coords[0]) * args.cell_size[0],
(float(cell[1]) + s.element_coords[1]) * args.cell_size[1],
(float(cell[2]) + s.element_coords[2]) * args.cell_size[2],
)
+ args.origin
)
@wp.func
def cell_deformation_gradient(args: CellArg, s: Sample):
return wp.diag(args.cell_size)
@wp.func
def cell_inverse_deformation_gradient(args: CellArg, s: Sample):
return wp.diag(wp.cw_div(wp.vec3(1.0), args.cell_size))
@wp.func
def cell_lookup(args: CellArg, pos: wp.vec3):
loc_pos = wp.cw_div(pos - args.origin, args.cell_size)
x = wp.clamp(loc_pos[0], 0.0, float(args.res[0]))
y = wp.clamp(loc_pos[1], 0.0, float(args.res[1]))
z = wp.clamp(loc_pos[2], 0.0, float(args.res[2]))
x_cell = wp.min(wp.floor(x), float(args.res[0]) - 1.0)
y_cell = wp.min(wp.floor(y), float(args.res[1]) - 1.0)
z_cell = wp.min(wp.floor(z), float(args.res[2]) - 1.0)
coords = Coords(x - x_cell, y - y_cell, z - z_cell)
cell_index = Grid3D.cell_index(args.res, Grid3D.Cell(int(x_cell), int(y_cell), int(z_cell)))
return make_free_sample(cell_index, coords)
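    # Worked example (illustrative): with origin (0, 0, 0), cell_size (0.5, 0.5, 0.5)
    # and res (2, 2, 2), pos = (0.75, 0.1, 1.0) gives loc_pos = (1.5, 0.2, 2.0); the
    # z component clamps into the last cell, yielding cell (1, 0, 1) with local
    # coords (0.5, 0.2, 1.0).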
@wp.func
def cell_lookup(args: CellArg, pos: wp.vec3, guess: Sample):
return Grid3D.cell_lookup(args, pos)
@wp.func
def cell_measure(args: CellArg, s: Sample):
return args.cell_size[0] * args.cell_size[1] * args.cell_size[2]
@wp.func
def cell_normal(args: CellArg, s: Sample):
return wp.vec3(0.0)
@cached_arg_value
def side_arg_value(self, device) -> SideArg:
args = self.SideArg()
axis_dims = wp.vec3i(
self.res[1] * self.res[2],
self.res[2] * self.res[0],
self.res[0] * self.res[1],
)
args.axis_offsets = wp.vec3i(
0,
axis_dims[0],
axis_dims[0] + axis_dims[1],
)
args.cell_count = self.cell_count()
args.cell_arg = self.cell_arg_value(device)
return args
def side_index_arg_value(self, device) -> SideIndexArg:
return self.side_arg_value(device)
@wp.func
def boundary_side_index(args: SideArg, boundary_side_index: int):
"""Boundary side to side index"""
axis_side_index = boundary_side_index // 2
border = boundary_side_index - 2 * axis_side_index
if axis_side_index < args.axis_offsets[1]:
axis = 0
elif axis_side_index < args.axis_offsets[2]:
axis = 1
else:
axis = 2
lat_long = axis_side_index - args.axis_offsets[axis]
latitude_res = args.cell_arg.res[Grid3D.LOC_TO_WORLD[axis, 2]]
longitude = lat_long // latitude_res
latitude = lat_long - longitude * latitude_res
altitude = border * args.cell_arg.res[axis]
side = Grid3D.Side(axis, wp.vec3i(altitude, longitude, latitude))
return Grid3D.side_index(args, side)
@wp.func
def side_position(args: SideArg, s: Sample):
side = Grid3D.get_side(args, s.element_index)
coord0 = wp.select(side.origin[0] == 0, s.element_coords[0], 1.0 - s.element_coords[0])
local_pos = wp.vec3(
float(side.origin[0]),
float(side.origin[1]) + coord0,
float(side.origin[2]) + s.element_coords[1],
)
pos = args.cell_arg.origin + wp.cw_mul(Grid3D._local_to_world(side.axis, local_pos), args.cell_arg.cell_size)
return pos
@wp.func
def side_deformation_gradient(args: SideArg, s: Sample):
side = Grid3D.get_side(args, s.element_index)
sign = wp.select(side.origin[0] == 0, 1.0, -1.0)
return _mat32(
wp.cw_mul(Grid3D._local_to_world(side.axis, wp.vec3(0.0, sign, 0.0)), args.cell_arg.cell_size),
wp.cw_mul(Grid3D._local_to_world(side.axis, wp.vec3(0.0, 0.0, 1.0)), args.cell_arg.cell_size),
)
@wp.func
def side_inner_inverse_deformation_gradient(args: SideArg, s: Sample):
return Grid3D.cell_inverse_deformation_gradient(args.cell_arg, s)
@wp.func
def side_outer_inverse_deformation_gradient(args: SideArg, s: Sample):
return Grid3D.cell_inverse_deformation_gradient(args.cell_arg, s)
@wp.func
def side_measure(args: SideArg, s: Sample):
side = Grid3D.get_side(args, s.element_index)
long_axis = Grid3D.LOC_TO_WORLD[side.axis, 1]
lat_axis = Grid3D.LOC_TO_WORLD[side.axis, 2]
return args.cell_arg.cell_size[long_axis] * args.cell_arg.cell_size[lat_axis]
@wp.func
def side_measure_ratio(args: SideArg, s: Sample):
side = Grid3D.get_side(args, s.element_index)
alt_axis = Grid3D.LOC_TO_WORLD[side.axis, 0]
return 1.0 / args.cell_arg.cell_size[alt_axis]
@wp.func
def side_normal(args: SideArg, s: Sample):
side = Grid3D.get_side(args, s.element_index)
sign = wp.select(side.origin[0] == 0, 1.0, -1.0)
local_n = wp.vec3(sign, 0.0, 0.0)
return Grid3D._local_to_world(side.axis, local_n)
@wp.func
def side_inner_cell_index(arg: SideArg, side_index: ElementIndex):
side = Grid3D.get_side(arg, side_index)
inner_alt = wp.select(side.origin[0] == 0, side.origin[0] - 1, 0)
inner_origin = wp.vec3i(inner_alt, side.origin[1], side.origin[2])
cell = Grid3D._local_to_world(side.axis, inner_origin)
return Grid3D.cell_index(arg.cell_arg.res, cell)
@wp.func
def side_outer_cell_index(arg: SideArg, side_index: ElementIndex):
side = Grid3D.get_side(arg, side_index)
alt_axis = Grid3D.LOC_TO_WORLD[side.axis, 0]
outer_alt = wp.select(
side.origin[0] == arg.cell_arg.res[alt_axis], side.origin[0], arg.cell_arg.res[alt_axis] - 1
)
outer_origin = wp.vec3i(outer_alt, side.origin[1], side.origin[2])
cell = Grid3D._local_to_world(side.axis, outer_origin)
return Grid3D.cell_index(arg.cell_arg.res, cell)
@wp.func
def side_inner_cell_coords(args: SideArg, side_index: ElementIndex, side_coords: Coords):
side = Grid3D.get_side(args, side_index)
inner_alt = wp.select(side.origin[0] == 0, 1.0, 0.0)
side_coord0 = wp.select(side.origin[0] == 0, side_coords[0], 1.0 - side_coords[0])
return Grid3D._local_to_world(side.axis, wp.vec3(inner_alt, side_coord0, side_coords[1]))
@wp.func
def side_outer_cell_coords(args: SideArg, side_index: ElementIndex, side_coords: Coords):
side = Grid3D.get_side(args, side_index)
alt_axis = Grid3D.LOC_TO_WORLD[side.axis, 0]
outer_alt = wp.select(side.origin[0] == args.cell_arg.res[alt_axis], 0.0, 1.0)
side_coord0 = wp.select(side.origin[0] == 0, side_coords[0], 1.0 - side_coords[0])
return Grid3D._local_to_world(side.axis, wp.vec3(outer_alt, side_coord0, side_coords[1]))
@wp.func
def side_from_cell_coords(
args: SideArg,
side_index: ElementIndex,
element_index: ElementIndex,
element_coords: Coords,
):
side = Grid3D.get_side(args, side_index)
cell = Grid3D.get_cell(args.cell_arg.res, element_index)
if float(side.origin[0] - cell[side.axis]) == element_coords[side.axis]:
long_axis = Grid3D.LOC_TO_WORLD[side.axis, 1]
lat_axis = Grid3D.LOC_TO_WORLD[side.axis, 2]
long_coord = element_coords[long_axis]
long_coord = wp.select(side.origin[0] == 0, long_coord, 1.0 - long_coord)
return Coords(long_coord, element_coords[lat_axis], 0.0)
return Coords(OUTSIDE)
@wp.func
def side_to_cell_arg(side_arg: SideArg):
return side_arg.cell_arg
| 14,466 | Python | 32.02968 | 120 | 0.579358 |
NVIDIA/warp/warp/fem/geometry/tetmesh.py | from typing import Optional
import warp as wp
from warp.fem.cache import (
TemporaryStore,
borrow_temporary,
borrow_temporary_like,
cached_arg_value,
)
from warp.fem.types import (
NULL_ELEMENT_INDEX,
OUTSIDE,
Coords,
ElementIndex,
Sample,
make_free_sample,
)
from .closest_point import project_on_tet_at_origin
from .element import Tetrahedron, Triangle
from .geometry import Geometry
@wp.struct
class TetmeshCellArg:
tet_vertex_indices: wp.array2d(dtype=int)
positions: wp.array(dtype=wp.vec3)
# for neighbor cell lookup
vertex_tet_offsets: wp.array(dtype=int)
vertex_tet_indices: wp.array(dtype=int)
# for transforming reference gradient
deformation_gradients: wp.array(dtype=wp.mat33f)
@wp.struct
class TetmeshSideArg:
cell_arg: TetmeshCellArg
face_vertex_indices: wp.array(dtype=wp.vec3i)
face_tet_indices: wp.array(dtype=wp.vec2i)
_mat32 = wp.mat(shape=(3, 2), dtype=float)
class Tetmesh(Geometry):
"""Tetrahedral mesh geometry"""
dimension = 3
def __init__(
self, tet_vertex_indices: wp.array, positions: wp.array, temporary_store: Optional[TemporaryStore] = None
):
"""
Constructs a tetrahedral mesh.
Args:
tet_vertex_indices: warp array of shape (num_tets, 4) containing vertex indices for each tet
positions: warp array of shape (num_vertices, 3) containing 3d position for each vertex
temporary_store: shared pool from which to allocate temporary arrays
"""
self.tet_vertex_indices = tet_vertex_indices
self.positions = positions
self._face_vertex_indices: wp.array = None
self._face_tet_indices: wp.array = None
self._vertex_tet_offsets: wp.array = None
self._vertex_tet_indices: wp.array = None
self._tet_edge_indices: wp.array = None
self._edge_count = 0
self._build_topology(temporary_store)
self._deformation_gradients: wp.array = None
self._compute_deformation_gradients()
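    # Hedged usage sketch (illustrative, not part of the original source): a mesh
    # made of the single reference tetrahedron.
    #
    #   import numpy as np
    #   tets = wp.array(np.array([[0, 1, 2, 3]], dtype=np.int32), dtype=int)
    #   pts = wp.array(
    #       np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], dtype=np.float32),
    #       dtype=wp.vec3,
    #   )
    #   mesh = Tetmesh(tets, pts)
    #   mesh.cell_count(), mesh.side_count()   # (1, 4)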
def cell_count(self):
return self.tet_vertex_indices.shape[0]
def vertex_count(self):
return self.positions.shape[0]
def side_count(self):
return self._face_vertex_indices.shape[0]
def edge_count(self):
if self._tet_edge_indices is None:
self._compute_tet_edges()
return self._edge_count
def boundary_side_count(self):
return self._boundary_face_indices.shape[0]
def reference_cell(self) -> Tetrahedron:
return Tetrahedron()
def reference_side(self) -> Triangle:
return Triangle()
@property
def tet_edge_indices(self) -> wp.array:
if self._tet_edge_indices is None:
self._compute_tet_edges()
return self._tet_edge_indices
@property
def face_tet_indices(self) -> wp.array:
return self._face_tet_indices
@property
def face_vertex_indices(self) -> wp.array:
return self._face_vertex_indices
CellArg = TetmeshCellArg
SideArg = TetmeshSideArg
@wp.struct
class SideIndexArg:
boundary_face_indices: wp.array(dtype=int)
# Geometry device interface
@cached_arg_value
def cell_arg_value(self, device) -> CellArg:
args = self.CellArg()
args.tet_vertex_indices = self.tet_vertex_indices.to(device)
args.positions = self.positions.to(device)
args.vertex_tet_offsets = self._vertex_tet_offsets.to(device)
args.vertex_tet_indices = self._vertex_tet_indices.to(device)
args.deformation_gradients = self._deformation_gradients.to(device)
return args
@wp.func
def cell_position(args: CellArg, s: Sample):
tet_idx = args.tet_vertex_indices[s.element_index]
w0 = 1.0 - s.element_coords[0] - s.element_coords[1] - s.element_coords[2]
return (
w0 * args.positions[tet_idx[0]]
+ s.element_coords[0] * args.positions[tet_idx[1]]
+ s.element_coords[1] * args.positions[tet_idx[2]]
+ s.element_coords[2] * args.positions[tet_idx[3]]
)
@wp.func
def cell_deformation_gradient(args: CellArg, s: Sample):
return args.deformation_gradients[s.element_index]
@wp.func
def cell_inverse_deformation_gradient(args: CellArg, s: Sample):
return wp.inverse(args.deformation_gradients[s.element_index])
@wp.func
def _project_on_tet(args: CellArg, pos: wp.vec3, tet_index: int):
p0 = args.positions[args.tet_vertex_indices[tet_index, 0]]
q = pos - p0
e1 = args.positions[args.tet_vertex_indices[tet_index, 1]] - p0
e2 = args.positions[args.tet_vertex_indices[tet_index, 2]] - p0
e3 = args.positions[args.tet_vertex_indices[tet_index, 3]] - p0
dist, coords = project_on_tet_at_origin(q, e1, e2, e3)
return dist, coords
@wp.func
def cell_lookup(args: CellArg, pos: wp.vec3, guess: Sample):
closest_tet = int(NULL_ELEMENT_INDEX)
closest_coords = Coords(OUTSIDE)
closest_dist = float(1.0e8)
for v in range(4):
vtx = args.tet_vertex_indices[guess.element_index, v]
tet_beg = args.vertex_tet_offsets[vtx]
tet_end = args.vertex_tet_offsets[vtx + 1]
for t in range(tet_beg, tet_end):
tet = args.vertex_tet_indices[t]
dist, coords = Tetmesh._project_on_tet(args, pos, tet)
if dist <= closest_dist:
closest_dist = dist
closest_tet = tet
closest_coords = coords
return make_free_sample(closest_tet, closest_coords)
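    # Note on the lookup above (descriptive): only tets sharing a vertex with
    # `guess.element_index` are scanned, so this is a local search; callers should
    # provide a guess near `pos` (e.g. the sample from a previous step).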
@wp.func
def cell_measure(args: CellArg, s: Sample):
return wp.abs(wp.determinant(args.deformation_gradients[s.element_index])) / 6.0
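    # Note (descriptive): |det F| is the volume of the parallelepiped spanned by the
    # tet's edge vectors; the reference tetrahedron has volume 1/6, hence the division.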
@wp.func
def cell_measure_ratio(args: CellArg, s: Sample):
return 1.0
@wp.func
def cell_normal(args: CellArg, s: Sample):
return wp.vec3(0.0)
@cached_arg_value
def side_index_arg_value(self, device) -> SideIndexArg:
args = self.SideIndexArg()
args.boundary_face_indices = self._boundary_face_indices.to(device)
return args
@wp.func
def boundary_side_index(args: SideIndexArg, boundary_side_index: int):
"""Boundary side to side index"""
return args.boundary_face_indices[boundary_side_index]
@cached_arg_value
def side_arg_value(self, device) -> CellArg:
args = self.SideArg()
args.cell_arg = self.cell_arg_value(device)
args.face_vertex_indices = self._face_vertex_indices.to(device)
args.face_tet_indices = self._face_tet_indices.to(device)
return args
@wp.func
def side_position(args: SideArg, s: Sample):
face_idx = args.face_vertex_indices[s.element_index]
return (
s.element_coords[0] * args.cell_arg.positions[face_idx[0]]
+ s.element_coords[1] * args.cell_arg.positions[face_idx[1]]
+ s.element_coords[2] * args.cell_arg.positions[face_idx[2]]
)
@wp.func
def _side_vecs(args: SideArg, side_index: ElementIndex):
face_idx = args.face_vertex_indices[side_index]
v0 = args.cell_arg.positions[face_idx[0]]
v1 = args.cell_arg.positions[face_idx[1]]
v2 = args.cell_arg.positions[face_idx[2]]
return v1 - v0, v2 - v0
@wp.func
def side_deformation_gradient(args: SideArg, s: Sample):
e1, e2 = Tetmesh._side_vecs(args, s.element_index)
return _mat32(e1, e2)
@wp.func
def side_inner_inverse_deformation_gradient(args: SideArg, s: Sample):
cell_index = Tetmesh.side_inner_cell_index(args, s.element_index)
return wp.inverse(args.cell_arg.deformation_gradients[cell_index])
@wp.func
def side_outer_inverse_deformation_gradient(args: SideArg, s: Sample):
cell_index = Tetmesh.side_outer_cell_index(args, s.element_index)
return wp.inverse(args.cell_arg.deformation_gradients[cell_index])
@wp.func
def side_measure(args: SideArg, s: Sample):
e1, e2 = Tetmesh._side_vecs(args, s.element_index)
return 0.5 * wp.length(wp.cross(e1, e2))
@wp.func
def side_measure_ratio(args: SideArg, s: Sample):
inner = Tetmesh.side_inner_cell_index(args, s.element_index)
outer = Tetmesh.side_outer_cell_index(args, s.element_index)
return Tetmesh.side_measure(args, s) / wp.min(
Tetmesh.cell_measure(args.cell_arg, make_free_sample(inner, Coords())),
Tetmesh.cell_measure(args.cell_arg, make_free_sample(outer, Coords())),
)
@wp.func
def side_normal(args: SideArg, s: Sample):
e1, e2 = Tetmesh._side_vecs(args, s.element_index)
return wp.normalize(wp.cross(e1, e2))
@wp.func
def side_inner_cell_index(arg: SideArg, side_index: ElementIndex):
return arg.face_tet_indices[side_index][0]
@wp.func
def side_outer_cell_index(arg: SideArg, side_index: ElementIndex):
return arg.face_tet_indices[side_index][1]
@wp.func
def face_to_tet_coords(args: SideArg, side_index: ElementIndex, tet_index: ElementIndex, side_coords: Coords):
fvi = args.face_vertex_indices[side_index]
tv1 = args.cell_arg.tet_vertex_indices[tet_index, 1]
tv2 = args.cell_arg.tet_vertex_indices[tet_index, 2]
tv3 = args.cell_arg.tet_vertex_indices[tet_index, 3]
c1 = float(0.0)
c2 = float(0.0)
c3 = float(0.0)
for k in range(3):
if tv1 == fvi[k]:
c1 = side_coords[k]
elif tv2 == fvi[k]:
c2 = side_coords[k]
elif tv3 == fvi[k]:
c3 = side_coords[k]
return Coords(c1, c2, c3)
@wp.func
def side_inner_cell_coords(args: SideArg, side_index: ElementIndex, side_coords: Coords):
inner_cell_index = Tetmesh.side_inner_cell_index(args, side_index)
return Tetmesh.face_to_tet_coords(args, side_index, inner_cell_index, side_coords)
@wp.func
def side_outer_cell_coords(args: SideArg, side_index: ElementIndex, side_coords: Coords):
outer_cell_index = Tetmesh.side_outer_cell_index(args, side_index)
return Tetmesh.face_to_tet_coords(args, side_index, outer_cell_index, side_coords)
@wp.func
def side_from_cell_coords(args: SideArg, side_index: ElementIndex, tet_index: ElementIndex, tet_coords: Coords):
fvi = args.face_vertex_indices[side_index]
tv1 = args.cell_arg.tet_vertex_indices[tet_index, 1]
tv2 = args.cell_arg.tet_vertex_indices[tet_index, 2]
tv3 = args.cell_arg.tet_vertex_indices[tet_index, 3]
if tv1 == fvi[0]:
c0 = tet_coords[0]
elif tv2 == fvi[0]:
c0 = tet_coords[1]
elif tv3 == fvi[0]:
c0 = tet_coords[2]
else:
c0 = 1.0 - tet_coords[0] - tet_coords[1] - tet_coords[2]
if tv1 == fvi[1]:
c1 = tet_coords[0]
elif tv2 == fvi[1]:
c1 = tet_coords[1]
elif tv3 == fvi[1]:
c1 = tet_coords[2]
else:
c1 = 1.0 - tet_coords[0] - tet_coords[1] - tet_coords[2]
if tv1 == fvi[2]:
c2 = tet_coords[0]
elif tv2 == fvi[2]:
c2 = tet_coords[1]
elif tv3 == fvi[2]:
c2 = tet_coords[2]
else:
c2 = 1.0 - tet_coords[0] - tet_coords[1] - tet_coords[2]
return wp.select(c0 + c1 + c2 > 0.999, Coords(OUTSIDE), Coords(c0, c1, c2))
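    # Note (descriptive): the cell coords lie on this face exactly when the
    # barycentric weight of the opposite vertex vanishes, i.e. the three face
    # weights sum to 1; the 0.999 threshold guards against round-off.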
@wp.func
def side_to_cell_arg(side_arg: SideArg):
return side_arg.cell_arg
def _build_topology(self, temporary_store: TemporaryStore):
from warp.fem.utils import compress_node_indices, masked_indices
from warp.utils import array_scan
device = self.tet_vertex_indices.device
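        # Topology build outline (descriptive note): (1) count, per vertex, the faces
        # whose smallest vertex index is that vertex; (2) exclusive scan to offsets;
        # (3) write deduplicated faces into scratch storage; (4) second scan and
        # compaction into the final face arrays.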
vertex_tet_offsets, vertex_tet_indices, _, __ = compress_node_indices(
self.vertex_count(), self.tet_vertex_indices, temporary_store=temporary_store
)
self._vertex_tet_offsets = vertex_tet_offsets.detach()
self._vertex_tet_indices = vertex_tet_indices.detach()
vertex_start_face_count = borrow_temporary(temporary_store, dtype=int, device=device, shape=self.vertex_count())
vertex_start_face_count.array.zero_()
vertex_start_face_offsets = borrow_temporary_like(vertex_start_face_count, temporary_store=temporary_store)
vertex_face_other_vs = borrow_temporary(
temporary_store, dtype=wp.vec2i, device=device, shape=(4 * self.cell_count())
)
vertex_face_tets = borrow_temporary(temporary_store, dtype=int, device=device, shape=(4 * self.cell_count(), 2))
        # Count faces starting at each vertex
wp.launch(
kernel=Tetmesh._count_starting_faces_kernel,
device=device,
dim=self.cell_count(),
inputs=[self.tet_vertex_indices, vertex_start_face_count.array],
)
array_scan(in_array=vertex_start_face_count.array, out_array=vertex_start_face_offsets.array, inclusive=False)
        # Count number of unique faces (deduplicating across the tets that share them)
vertex_unique_face_count = vertex_start_face_count
wp.launch(
kernel=Tetmesh._count_unique_starting_faces_kernel,
device=device,
dim=self.vertex_count(),
inputs=[
self._vertex_tet_offsets,
self._vertex_tet_indices,
self.tet_vertex_indices,
vertex_start_face_offsets.array,
vertex_unique_face_count.array,
vertex_face_other_vs.array,
vertex_face_tets.array,
],
)
vertex_unique_face_offsets = borrow_temporary_like(vertex_start_face_offsets, temporary_store=temporary_store)
array_scan(in_array=vertex_start_face_count.array, out_array=vertex_unique_face_offsets.array, inclusive=False)
        # Get back face count to host
if device.is_cuda:
face_count = borrow_temporary(temporary_store, shape=(1,), dtype=int, device="cpu", pinned=True)
            # Last vertex will not own any face, so its count will be zero; just fetching last prefix count is ok
wp.copy(
dest=face_count.array, src=vertex_unique_face_offsets.array, src_offset=self.vertex_count() - 1, count=1
)
wp.synchronize_stream(wp.get_stream(device))
face_count = int(face_count.array.numpy()[0])
else:
face_count = int(vertex_unique_face_offsets.array.numpy()[self.vertex_count() - 1])
self._face_vertex_indices = wp.empty(shape=(face_count,), dtype=wp.vec3i, device=device)
self._face_tet_indices = wp.empty(shape=(face_count,), dtype=wp.vec2i, device=device)
boundary_mask = borrow_temporary(temporary_store, shape=(face_count,), dtype=int, device=device)
        # Compress face data
wp.launch(
kernel=Tetmesh._compress_faces_kernel,
device=device,
dim=self.vertex_count(),
inputs=[
vertex_start_face_offsets.array,
vertex_unique_face_offsets.array,
vertex_unique_face_count.array,
vertex_face_other_vs.array,
vertex_face_tets.array,
self._face_vertex_indices,
self._face_tet_indices,
boundary_mask.array,
],
)
vertex_start_face_offsets.release()
vertex_unique_face_offsets.release()
vertex_unique_face_count.release()
vertex_face_other_vs.release()
vertex_face_tets.release()
# Flip normals if necessary
wp.launch(
kernel=Tetmesh._flip_face_normals,
device=device,
dim=self.side_count(),
inputs=[self._face_vertex_indices, self._face_tet_indices, self.tet_vertex_indices, self.positions],
)
boundary_face_indices, _ = masked_indices(boundary_mask.array)
self._boundary_face_indices = boundary_face_indices.detach()
def _compute_tet_edges(self, temporary_store: Optional[TemporaryStore] = None):
from warp.utils import array_scan
device = self.tet_vertex_indices.device
vertex_start_edge_count = borrow_temporary(temporary_store, dtype=int, device=device, shape=self.vertex_count())
vertex_start_edge_count.array.zero_()
vertex_start_edge_offsets = borrow_temporary_like(vertex_start_edge_count, temporary_store=temporary_store)
vertex_edge_ends = borrow_temporary(temporary_store, dtype=int, device=device, shape=(6 * self.cell_count()))
        # Count edges starting at each vertex
wp.launch(
kernel=Tetmesh._count_starting_edges_kernel,
device=device,
dim=self.cell_count(),
inputs=[self.tet_vertex_indices, vertex_start_edge_count.array],
)
array_scan(in_array=vertex_start_edge_count.array, out_array=vertex_start_edge_offsets.array, inclusive=False)
        # Count number of unique edges (deduplicating across the tets that share them)
vertex_unique_edge_count = vertex_start_edge_count
wp.launch(
kernel=Tetmesh._count_unique_starting_edges_kernel,
device=device,
dim=self.vertex_count(),
inputs=[
self._vertex_tet_offsets,
self._vertex_tet_indices,
self.tet_vertex_indices,
vertex_start_edge_offsets.array,
vertex_unique_edge_count.array,
vertex_edge_ends.array,
],
)
vertex_unique_edge_offsets = borrow_temporary_like(
vertex_start_edge_offsets.array, temporary_store=temporary_store
)
array_scan(in_array=vertex_start_edge_count.array, out_array=vertex_unique_edge_offsets.array, inclusive=False)
# Get back edge count to host
if device.is_cuda:
edge_count = borrow_temporary(temporary_store, shape=(1,), dtype=int, device="cpu", pinned=True)
# Last vertex will not own any edge, so its count will be zero; just fetching last prefix count is ok
wp.copy(
dest=edge_count.array,
src=vertex_unique_edge_offsets.array,
src_offset=self.vertex_count() - 1,
count=1,
)
wp.synchronize_stream(wp.get_stream(device))
self._edge_count = int(edge_count.array.numpy()[0])
else:
self._edge_count = int(vertex_unique_edge_offsets.array.numpy()[self.vertex_count() - 1])
self._tet_edge_indices = wp.empty(
dtype=int, device=self.tet_vertex_indices.device, shape=(self.cell_count(), 6)
)
# Compress edge data
wp.launch(
kernel=Tetmesh._compress_edges_kernel,
device=device,
dim=self.vertex_count(),
inputs=[
self._vertex_tet_offsets,
self._vertex_tet_indices,
self.tet_vertex_indices,
vertex_start_edge_offsets.array,
vertex_unique_edge_offsets.array,
vertex_unique_edge_count.array,
vertex_edge_ends.array,
self._tet_edge_indices,
],
)
vertex_start_edge_offsets.release()
vertex_unique_edge_offsets.release()
vertex_unique_edge_count.release()
vertex_edge_ends.release()
def _compute_deformation_gradients(self):
self._deformation_gradients = wp.empty(dtype=wp.mat33f, device=self.positions.device, shape=(self.cell_count()))
wp.launch(
kernel=Tetmesh._compute_deformation_gradients_kernel,
dim=self._deformation_gradients.shape,
device=self._deformation_gradients.device,
inputs=[self.tet_vertex_indices, self.positions, self._deformation_gradients],
)
@wp.kernel
def _count_starting_faces_kernel(
tet_vertex_indices: wp.array2d(dtype=int), vertex_start_face_count: wp.array(dtype=int)
):
t = wp.tid()
for k in range(4):
vi = wp.vec3i(
tet_vertex_indices[t, k], tet_vertex_indices[t, (k + 1) % 4], tet_vertex_indices[t, (k + 2) % 4]
)
vm = wp.min(vi)
for i in range(3):
if vm == vi[i]:
wp.atomic_add(vertex_start_face_count, vm, 1)
@wp.func
def _find_face(
needle: wp.vec2i,
values: wp.array(dtype=wp.vec2i),
beg: int,
end: int,
):
for i in range(beg, end):
if values[i] == needle:
return i
return -1
@wp.kernel
def _count_unique_starting_faces_kernel(
vertex_tet_offsets: wp.array(dtype=int),
vertex_tet_indices: wp.array(dtype=int),
tet_vertex_indices: wp.array2d(dtype=int),
vertex_start_face_offsets: wp.array(dtype=int),
vertex_start_face_count: wp.array(dtype=int),
face_other_vs: wp.array(dtype=wp.vec2i),
face_tets: wp.array2d(dtype=int),
):
v = wp.tid()
face_beg = vertex_start_face_offsets[v]
tet_beg = vertex_tet_offsets[v]
tet_end = vertex_tet_offsets[v + 1]
face_cur = face_beg
for tet in range(tet_beg, tet_end):
t = vertex_tet_indices[tet]
for k in range(4):
vi = wp.vec3i(
tet_vertex_indices[t, k], tet_vertex_indices[t, (k + 1) % 4], tet_vertex_indices[t, (k + 2) % 4]
)
min_v = wp.min(vi)
if v == min_v:
max_v = wp.max(vi)
mid_v = vi[0] + vi[1] + vi[2] - min_v - max_v
other_v = wp.vec2i(mid_v, max_v)
# Check if other_v has been seen
seen_idx = Tetmesh._find_face(other_v, face_other_vs, face_beg, face_cur)
if seen_idx == -1:
face_other_vs[face_cur] = other_v
face_tets[face_cur, 0] = t
face_tets[face_cur, 1] = t
face_cur += 1
else:
face_tets[seen_idx, 1] = t
vertex_start_face_count[v] = face_cur - face_beg
@wp.kernel
def _compress_faces_kernel(
vertex_start_face_offsets: wp.array(dtype=int),
vertex_unique_face_offsets: wp.array(dtype=int),
vertex_unique_face_count: wp.array(dtype=int),
uncompressed_face_other_vs: wp.array(dtype=wp.vec2i),
uncompressed_face_tets: wp.array2d(dtype=int),
face_vertex_indices: wp.array(dtype=wp.vec3i),
face_tet_indices: wp.array(dtype=wp.vec2i),
boundary_mask: wp.array(dtype=int),
):
v = wp.tid()
start_beg = vertex_start_face_offsets[v]
unique_beg = vertex_unique_face_offsets[v]
unique_count = vertex_unique_face_count[v]
for f in range(unique_count):
src_index = start_beg + f
face_index = unique_beg + f
face_vertex_indices[face_index] = wp.vec3i(
v,
uncompressed_face_other_vs[src_index][0],
uncompressed_face_other_vs[src_index][1],
)
t0 = uncompressed_face_tets[src_index, 0]
t1 = uncompressed_face_tets[src_index, 1]
face_tet_indices[face_index] = wp.vec2i(t0, t1)
if t0 == t1:
boundary_mask[face_index] = 1
else:
boundary_mask[face_index] = 0
@wp.kernel
def _flip_face_normals(
face_vertex_indices: wp.array(dtype=wp.vec3i),
face_tet_indices: wp.array(dtype=wp.vec2i),
tet_vertex_indices: wp.array2d(dtype=int),
positions: wp.array(dtype=wp.vec3),
):
e = wp.tid()
tet = face_tet_indices[e][0]
tet_vidx = tet_vertex_indices[tet]
face_vidx = face_vertex_indices[e]
tet_centroid = (
positions[tet_vidx[0]] + positions[tet_vidx[1]] + positions[tet_vidx[2]] + positions[tet_vidx[3]]
) / 4.0
v0 = positions[face_vidx[0]]
v1 = positions[face_vidx[1]]
v2 = positions[face_vidx[2]]
face_center = (v1 + v0 + v2) / 3.0
face_normal = wp.cross(v1 - v0, v2 - v0)
# if face normal points toward first tet centroid, flip indices
if wp.dot(tet_centroid - face_center, face_normal) > 0.0:
face_vertex_indices[e] = wp.vec3i(face_vidx[0], face_vidx[2], face_vidx[1])
@wp.kernel
    def _count_starting_edges_kernel(
        tet_vertex_indices: wp.array2d(dtype=int), vertex_start_edge_count: wp.array(dtype=int)
    ):
        t = wp.tid()
        # edges of the base triangle: (0, 1), (1, 2), (2, 0)
        for k in range(3):
            v0 = tet_vertex_indices[t, k]
            v1 = tet_vertex_indices[t, (k + 1) % 3]
            if v0 < v1:
                wp.atomic_add(vertex_start_edge_count, v0, 1)
            else:
                wp.atomic_add(vertex_start_edge_count, v1, 1)
        # edges to the apex vertex: (0, 3), (1, 3), (2, 3)
        for k in range(3):
            v0 = tet_vertex_indices[t, k]
            v1 = tet_vertex_indices[t, 3]
            if v0 < v1:
                wp.atomic_add(vertex_start_edge_count, v0, 1)
            else:
                wp.atomic_add(vertex_start_edge_count, v1, 1)
@wp.func
def _find_edge(
needle: int,
values: wp.array(dtype=int),
beg: int,
end: int,
):
for i in range(beg, end):
if values[i] == needle:
return i
return -1
@wp.kernel
def _count_unique_starting_edges_kernel(
vertex_tet_offsets: wp.array(dtype=int),
vertex_tet_indices: wp.array(dtype=int),
tet_vertex_indices: wp.array2d(dtype=int),
vertex_start_edge_offsets: wp.array(dtype=int),
vertex_start_edge_count: wp.array(dtype=int),
edge_ends: wp.array(dtype=int),
):
v = wp.tid()
edge_beg = vertex_start_edge_offsets[v]
tet_beg = vertex_tet_offsets[v]
tet_end = vertex_tet_offsets[v + 1]
edge_cur = edge_beg
for tet in range(tet_beg, tet_end):
t = vertex_tet_indices[tet]
for k in range(3):
v0 = tet_vertex_indices[t, k]
v1 = tet_vertex_indices[t, (k + 1) % 3]
if v == wp.min(v0, v1):
other_v = wp.max(v0, v1)
if Tetmesh._find_edge(other_v, edge_ends, edge_beg, edge_cur) == -1:
edge_ends[edge_cur] = other_v
edge_cur += 1
for k in range(3):
v0 = tet_vertex_indices[t, k]
v1 = tet_vertex_indices[t, 3]
if v == wp.min(v0, v1):
other_v = wp.max(v0, v1)
if Tetmesh._find_edge(other_v, edge_ends, edge_beg, edge_cur) == -1:
edge_ends[edge_cur] = other_v
edge_cur += 1
vertex_start_edge_count[v] = edge_cur - edge_beg
@wp.kernel
def _compress_edges_kernel(
vertex_tet_offsets: wp.array(dtype=int),
vertex_tet_indices: wp.array(dtype=int),
tet_vertex_indices: wp.array2d(dtype=int),
vertex_start_edge_offsets: wp.array(dtype=int),
vertex_unique_edge_offsets: wp.array(dtype=int),
vertex_unique_edge_count: wp.array(dtype=int),
uncompressed_edge_ends: wp.array(dtype=int),
tet_edge_indices: wp.array2d(dtype=int),
):
v = wp.tid()
uncompressed_beg = vertex_start_edge_offsets[v]
unique_beg = vertex_unique_edge_offsets[v]
unique_count = vertex_unique_edge_count[v]
tet_beg = vertex_tet_offsets[v]
tet_end = vertex_tet_offsets[v + 1]
for tet in range(tet_beg, tet_end):
t = vertex_tet_indices[tet]
for k in range(3):
v0 = tet_vertex_indices[t, k]
v1 = tet_vertex_indices[t, (k + 1) % 3]
if v == wp.min(v0, v1):
other_v = wp.max(v0, v1)
edge_id = (
Tetmesh._find_edge(
other_v, uncompressed_edge_ends, uncompressed_beg, uncompressed_beg + unique_count
)
- uncompressed_beg
+ unique_beg
)
tet_edge_indices[t][k] = edge_id
for k in range(3):
v0 = tet_vertex_indices[t, k]
v1 = tet_vertex_indices[t, 3]
if v == wp.min(v0, v1):
other_v = wp.max(v0, v1)
edge_id = (
Tetmesh._find_edge(
other_v, uncompressed_edge_ends, uncompressed_beg, uncompressed_beg + unique_count
)
- uncompressed_beg
+ unique_beg
)
tet_edge_indices[t][k + 3] = edge_id
@wp.kernel
def _compute_deformation_gradients_kernel(
tet_vertex_indices: wp.array2d(dtype=int),
positions: wp.array(dtype=wp.vec3f),
transforms: wp.array(dtype=wp.mat33f),
):
t = wp.tid()
p0 = positions[tet_vertex_indices[t, 0]]
p1 = positions[tet_vertex_indices[t, 1]]
p2 = positions[tet_vertex_indices[t, 2]]
p3 = positions[tet_vertex_indices[t, 3]]
e1 = p1 - p0
e2 = p2 - p0
e3 = p3 - p0
transforms[t] = wp.mat33(e1, e2, e3)
| 29,806 | Python | 34.442331 | 120 | 0.575018 |
NVIDIA/warp/warp/fem/geometry/hexmesh.py | from typing import Optional
import warp as wp
from warp.fem.cache import (
TemporaryStore,
borrow_temporary,
borrow_temporary_like,
cached_arg_value,
)
from warp.fem.types import OUTSIDE, Coords, ElementIndex, Sample, make_free_sample
from .element import Cube, Square
from .geometry import Geometry
@wp.struct
class HexmeshCellArg:
hex_vertex_indices: wp.array2d(dtype=int)
positions: wp.array(dtype=wp.vec3)
# for neighbor cell lookup
vertex_hex_offsets: wp.array(dtype=int)
vertex_hex_indices: wp.array(dtype=int)
@wp.struct
class HexmeshSideArg:
cell_arg: HexmeshCellArg
face_vertex_indices: wp.array(dtype=wp.vec4i)
face_hex_indices: wp.array(dtype=wp.vec2i)
face_hex_face_orientation: wp.array(dtype=wp.vec4i)
_mat32 = wp.mat(shape=(3, 2), dtype=float)
FACE_VERTEX_INDICES = wp.constant(
wp.mat(shape=(6, 4), dtype=int)(
[
[0, 4, 7, 3], # x = 0
[1, 2, 6, 5], # x = 1
[0, 1, 5, 4], # y = 0
[3, 7, 6, 2], # y = 1
[0, 3, 2, 1], # z = 0
[4, 5, 6, 7], # z = 1
]
)
)
EDGE_VERTEX_INDICES = wp.constant(
wp.mat(shape=(12, 2), dtype=int)(
[
[0, 1],
[1, 2],
[3, 2],
[0, 3],
[4, 5],
[5, 6],
[7, 6],
[4, 7],
[0, 4],
[1, 5],
[2, 6],
[3, 7],
]
)
)
# orthogonal transform for face coordinates given first vertex + winding
# (two rows per entry)
FACE_ORIENTATION = [
[1, 0], # FV: 0, det: +
[0, 1],
[0, 1], # FV: 0, det: -
[1, 0],
[0, -1], # FV: 1, det: +
[1, 0],
[-1, 0], # FV: 1, det: -
[0, 1],
[-1, 0], # FV: 2, det: +
[0, -1],
[0, -1], # FV: 2, det: -
[-1, 0],
[0, 1], # FV: 3, det: +
[-1, 0],
[1, 0], # FV: 3, det: -
[0, -1],
]
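# Each orientation index `ori` encodes (first vertex fv = ori // 2, winding = ori % 2);
# rows 2 * ori and 2 * ori + 1 above form the 2x2 orthogonal map between local face
# coordinates and the oriented coordinates stored for the shared face.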
FACE_TRANSLATION = [
[0, 0],
[1, 0],
[1, 1],
[0, 1],
]
# local face coordinate system
_FACE_COORD_INDICES = wp.constant(
wp.mat(shape=(6, 4), dtype=int)(
[
[2, 1, 0, 0], # 0: z y -x
[1, 2, 0, 1], # 1: y z x-1
[0, 2, 1, 0], # 2: x z -y
[2, 0, 1, 1], # 3: z x y-1
[1, 0, 2, 0], # 4: y x -z
[0, 1, 2, 1], # 5: x y z-1
]
)
)
_FACE_ORIENTATION_F = wp.constant(wp.mat(shape=(16, 2), dtype=float)(FACE_ORIENTATION))
_FACE_TRANSLATION_F = wp.constant(wp.mat(shape=(4, 2), dtype=float)(FACE_TRANSLATION))
class Hexmesh(Geometry):
"""Hexahedral mesh geometry"""
dimension = 3
def __init__(
self, hex_vertex_indices: wp.array, positions: wp.array, temporary_store: Optional[TemporaryStore] = None
):
"""
        Constructs a hexahedral mesh.
Args:
hex_vertex_indices: warp array of shape (num_hexes, 8) containing vertex indices for each hex
following standard ordering (bottom face vertices in counter-clockwise order, then similarly for upper face)
positions: warp array of shape (num_vertices, 3) containing 3d position for each vertex
temporary_store: shared pool from which to allocate temporary arrays
"""
self.hex_vertex_indices = hex_vertex_indices
self.positions = positions
self._face_vertex_indices: wp.array = None
self._face_hex_indices: wp.array = None
self._face_hex_face_orientation: wp.array = None
self._vertex_hex_offsets: wp.array = None
self._vertex_hex_indices: wp.array = None
self._hex_edge_indices: wp.array = None
self._edge_count = 0
self._build_topology(temporary_store)
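    # Hedged usage sketch (illustrative, not part of the original source): a mesh
    # made of a single unit cube, vertices in the standard ordering documented above.
    #
    #   import numpy as np
    #   hexes = wp.array(np.arange(8, dtype=np.int32).reshape(1, 8), dtype=int)
    #   corners = [[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0],
    #              [0, 0, 1], [1, 0, 1], [1, 1, 1], [0, 1, 1]]
    #   pts = wp.array(np.array(corners, dtype=np.float32), dtype=wp.vec3)
    #   mesh = Hexmesh(hexes, pts)
    #   mesh.cell_count(), mesh.side_count()   # (1, 6)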
def cell_count(self):
return self.hex_vertex_indices.shape[0]
def vertex_count(self):
return self.positions.shape[0]
def side_count(self):
return self._face_vertex_indices.shape[0]
def edge_count(self):
if self._hex_edge_indices is None:
self._compute_hex_edges()
return self._edge_count
def boundary_side_count(self):
return self._boundary_face_indices.shape[0]
def reference_cell(self) -> Cube:
return Cube()
def reference_side(self) -> Square:
return Square()
@property
def hex_edge_indices(self) -> wp.array:
if self._hex_edge_indices is None:
self._compute_hex_edges()
return self._hex_edge_indices
@property
def face_hex_indices(self) -> wp.array:
return self._face_hex_indices
@property
def face_vertex_indices(self) -> wp.array:
return self._face_vertex_indices
CellArg = HexmeshCellArg
SideArg = HexmeshSideArg
@wp.struct
class SideIndexArg:
boundary_face_indices: wp.array(dtype=int)
# Geometry device interface
@cached_arg_value
def cell_arg_value(self, device) -> CellArg:
args = self.CellArg()
args.hex_vertex_indices = self.hex_vertex_indices.to(device)
args.positions = self.positions.to(device)
args.vertex_hex_offsets = self._vertex_hex_offsets.to(device)
args.vertex_hex_indices = self._vertex_hex_indices.to(device)
return args
@wp.func
def cell_position(args: CellArg, s: Sample):
hex_idx = args.hex_vertex_indices[s.element_index]
w_p = s.element_coords
w_m = Coords(1.0) - s.element_coords
# 0 : m m m
# 1 : p m m
# 2 : p p m
# 3 : m p m
# 4 : m m p
# 5 : p m p
# 6 : p p p
# 7 : m p p
return (
w_m[0] * w_m[1] * w_m[2] * args.positions[hex_idx[0]]
+ w_p[0] * w_m[1] * w_m[2] * args.positions[hex_idx[1]]
+ w_p[0] * w_p[1] * w_m[2] * args.positions[hex_idx[2]]
+ w_m[0] * w_p[1] * w_m[2] * args.positions[hex_idx[3]]
+ w_m[0] * w_m[1] * w_p[2] * args.positions[hex_idx[4]]
+ w_p[0] * w_m[1] * w_p[2] * args.positions[hex_idx[5]]
+ w_p[0] * w_p[1] * w_p[2] * args.positions[hex_idx[6]]
+ w_m[0] * w_p[1] * w_p[2] * args.positions[hex_idx[7]]
)
@wp.func
def cell_deformation_gradient(cell_arg: CellArg, s: Sample):
"""Deformation gradient at `coords`"""
"""Transposed deformation gradient at `coords`"""
hex_idx = cell_arg.hex_vertex_indices[s.element_index]
w_p = s.element_coords
w_m = Coords(1.0) - s.element_coords
return (
wp.outer(cell_arg.positions[hex_idx[0]], wp.vec3(-w_m[1] * w_m[2], -w_m[0] * w_m[2], -w_m[0] * w_m[1]))
+ wp.outer(cell_arg.positions[hex_idx[1]], wp.vec3(w_m[1] * w_m[2], -w_p[0] * w_m[2], -w_p[0] * w_m[1]))
+ wp.outer(cell_arg.positions[hex_idx[2]], wp.vec3(w_p[1] * w_m[2], w_p[0] * w_m[2], -w_p[0] * w_p[1]))
+ wp.outer(cell_arg.positions[hex_idx[3]], wp.vec3(-w_p[1] * w_m[2], w_m[0] * w_m[2], -w_m[0] * w_p[1]))
+ wp.outer(cell_arg.positions[hex_idx[4]], wp.vec3(-w_m[1] * w_p[2], -w_m[0] * w_p[2], w_m[0] * w_m[1]))
+ wp.outer(cell_arg.positions[hex_idx[5]], wp.vec3(w_m[1] * w_p[2], -w_p[0] * w_p[2], w_p[0] * w_m[1]))
+ wp.outer(cell_arg.positions[hex_idx[6]], wp.vec3(w_p[1] * w_p[2], w_p[0] * w_p[2], w_p[0] * w_p[1]))
+ wp.outer(cell_arg.positions[hex_idx[7]], wp.vec3(-w_p[1] * w_p[2], w_m[0] * w_p[2], w_m[0] * w_p[1]))
)
@wp.func
def cell_inverse_deformation_gradient(cell_arg: CellArg, s: Sample):
return wp.inverse(Hexmesh.cell_deformation_gradient(cell_arg, s))
@wp.func
def cell_measure(args: CellArg, s: Sample):
return wp.abs(wp.determinant(Hexmesh.cell_deformation_gradient(args, s)))
@wp.func
def cell_normal(args: CellArg, s: Sample):
return wp.vec3(0.0)
@cached_arg_value
def side_index_arg_value(self, device) -> SideIndexArg:
args = self.SideIndexArg()
args.boundary_face_indices = self._boundary_face_indices.to(device)
return args
@wp.func
def boundary_side_index(args: SideIndexArg, boundary_side_index: int):
"""Boundary side to side index"""
return args.boundary_face_indices[boundary_side_index]
@cached_arg_value
def side_arg_value(self, device) -> CellArg:
args = self.SideArg()
args.cell_arg = self.cell_arg_value(device)
args.face_vertex_indices = self._face_vertex_indices.to(device)
args.face_hex_indices = self._face_hex_indices.to(device)
args.face_hex_face_orientation = self._face_hex_face_orientation.to(device)
return args
@wp.func
def side_position(args: SideArg, s: Sample):
face_idx = args.face_vertex_indices[s.element_index]
w_p = s.element_coords
w_m = Coords(1.0) - s.element_coords
return (
w_m[0] * w_m[1] * args.cell_arg.positions[face_idx[0]]
+ w_p[0] * w_m[1] * args.cell_arg.positions[face_idx[1]]
+ w_p[0] * w_p[1] * args.cell_arg.positions[face_idx[2]]
+ w_m[0] * w_p[1] * args.cell_arg.positions[face_idx[3]]
)
@wp.func
def _side_deformation_vecs(args: SideArg, side_index: ElementIndex, coords: Coords):
face_idx = args.face_vertex_indices[side_index]
p0 = args.cell_arg.positions[face_idx[0]]
p1 = args.cell_arg.positions[face_idx[1]]
p2 = args.cell_arg.positions[face_idx[2]]
p3 = args.cell_arg.positions[face_idx[3]]
w_p = coords
w_m = Coords(1.0) - coords
v1 = w_m[1] * (p1 - p0) + w_p[1] * (p2 - p3)
v2 = w_p[0] * (p2 - p1) + w_m[0] * (p3 - p0)
return v1, v2
@wp.func
def side_deformation_gradient(args: SideArg, s: Sample):
"""Transposed side deformation gradient at `coords`"""
v1, v2 = Hexmesh._side_deformation_vecs(args, s.element_index, s.element_coords)
return _mat32(v1, v2)
@wp.func
def side_inner_inverse_deformation_gradient(args: SideArg, s: Sample):
cell_index = Hexmesh.side_inner_cell_index(args, s.element_index)
cell_coords = Hexmesh.side_inner_cell_coords(args, s.element_index, s.element_coords)
return Hexmesh.cell_inverse_deformation_gradient(args.cell_arg, make_free_sample(cell_index, cell_coords))
@wp.func
def side_outer_inverse_deformation_gradient(args: SideArg, s: Sample):
cell_index = Hexmesh.side_outer_cell_index(args, s.element_index)
cell_coords = Hexmesh.side_outer_cell_coords(args, s.element_index, s.element_coords)
return Hexmesh.cell_inverse_deformation_gradient(args.cell_arg, make_free_sample(cell_index, cell_coords))
@wp.func
def side_measure(args: SideArg, s: Sample):
v1, v2 = Hexmesh._side_deformation_vecs(args, s.element_index, s.element_coords)
return wp.length(wp.cross(v1, v2))
@wp.func
def side_measure_ratio(args: SideArg, s: Sample):
inner = Hexmesh.side_inner_cell_index(args, s.element_index)
outer = Hexmesh.side_outer_cell_index(args, s.element_index)
inner_coords = Hexmesh.side_inner_cell_coords(args, s.element_index, s.element_coords)
outer_coords = Hexmesh.side_outer_cell_coords(args, s.element_index, s.element_coords)
return Hexmesh.side_measure(args, s) / wp.min(
Hexmesh.cell_measure(args.cell_arg, make_free_sample(inner, inner_coords)),
Hexmesh.cell_measure(args.cell_arg, make_free_sample(outer, outer_coords)),
)
@wp.func
def side_normal(args: SideArg, s: Sample):
v1, v2 = Hexmesh._side_deformation_vecs(args, s.element_index, s.element_coords)
return wp.normalize(wp.cross(v1, v2))
@wp.func
def side_inner_cell_index(arg: SideArg, side_index: ElementIndex):
return arg.face_hex_indices[side_index][0]
@wp.func
def side_outer_cell_index(arg: SideArg, side_index: ElementIndex):
return arg.face_hex_indices[side_index][1]
@wp.func
def _hex_local_face_coords(hex_coords: Coords, face_index: int):
        # Coordinates in the local face coordinate system,
        # and sign of the last (out-of-face) coordinate
face_coords = wp.vec2(
hex_coords[_FACE_COORD_INDICES[face_index, 0]], hex_coords[_FACE_COORD_INDICES[face_index, 1]]
)
normal_coord = hex_coords[_FACE_COORD_INDICES[face_index, 2]]
normal_coord = wp.select(_FACE_COORD_INDICES[face_index, 3] == 0, normal_coord - 1.0, -normal_coord)
return face_coords, normal_coord
@wp.func
def _local_face_hex_coords(face_coords: wp.vec2, face_index: int):
        # Hex coordinates from the local face coordinate system
hex_coords = Coords()
hex_coords[_FACE_COORD_INDICES[face_index, 0]] = face_coords[0]
hex_coords[_FACE_COORD_INDICES[face_index, 1]] = face_coords[1]
hex_coords[_FACE_COORD_INDICES[face_index, 2]] = wp.select(_FACE_COORD_INDICES[face_index, 3] == 0, 1.0, 0.0)
return hex_coords
@wp.func
def _local_from_oriented_face_coords(ori: int, oriented_coords: Coords):
fv = ori // 2
return (oriented_coords[0] - _FACE_TRANSLATION_F[fv, 0]) * _FACE_ORIENTATION_F[2 * ori] + (
oriented_coords[1] - _FACE_TRANSLATION_F[fv, 1]
) * _FACE_ORIENTATION_F[2 * ori + 1]
@wp.func
def _local_to_oriented_face_coords(ori: int, coords: wp.vec2):
fv = ori // 2
return Coords(
wp.dot(_FACE_ORIENTATION_F[2 * ori], coords) + _FACE_TRANSLATION_F[fv, 0],
wp.dot(_FACE_ORIENTATION_F[2 * ori + 1], coords) + _FACE_TRANSLATION_F[fv, 1],
0.0,
)
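    # Worked example (illustrative): for ori = 2 (fv = 1, positive winding),
    # local coords (x, y) map to oriented coords (1 - y, x, 0), and
    # _local_from_oriented_face_coords with the same ori recovers (x, y).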
@wp.func
def face_to_hex_coords(local_face_index: int, face_orientation: int, side_coords: Coords):
local_coords = Hexmesh._local_from_oriented_face_coords(face_orientation, side_coords)
return Hexmesh._local_face_hex_coords(local_coords, local_face_index)
@wp.func
def side_inner_cell_coords(args: SideArg, side_index: ElementIndex, side_coords: Coords):
local_face_index = args.face_hex_face_orientation[side_index][0]
face_orientation = args.face_hex_face_orientation[side_index][1]
return Hexmesh.face_to_hex_coords(local_face_index, face_orientation, side_coords)
@wp.func
def side_outer_cell_coords(args: SideArg, side_index: ElementIndex, side_coords: Coords):
local_face_index = args.face_hex_face_orientation[side_index][2]
face_orientation = args.face_hex_face_orientation[side_index][3]
return Hexmesh.face_to_hex_coords(local_face_index, face_orientation, side_coords)
@wp.func
def side_from_cell_coords(args: SideArg, side_index: ElementIndex, hex_index: ElementIndex, hex_coords: Coords):
if Hexmesh.side_inner_cell_index(args, side_index) == hex_index:
local_face_index = args.face_hex_face_orientation[side_index][0]
face_orientation = args.face_hex_face_orientation[side_index][1]
else:
local_face_index = args.face_hex_face_orientation[side_index][2]
face_orientation = args.face_hex_face_orientation[side_index][3]
face_coords, normal_coord = Hexmesh._hex_local_face_coords(hex_coords, local_face_index)
return wp.select(
normal_coord == 0.0, Coords(OUTSIDE), Hexmesh._local_to_oriented_face_coords(face_orientation, face_coords)
)
@wp.func
def side_to_cell_arg(side_arg: SideArg):
return side_arg.cell_arg
def _build_topology(self, temporary_store: TemporaryStore):
from warp.fem.utils import compress_node_indices, masked_indices
from warp.utils import array_scan
device = self.hex_vertex_indices.device
vertex_hex_offsets, vertex_hex_indices, _, __ = compress_node_indices(
self.vertex_count(), self.hex_vertex_indices, temporary_store=temporary_store
)
self._vertex_hex_offsets = vertex_hex_offsets.detach()
self._vertex_hex_indices = vertex_hex_indices.detach()
vertex_start_face_count = borrow_temporary(temporary_store, dtype=int, device=device, shape=self.vertex_count())
vertex_start_face_count.array.zero_()
vertex_start_face_offsets = borrow_temporary_like(vertex_start_face_count, temporary_store=temporary_store)
vertex_face_other_vs = borrow_temporary(
temporary_store, dtype=wp.vec3i, device=device, shape=(8 * self.cell_count())
)
vertex_face_hexes = borrow_temporary(
temporary_store, dtype=int, device=device, shape=(8 * self.cell_count(), 2)
)
        # Count faces starting at each vertex
wp.launch(
kernel=Hexmesh._count_starting_faces_kernel,
device=device,
dim=self.cell_count(),
inputs=[self.hex_vertex_indices, vertex_start_face_count.array],
)
array_scan(in_array=vertex_start_face_count.array, out_array=vertex_start_face_offsets.array, inclusive=False)
        # Count number of unique faces (deduplicating across the hexes that share them)
vertex_unique_face_count = vertex_start_face_count
wp.launch(
kernel=Hexmesh._count_unique_starting_faces_kernel,
device=device,
dim=self.vertex_count(),
inputs=[
self._vertex_hex_offsets,
self._vertex_hex_indices,
self.hex_vertex_indices,
vertex_start_face_offsets.array,
vertex_unique_face_count.array,
vertex_face_other_vs.array,
vertex_face_hexes.array,
],
)
vertex_unique_face_offsets = borrow_temporary_like(vertex_start_face_offsets, temporary_store=temporary_store)
array_scan(in_array=vertex_start_face_count.array, out_array=vertex_unique_face_offsets.array, inclusive=False)
        # Get back face count to host
if device.is_cuda:
face_count = borrow_temporary(temporary_store, shape=(1,), dtype=int, device="cpu", pinned=True)
            # Last vertex will not own any face, so its count will be zero; just fetching last prefix count is ok
wp.copy(
dest=face_count.array, src=vertex_unique_face_offsets.array, src_offset=self.vertex_count() - 1, count=1
)
wp.synchronize_stream(wp.get_stream(device))
face_count = int(face_count.array.numpy()[0])
else:
face_count = int(vertex_unique_face_offsets.array.numpy()[self.vertex_count() - 1])
self._face_vertex_indices = wp.empty(shape=(face_count,), dtype=wp.vec4i, device=device)
self._face_hex_indices = wp.empty(shape=(face_count,), dtype=wp.vec2i, device=device)
self._face_hex_face_orientation = wp.empty(shape=(face_count,), dtype=wp.vec4i, device=device)
boundary_mask = borrow_temporary(temporary_store, shape=(face_count,), dtype=int, device=device)
        # Compress face data
wp.launch(
kernel=Hexmesh._compress_faces_kernel,
device=device,
dim=self.vertex_count(),
inputs=[
vertex_start_face_offsets.array,
vertex_unique_face_offsets.array,
vertex_unique_face_count.array,
vertex_face_other_vs.array,
vertex_face_hexes.array,
self._face_vertex_indices,
self._face_hex_indices,
boundary_mask.array,
],
)
vertex_start_face_offsets.release()
vertex_unique_face_offsets.release()
vertex_unique_face_count.release()
vertex_face_other_vs.release()
vertex_face_hexes.release()
# Flip normals if necessary
wp.launch(
kernel=Hexmesh._flip_face_normals,
device=device,
dim=self.side_count(),
inputs=[self._face_vertex_indices, self._face_hex_indices, self.hex_vertex_indices, self.positions],
)
# Compute and store face orientation
wp.launch(
kernel=Hexmesh._compute_face_orientation,
device=device,
dim=self.side_count(),
inputs=[
self._face_vertex_indices,
self._face_hex_indices,
self.hex_vertex_indices,
self._face_hex_face_orientation,
],
)
boundary_face_indices, _ = masked_indices(boundary_mask.array)
self._boundary_face_indices = boundary_face_indices.detach()
def _compute_hex_edges(self, temporary_store: Optional[TemporaryStore] = None):
from warp.utils import array_scan
device = self.hex_vertex_indices.device
vertex_start_edge_count = borrow_temporary(temporary_store, dtype=int, device=device, shape=self.vertex_count())
vertex_start_edge_count.array.zero_()
vertex_start_edge_offsets = borrow_temporary_like(vertex_start_edge_count, temporary_store=temporary_store)
vertex_edge_ends = borrow_temporary(temporary_store, dtype=int, device=device, shape=(12 * self.cell_count()))
        # Count edges starting at each vertex
wp.launch(
kernel=Hexmesh._count_starting_edges_kernel,
device=device,
dim=self.cell_count(),
inputs=[self.hex_vertex_indices, vertex_start_edge_count.array],
)
array_scan(in_array=vertex_start_edge_count.array, out_array=vertex_start_edge_offsets.array, inclusive=False)
        # Count number of unique edges (deduplicating across the hexes that share them)
vertex_unique_edge_count = vertex_start_edge_count
wp.launch(
kernel=Hexmesh._count_unique_starting_edges_kernel,
device=device,
dim=self.vertex_count(),
inputs=[
self._vertex_hex_offsets,
self._vertex_hex_indices,
self.hex_vertex_indices,
vertex_start_edge_offsets.array,
vertex_unique_edge_count.array,
vertex_edge_ends.array,
],
)
vertex_unique_edge_offsets = borrow_temporary_like(
vertex_start_edge_offsets.array, temporary_store=temporary_store
)
array_scan(in_array=vertex_start_edge_count.array, out_array=vertex_unique_edge_offsets.array, inclusive=False)
# Get back edge count to host
if device.is_cuda:
edge_count = borrow_temporary(temporary_store, shape=(1,), dtype=int, device="cpu", pinned=True)
# Last vertex will not own any edge, so its count will be zero; just fetching last prefix count is ok
wp.copy(
dest=edge_count.array,
src=vertex_unique_edge_offsets.array,
src_offset=self.vertex_count() - 1,
count=1,
)
wp.synchronize_stream(wp.get_stream(device))
self._edge_count = int(edge_count.array.numpy()[0])
else:
self._edge_count = int(vertex_unique_edge_offsets.array.numpy()[self.vertex_count() - 1])
self._hex_edge_indices = wp.empty(
dtype=int, device=self.hex_vertex_indices.device, shape=(self.cell_count(), 12)
)
# Compress edge data
wp.launch(
kernel=Hexmesh._compress_edges_kernel,
device=device,
dim=self.vertex_count(),
inputs=[
self._vertex_hex_offsets,
self._vertex_hex_indices,
self.hex_vertex_indices,
vertex_start_edge_offsets.array,
vertex_unique_edge_offsets.array,
vertex_unique_edge_count.array,
vertex_edge_ends.array,
self._hex_edge_indices,
],
)
vertex_start_edge_offsets.release()
vertex_unique_edge_offsets.release()
vertex_unique_edge_count.release()
vertex_edge_ends.release()
@wp.kernel
def _count_starting_faces_kernel(
hex_vertex_indices: wp.array2d(dtype=int), vertex_start_face_count: wp.array(dtype=int)
):
t = wp.tid()
for k in range(6):
vi = wp.vec4i(
hex_vertex_indices[t, FACE_VERTEX_INDICES[k, 0]],
hex_vertex_indices[t, FACE_VERTEX_INDICES[k, 1]],
hex_vertex_indices[t, FACE_VERTEX_INDICES[k, 2]],
hex_vertex_indices[t, FACE_VERTEX_INDICES[k, 3]],
)
vm = wp.min(vi)
for i in range(4):
if vm == vi[i]:
wp.atomic_add(vertex_start_face_count, vm, 1)
@wp.func
def _face_sort(vidx: wp.vec4i, min_k: int):
v1 = vidx[(min_k + 1) % 4]
v2 = vidx[(min_k + 2) % 4]
v3 = vidx[(min_k + 3) % 4]
if v1 < v3:
return wp.vec3i(v1, v2, v3)
return wp.vec3i(v3, v2, v1)
@wp.func
def _find_face(
needle: wp.vec3i,
values: wp.array(dtype=wp.vec3i),
beg: int,
end: int,
):
for i in range(beg, end):
if values[i] == needle:
return i
return -1
@wp.kernel
def _count_unique_starting_faces_kernel(
vertex_hex_offsets: wp.array(dtype=int),
vertex_hex_indices: wp.array(dtype=int),
hex_vertex_indices: wp.array2d(dtype=int),
vertex_start_face_offsets: wp.array(dtype=int),
vertex_start_face_count: wp.array(dtype=int),
face_other_vs: wp.array(dtype=wp.vec3i),
face_hexes: wp.array2d(dtype=int),
):
v = wp.tid()
face_beg = vertex_start_face_offsets[v]
hex_beg = vertex_hex_offsets[v]
hex_end = vertex_hex_offsets[v + 1]
face_cur = face_beg
for hexa in range(hex_beg, hex_end):
hx = vertex_hex_indices[hexa]
for k in range(6):
vi = wp.vec4i(
hex_vertex_indices[hx, FACE_VERTEX_INDICES[k, 0]],
hex_vertex_indices[hx, FACE_VERTEX_INDICES[k, 1]],
hex_vertex_indices[hx, FACE_VERTEX_INDICES[k, 2]],
hex_vertex_indices[hx, FACE_VERTEX_INDICES[k, 3]],
)
min_i = int(wp.argmin(vi))
if v == vi[min_i]:
other_v = Hexmesh._face_sort(vi, min_i)
# Check if other_v has been seen
seen_idx = Hexmesh._find_face(other_v, face_other_vs, face_beg, face_cur)
if seen_idx == -1:
face_other_vs[face_cur] = other_v
face_hexes[face_cur, 0] = hx
face_hexes[face_cur, 1] = hx
face_cur += 1
else:
face_hexes[seen_idx, 1] = hx
vertex_start_face_count[v] = face_cur - face_beg
@wp.kernel
def _compress_faces_kernel(
vertex_start_face_offsets: wp.array(dtype=int),
vertex_unique_face_offsets: wp.array(dtype=int),
vertex_unique_face_count: wp.array(dtype=int),
uncompressed_face_other_vs: wp.array(dtype=wp.vec3i),
uncompressed_face_hexes: wp.array2d(dtype=int),
face_vertex_indices: wp.array(dtype=wp.vec4i),
face_hex_indices: wp.array(dtype=wp.vec2i),
boundary_mask: wp.array(dtype=int),
):
v = wp.tid()
start_beg = vertex_start_face_offsets[v]
unique_beg = vertex_unique_face_offsets[v]
unique_count = vertex_unique_face_count[v]
for f in range(unique_count):
src_index = start_beg + f
face_index = unique_beg + f
face_vertex_indices[face_index] = wp.vec4i(
v,
uncompressed_face_other_vs[src_index][0],
uncompressed_face_other_vs[src_index][1],
uncompressed_face_other_vs[src_index][2],
)
hx0 = uncompressed_face_hexes[src_index, 0]
hx1 = uncompressed_face_hexes[src_index, 1]
face_hex_indices[face_index] = wp.vec2i(hx0, hx1)
if hx0 == hx1:
boundary_mask[face_index] = 1
else:
boundary_mask[face_index] = 0
@wp.kernel
def _flip_face_normals(
face_vertex_indices: wp.array(dtype=wp.vec4i),
face_hex_indices: wp.array(dtype=wp.vec2i),
hex_vertex_indices: wp.array2d(dtype=int),
positions: wp.array(dtype=wp.vec3),
):
f = wp.tid()
hexa = face_hex_indices[f][0]
hex_vidx = hex_vertex_indices[hexa]
face_vidx = face_vertex_indices[f]
hex_centroid = (
positions[hex_vidx[0]]
+ positions[hex_vidx[1]]
+ positions[hex_vidx[2]]
+ positions[hex_vidx[3]]
+ positions[hex_vidx[4]]
+ positions[hex_vidx[5]]
+ positions[hex_vidx[6]]
+ positions[hex_vidx[7]]
) / 8.0
v0 = positions[face_vidx[0]]
v1 = positions[face_vidx[1]]
v2 = positions[face_vidx[2]]
v3 = positions[face_vidx[3]]
face_center = (v1 + v0 + v2 + v3) / 4.0
face_normal = wp.cross(v2 - v0, v3 - v1)
        # if face normal points toward the hex centroid, flip indices
if wp.dot(hex_centroid - face_center, face_normal) > 0.0:
face_vertex_indices[f] = wp.vec4i(face_vidx[0], face_vidx[3], face_vidx[2], face_vidx[1])
@wp.func
def _find_face_orientation(face_vidx: wp.vec4i, hex_index: int, hex_vertex_indices: wp.array2d(dtype=int)):
hex_vidx = hex_vertex_indices[hex_index]
# Find local index in hex corresponding to face
face_min_i = int(wp.argmin(face_vidx))
face_other_v = Hexmesh._face_sort(face_vidx, face_min_i)
for k in range(6):
hex_face_vi = wp.vec4i(
hex_vidx[FACE_VERTEX_INDICES[k, 0]],
hex_vidx[FACE_VERTEX_INDICES[k, 1]],
hex_vidx[FACE_VERTEX_INDICES[k, 2]],
hex_vidx[FACE_VERTEX_INDICES[k, 3]],
)
hex_min_i = int(wp.argmin(hex_face_vi))
hex_other_v = Hexmesh._face_sort(hex_face_vi, hex_min_i)
if hex_other_v == face_other_v:
local_face_index = k
break
# Find starting vertex index
for k in range(4):
if face_vidx[k] == hex_face_vi[0]:
face_orientation = 2 * k
if face_vidx[(k + 1) % 4] != hex_face_vi[1]:
face_orientation += 1
return local_face_index, face_orientation
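    # The returned orientation packs the relative rotation and winding of the
    # stored face versus the hex's local face into one integer: 2 * k encodes
    # that the hex face starts at the stored face's k-th vertex, and the low bit
    # is set when the two traversals run in opposite directions
    # (8 possible values: 4 rotations x 2 windings).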
@wp.kernel
def _compute_face_orientation(
face_vertex_indices: wp.array(dtype=wp.vec4i),
face_hex_indices: wp.array(dtype=wp.vec2i),
hex_vertex_indices: wp.array2d(dtype=int),
face_hex_face_ori: wp.array(dtype=wp.vec4i),
):
f = wp.tid()
face_vidx = face_vertex_indices[f]
hx0 = face_hex_indices[f][0]
local_face_0, ori_0 = Hexmesh._find_face_orientation(face_vidx, hx0, hex_vertex_indices)
hx1 = face_hex_indices[f][1]
if hx0 == hx1:
face_hex_face_ori[f] = wp.vec4i(local_face_0, ori_0, local_face_0, ori_0)
else:
local_face_1, ori_1 = Hexmesh._find_face_orientation(face_vidx, hx1, hex_vertex_indices)
face_hex_face_ori[f] = wp.vec4i(local_face_0, ori_0, local_face_1, ori_1)
@wp.kernel
def _count_starting_edges_kernel(
hex_vertex_indices: wp.array2d(dtype=int), vertex_start_edge_count: wp.array(dtype=int)
):
t = wp.tid()
for k in range(12):
v0 = hex_vertex_indices[t, EDGE_VERTEX_INDICES[k, 0]]
v1 = hex_vertex_indices[t, EDGE_VERTEX_INDICES[k, 1]]
if v0 < v1:
wp.atomic_add(vertex_start_edge_count, v0, 1)
else:
wp.atomic_add(vertex_start_edge_count, v1, 1)
@wp.func
def _find_edge(
needle: int,
values: wp.array(dtype=int),
beg: int,
end: int,
):
for i in range(beg, end):
if values[i] == needle:
return i
return -1
@wp.kernel
def _count_unique_starting_edges_kernel(
vertex_hex_offsets: wp.array(dtype=int),
vertex_hex_indices: wp.array(dtype=int),
hex_vertex_indices: wp.array2d(dtype=int),
vertex_start_edge_offsets: wp.array(dtype=int),
vertex_start_edge_count: wp.array(dtype=int),
edge_ends: wp.array(dtype=int),
):
v = wp.tid()
edge_beg = vertex_start_edge_offsets[v]
hex_beg = vertex_hex_offsets[v]
hex_end = vertex_hex_offsets[v + 1]
edge_cur = edge_beg
        for hexa in range(hex_beg, hex_end):
            hx = vertex_hex_indices[hexa]
            for k in range(12):
                v0 = hex_vertex_indices[hx, EDGE_VERTEX_INDICES[k, 0]]
                v1 = hex_vertex_indices[hx, EDGE_VERTEX_INDICES[k, 1]]
if v == wp.min(v0, v1):
other_v = wp.max(v0, v1)
if Hexmesh._find_edge(other_v, edge_ends, edge_beg, edge_cur) == -1:
edge_ends[edge_cur] = other_v
edge_cur += 1
vertex_start_edge_count[v] = edge_cur - edge_beg
@wp.kernel
def _compress_edges_kernel(
vertex_hex_offsets: wp.array(dtype=int),
vertex_hex_indices: wp.array(dtype=int),
hex_vertex_indices: wp.array2d(dtype=int),
vertex_start_edge_offsets: wp.array(dtype=int),
vertex_unique_edge_offsets: wp.array(dtype=int),
vertex_unique_edge_count: wp.array(dtype=int),
uncompressed_edge_ends: wp.array(dtype=int),
hex_edge_indices: wp.array2d(dtype=int),
):
v = wp.tid()
uncompressed_beg = vertex_start_edge_offsets[v]
unique_beg = vertex_unique_edge_offsets[v]
unique_count = vertex_unique_edge_count[v]
hex_beg = vertex_hex_offsets[v]
hex_end = vertex_hex_offsets[v + 1]
        for hexa in range(hex_beg, hex_end):
            hx = vertex_hex_indices[hexa]
            for k in range(12):
                v0 = hex_vertex_indices[hx, EDGE_VERTEX_INDICES[k, 0]]
                v1 = hex_vertex_indices[hx, EDGE_VERTEX_INDICES[k, 1]]
                if v == wp.min(v0, v1):
                    other_v = wp.max(v0, v1)
                    edge_id = (
                        Hexmesh._find_edge(
                            other_v, uncompressed_edge_ends, uncompressed_beg, uncompressed_beg + unique_count
                        )
                        - uncompressed_beg
                        + unique_beg
                    )
                    hex_edge_indices[hx, k] = edge_id
| 34,831 | Python | 35.51153 | 124 | 0.569234 |
NVIDIA/warp/warp/fem/geometry/trimesh_2d.py | from typing import Optional
import warp as wp
from warp.fem.cache import (
TemporaryStore,
borrow_temporary,
borrow_temporary_like,
cached_arg_value,
)
from warp.fem.types import (
NULL_ELEMENT_INDEX,
OUTSIDE,
Coords,
ElementIndex,
Sample,
make_free_sample,
)
from .closest_point import project_on_tri_at_origin
from .element import LinearEdge, Triangle
from .geometry import Geometry
@wp.struct
class Trimesh2DCellArg:
tri_vertex_indices: wp.array2d(dtype=int)
positions: wp.array(dtype=wp.vec2)
# for neighbor cell lookup
vertex_tri_offsets: wp.array(dtype=int)
vertex_tri_indices: wp.array(dtype=int)
deformation_gradients: wp.array(dtype=wp.mat22f)
@wp.struct
class Trimesh2DSideArg:
cell_arg: Trimesh2DCellArg
edge_vertex_indices: wp.array(dtype=wp.vec2i)
edge_tri_indices: wp.array(dtype=wp.vec2i)
class Trimesh2D(Geometry):
"""Two-dimensional triangular mesh geometry"""
dimension = 2
def __init__(
self, tri_vertex_indices: wp.array, positions: wp.array, temporary_store: Optional[TemporaryStore] = None
):
"""
Constructs a two-dimensional triangular mesh.
Args:
tri_vertex_indices: warp array of shape (num_tris, 3) containing vertex indices for each tri
positions: warp array of shape (num_vertices, 2) containing 2d position for each vertex
temporary_store: shared pool from which to allocate temporary arrays
"""
self.tri_vertex_indices = tri_vertex_indices
self.positions = positions
self._edge_vertex_indices: wp.array = None
self._edge_tri_indices: wp.array = None
self._vertex_tri_offsets: wp.array = None
self._vertex_tri_indices: wp.array = None
self._build_topology(temporary_store)
self._deformation_gradients: wp.array = None
self._compute_deformation_gradients()
def cell_count(self):
return self.tri_vertex_indices.shape[0]
def vertex_count(self):
return self.positions.shape[0]
def side_count(self):
return self._edge_vertex_indices.shape[0]
def boundary_side_count(self):
return self._boundary_edge_indices.shape[0]
def reference_cell(self) -> Triangle:
return Triangle()
def reference_side(self) -> LinearEdge:
return LinearEdge()
@property
def edge_tri_indices(self) -> wp.array:
return self._edge_tri_indices
@property
def edge_vertex_indices(self) -> wp.array:
return self._edge_vertex_indices
CellArg = Trimesh2DCellArg
SideArg = Trimesh2DSideArg
@wp.struct
class SideIndexArg:
boundary_edge_indices: wp.array(dtype=int)
# Geometry device interface
@cached_arg_value
def cell_arg_value(self, device) -> CellArg:
args = self.CellArg()
args.tri_vertex_indices = self.tri_vertex_indices.to(device)
args.positions = self.positions.to(device)
args.vertex_tri_offsets = self._vertex_tri_offsets.to(device)
args.vertex_tri_indices = self._vertex_tri_indices.to(device)
args.deformation_gradients = self._deformation_gradients.to(device)
return args
@wp.func
def cell_position(args: CellArg, s: Sample):
tri_idx = args.tri_vertex_indices[s.element_index]
return (
s.element_coords[0] * args.positions[tri_idx[0]]
+ s.element_coords[1] * args.positions[tri_idx[1]]
+ s.element_coords[2] * args.positions[tri_idx[2]]
)
@wp.func
def cell_deformation_gradient(args: CellArg, s: Sample):
return args.deformation_gradients[s.element_index]
@wp.func
def cell_inverse_deformation_gradient(args: CellArg, s: Sample):
return wp.inverse(args.deformation_gradients[s.element_index])
@wp.func
def _project_on_tri(args: CellArg, pos: wp.vec2, tri_index: int):
p0 = args.positions[args.tri_vertex_indices[tri_index, 0]]
q = pos - p0
e1 = args.positions[args.tri_vertex_indices[tri_index, 1]] - p0
e2 = args.positions[args.tri_vertex_indices[tri_index, 2]] - p0
dist, coords = project_on_tri_at_origin(q, e1, e2)
return dist, coords
@wp.func
def cell_lookup(args: CellArg, pos: wp.vec2, guess: Sample):
closest_tri = int(NULL_ELEMENT_INDEX)
closest_coords = Coords(OUTSIDE)
closest_dist = float(1.0e8)
for v in range(3):
vtx = args.tri_vertex_indices[guess.element_index, v]
tri_beg = args.vertex_tri_offsets[vtx]
tri_end = args.vertex_tri_offsets[vtx + 1]
for t in range(tri_beg, tri_end):
tri = args.vertex_tri_indices[t]
dist, coords = Trimesh2D._project_on_tri(args, pos, tri)
if dist <= closest_dist:
closest_dist = dist
closest_tri = tri
closest_coords = coords
return make_free_sample(closest_tri, closest_coords)
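    # Note: this lookup is local. It only scans triangles incident to the three
    # vertices of the guess's triangle, so it is exact for points that stay within
    # one ring of the guess and otherwise returns the closest candidate visited.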
@wp.func
def cell_measure(args: CellArg, s: Sample):
return 0.5 * wp.abs(wp.determinant(args.deformation_gradients[s.element_index]))
@wp.func
def cell_normal(args: CellArg, s: Sample):
return wp.vec2(0.0)
@cached_arg_value
def side_index_arg_value(self, device) -> SideIndexArg:
args = self.SideIndexArg()
args.boundary_edge_indices = self._boundary_edge_indices.to(device)
return args
@wp.func
def boundary_side_index(args: SideIndexArg, boundary_side_index: int):
"""Boundary side to side index"""
return args.boundary_edge_indices[boundary_side_index]
@cached_arg_value
def side_arg_value(self, device) -> CellArg:
args = self.SideArg()
args.cell_arg = self.cell_arg_value(device)
args.edge_vertex_indices = self._edge_vertex_indices.to(device)
args.edge_tri_indices = self._edge_tri_indices.to(device)
return args
@wp.func
def side_position(args: SideArg, s: Sample):
edge_idx = args.edge_vertex_indices[s.element_index]
        return (
            (1.0 - s.element_coords[0]) * args.cell_arg.positions[edge_idx[0]]
            + s.element_coords[0] * args.cell_arg.positions[edge_idx[1]]
        )
@wp.func
def side_deformation_gradient(args: SideArg, s: Sample):
edge_idx = args.edge_vertex_indices[s.element_index]
v0 = args.cell_arg.positions[edge_idx[0]]
v1 = args.cell_arg.positions[edge_idx[1]]
return v1 - v0
@wp.func
def side_inner_inverse_deformation_gradient(args: SideArg, s: Sample):
cell_index = Trimesh2D.side_inner_cell_index(args, s.element_index)
return wp.inverse(args.cell_arg.deformation_gradients[cell_index])
@wp.func
def side_outer_inverse_deformation_gradient(args: SideArg, s: Sample):
cell_index = Trimesh2D.side_outer_cell_index(args, s.element_index)
return wp.inverse(args.cell_arg.deformation_gradients[cell_index])
@wp.func
def side_measure(args: SideArg, s: Sample):
edge_idx = args.edge_vertex_indices[s.element_index]
v0 = args.cell_arg.positions[edge_idx[0]]
v1 = args.cell_arg.positions[edge_idx[1]]
return wp.length(v1 - v0)
@wp.func
def side_measure_ratio(args: SideArg, s: Sample):
inner = Trimesh2D.side_inner_cell_index(args, s.element_index)
outer = Trimesh2D.side_outer_cell_index(args, s.element_index)
return Trimesh2D.side_measure(args, s) / wp.min(
Trimesh2D.cell_measure(args.cell_arg, make_free_sample(inner, Coords())),
Trimesh2D.cell_measure(args.cell_arg, make_free_sample(outer, Coords())),
)
@wp.func
def side_normal(args: SideArg, s: Sample):
edge_idx = args.edge_vertex_indices[s.element_index]
v0 = args.cell_arg.positions[edge_idx[0]]
v1 = args.cell_arg.positions[edge_idx[1]]
e = v1 - v0
return wp.normalize(wp.vec2(-e[1], e[0]))
@wp.func
def side_inner_cell_index(arg: SideArg, side_index: ElementIndex):
return arg.edge_tri_indices[side_index][0]
@wp.func
def side_outer_cell_index(arg: SideArg, side_index: ElementIndex):
return arg.edge_tri_indices[side_index][1]
@wp.func
def edge_to_tri_coords(args: SideArg, side_index: ElementIndex, tri_index: ElementIndex, side_coords: Coords):
edge_vidx = args.edge_vertex_indices[side_index]
tri_vidx = args.cell_arg.tri_vertex_indices[tri_index]
v0 = tri_vidx[0]
v1 = tri_vidx[1]
cx = float(0.0)
cy = float(0.0)
cz = float(0.0)
if edge_vidx[0] == v0:
cx = 1.0 - side_coords[0]
elif edge_vidx[0] == v1:
cy = 1.0 - side_coords[0]
else:
cz = 1.0 - side_coords[0]
if edge_vidx[1] == v0:
cx = side_coords[0]
elif edge_vidx[1] == v1:
cy = side_coords[0]
else:
cz = side_coords[0]
return Coords(cx, cy, cz)
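    # Maps an edge coordinate to barycentric triangle coordinates by routing
    # (1 - t) to the barycentric slot of the edge's start vertex and t to the
    # slot of its end vertex. E.g., an edge joining the triangle's vertices 0
    # and 2 with side coordinate t = 0.25 yields Coords(0.75, 0.0, 0.25).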
@wp.func
def side_inner_cell_coords(args: SideArg, side_index: ElementIndex, side_coords: Coords):
inner_cell_index = Trimesh2D.side_inner_cell_index(args, side_index)
return Trimesh2D.edge_to_tri_coords(args, side_index, inner_cell_index, side_coords)
@wp.func
def side_outer_cell_coords(args: SideArg, side_index: ElementIndex, side_coords: Coords):
outer_cell_index = Trimesh2D.side_outer_cell_index(args, side_index)
return Trimesh2D.edge_to_tri_coords(args, side_index, outer_cell_index, side_coords)
@wp.func
def side_from_cell_coords(
args: SideArg,
side_index: ElementIndex,
tri_index: ElementIndex,
tri_coords: Coords,
):
edge_vidx = args.edge_vertex_indices[side_index]
tri_vidx = args.cell_arg.tri_vertex_indices[tri_index]
start = int(2)
end = int(2)
for k in range(2):
v = tri_vidx[k]
if edge_vidx[1] == v:
end = k
elif edge_vidx[0] == v:
start = k
return wp.select(
tri_coords[start] + tri_coords[end] > 0.999, Coords(OUTSIDE), Coords(tri_coords[end], 0.0, 0.0)
)
@wp.func
def side_to_cell_arg(side_arg: SideArg):
return side_arg.cell_arg
def _build_topology(self, temporary_store: TemporaryStore):
from warp.fem.utils import compress_node_indices, masked_indices
from warp.utils import array_scan
device = self.tri_vertex_indices.device
vertex_tri_offsets, vertex_tri_indices, _, __ = compress_node_indices(
self.vertex_count(), self.tri_vertex_indices, temporary_store=temporary_store
)
self._vertex_tri_offsets = vertex_tri_offsets.detach()
self._vertex_tri_indices = vertex_tri_indices.detach()
vertex_start_edge_count = borrow_temporary(temporary_store, dtype=int, device=device, shape=self.vertex_count())
vertex_start_edge_count.array.zero_()
vertex_start_edge_offsets = borrow_temporary_like(vertex_start_edge_count, temporary_store=temporary_store)
vertex_edge_ends = borrow_temporary(temporary_store, dtype=int, device=device, shape=(3 * self.cell_count()))
vertex_edge_tris = borrow_temporary(temporary_store, dtype=int, device=device, shape=(3 * self.cell_count(), 2))
# Count face edges starting at each vertex
wp.launch(
kernel=Trimesh2D._count_starting_edges_kernel,
device=device,
dim=self.cell_count(),
inputs=[self.tri_vertex_indices, vertex_start_edge_count.array],
)
array_scan(in_array=vertex_start_edge_count.array, out_array=vertex_start_edge_offsets.array, inclusive=False)
# Count number of unique edges (deduplicate across faces)
vertex_unique_edge_count = vertex_start_edge_count
wp.launch(
kernel=Trimesh2D._count_unique_starting_edges_kernel,
device=device,
dim=self.vertex_count(),
inputs=[
self._vertex_tri_offsets,
self._vertex_tri_indices,
self.tri_vertex_indices,
vertex_start_edge_offsets.array,
vertex_unique_edge_count.array,
vertex_edge_ends.array,
vertex_edge_tris.array,
],
)
vertex_unique_edge_offsets = borrow_temporary_like(vertex_start_edge_offsets, temporary_store=temporary_store)
array_scan(in_array=vertex_start_edge_count.array, out_array=vertex_unique_edge_offsets.array, inclusive=False)
# Get back edge count to host
if device.is_cuda:
edge_count = borrow_temporary(temporary_store, shape=(1,), dtype=int, device="cpu", pinned=True)
# Last vertex will not own any edge, so its count will be zero; just fetching last prefix count is ok
wp.copy(
dest=edge_count.array, src=vertex_unique_edge_offsets.array, src_offset=self.vertex_count() - 1, count=1
)
wp.synchronize_stream(wp.get_stream(device))
edge_count = int(edge_count.array.numpy()[0])
else:
edge_count = int(vertex_unique_edge_offsets.array.numpy()[self.vertex_count() - 1])
self._edge_vertex_indices = wp.empty(shape=(edge_count,), dtype=wp.vec2i, device=device)
self._edge_tri_indices = wp.empty(shape=(edge_count,), dtype=wp.vec2i, device=device)
boundary_mask = borrow_temporary(temporary_store=temporary_store, shape=(edge_count,), dtype=int, device=device)
# Compress edge data
wp.launch(
kernel=Trimesh2D._compress_edges_kernel,
device=device,
dim=self.vertex_count(),
inputs=[
vertex_start_edge_offsets.array,
vertex_unique_edge_offsets.array,
vertex_unique_edge_count.array,
vertex_edge_ends.array,
vertex_edge_tris.array,
self._edge_vertex_indices,
self._edge_tri_indices,
boundary_mask.array,
],
)
vertex_start_edge_offsets.release()
vertex_unique_edge_offsets.release()
vertex_unique_edge_count.release()
vertex_edge_ends.release()
vertex_edge_tris.release()
# Flip normals if necessary
wp.launch(
kernel=Trimesh2D._flip_edge_normals,
device=device,
dim=self.side_count(),
inputs=[self._edge_vertex_indices, self._edge_tri_indices, self.tri_vertex_indices, self.positions],
)
boundary_edge_indices, _ = masked_indices(boundary_mask.array, temporary_store=temporary_store)
self._boundary_edge_indices = boundary_edge_indices.detach()
boundary_mask.release()
def _compute_deformation_gradients(self):
self._deformation_gradients = wp.empty(dtype=wp.mat22f, device=self.positions.device, shape=(self.cell_count()))
wp.launch(
kernel=Trimesh2D._compute_deformation_gradients_kernel,
dim=self._deformation_gradients.shape,
device=self._deformation_gradients.device,
inputs=[self.tri_vertex_indices, self.positions, self._deformation_gradients],
)
@wp.kernel
def _count_starting_edges_kernel(
tri_vertex_indices: wp.array2d(dtype=int), vertex_start_edge_count: wp.array(dtype=int)
):
t = wp.tid()
for k in range(3):
v0 = tri_vertex_indices[t, k]
v1 = tri_vertex_indices[t, (k + 1) % 3]
if v0 < v1:
wp.atomic_add(vertex_start_edge_count, v0, 1)
else:
wp.atomic_add(vertex_start_edge_count, v1, 1)
@wp.func
def _find(
needle: int,
values: wp.array(dtype=int),
beg: int,
end: int,
):
for i in range(beg, end):
if values[i] == needle:
return i
return -1
@wp.kernel
def _count_unique_starting_edges_kernel(
vertex_tri_offsets: wp.array(dtype=int),
vertex_tri_indices: wp.array(dtype=int),
tri_vertex_indices: wp.array2d(dtype=int),
vertex_start_edge_offsets: wp.array(dtype=int),
vertex_start_edge_count: wp.array(dtype=int),
edge_ends: wp.array(dtype=int),
edge_tris: wp.array2d(dtype=int),
):
v = wp.tid()
edge_beg = vertex_start_edge_offsets[v]
tri_beg = vertex_tri_offsets[v]
tri_end = vertex_tri_offsets[v + 1]
edge_cur = edge_beg
for tri in range(tri_beg, tri_end):
t = vertex_tri_indices[tri]
for k in range(3):
v0 = tri_vertex_indices[t, k]
v1 = tri_vertex_indices[t, (k + 1) % 3]
if v == wp.min(v0, v1):
other_v = wp.max(v0, v1)
# Check if other_v has been seen
seen_idx = Trimesh2D._find(other_v, edge_ends, edge_beg, edge_cur)
if seen_idx == -1:
edge_ends[edge_cur] = other_v
edge_tris[edge_cur, 0] = t
edge_tris[edge_cur, 1] = t
edge_cur += 1
else:
edge_tris[seen_idx, 1] = t
vertex_start_edge_count[v] = edge_cur - edge_beg
@wp.kernel
def _compress_edges_kernel(
vertex_start_edge_offsets: wp.array(dtype=int),
vertex_unique_edge_offsets: wp.array(dtype=int),
vertex_unique_edge_count: wp.array(dtype=int),
uncompressed_edge_ends: wp.array(dtype=int),
uncompressed_edge_tris: wp.array2d(dtype=int),
edge_vertex_indices: wp.array(dtype=wp.vec2i),
edge_tri_indices: wp.array(dtype=wp.vec2i),
boundary_mask: wp.array(dtype=int),
):
v = wp.tid()
start_beg = vertex_start_edge_offsets[v]
unique_beg = vertex_unique_edge_offsets[v]
unique_count = vertex_unique_edge_count[v]
for e in range(unique_count):
src_index = start_beg + e
edge_index = unique_beg + e
edge_vertex_indices[edge_index] = wp.vec2i(v, uncompressed_edge_ends[src_index])
t0 = uncompressed_edge_tris[src_index, 0]
t1 = uncompressed_edge_tris[src_index, 1]
edge_tri_indices[edge_index] = wp.vec2i(t0, t1)
if t0 == t1:
boundary_mask[edge_index] = 1
else:
boundary_mask[edge_index] = 0
@wp.kernel
def _flip_edge_normals(
edge_vertex_indices: wp.array(dtype=wp.vec2i),
edge_tri_indices: wp.array(dtype=wp.vec2i),
tri_vertex_indices: wp.array2d(dtype=int),
positions: wp.array(dtype=wp.vec2),
):
e = wp.tid()
tri = edge_tri_indices[e][0]
tri_vidx = tri_vertex_indices[tri]
edge_vidx = edge_vertex_indices[e]
tri_centroid = (positions[tri_vidx[0]] + positions[tri_vidx[1]] + positions[tri_vidx[2]]) / 3.0
v0 = positions[edge_vidx[0]]
v1 = positions[edge_vidx[1]]
edge_center = 0.5 * (v1 + v0)
edge_vec = v1 - v0
edge_normal = wp.vec2(-edge_vec[1], edge_vec[0])
# if edge normal points toward first triangle centroid, flip indices
if wp.dot(tri_centroid - edge_center, edge_normal) > 0.0:
edge_vertex_indices[e] = wp.vec2i(edge_vidx[1], edge_vidx[0])
@wp.kernel
def _compute_deformation_gradients_kernel(
tri_vertex_indices: wp.array2d(dtype=int),
positions: wp.array(dtype=wp.vec2f),
transforms: wp.array(dtype=wp.mat22f),
):
t = wp.tid()
p0 = positions[tri_vertex_indices[t, 0]]
p1 = positions[tri_vertex_indices[t, 1]]
p2 = positions[tri_vertex_indices[t, 2]]
e1 = p1 - p0
e2 = p2 - p0
transforms[t] = wp.mat22(e1, e2)
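        # The resulting matrix maps reference-element coordinates to world-space
        # offsets from p0; its determinant is twice the signed triangle area,
        # which cell_measure exploits via 0.5 * |det(F)|.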
| 20,109 | Python | 33.792387 | 120 | 0.602715 |
NVIDIA/warp/warp/fem/geometry/__init__.py | from .deformed_geometry import DeformedGeometry
from .element import Element
from .geometry import Geometry
from .grid_2d import Grid2D
from .grid_3d import Grid3D
from .hexmesh import Hexmesh
from .nanogrid import Nanogrid
from .partition import (
ExplicitGeometryPartition,
GeometryPartition,
LinearGeometryPartition,
WholeGeometryPartition,
)
from .quadmesh_2d import Quadmesh2D
from .tetmesh import Tetmesh
from .trimesh_2d import Trimesh2D
| 461 | Python | 26.176469 | 47 | 0.81128 |
NVIDIA/warp/warp/fem/geometry/nanogrid.py | from typing import Optional
import numpy as np
import warp as wp
from warp.fem import cache, utils
from warp.fem.types import NULL_ELEMENT_INDEX, OUTSIDE, Coords, ElementIndex, Sample, make_free_sample
from .element import Cube, Square
from .geometry import Geometry
# Flag used for building edge/face grids to disambiguate axis within the grid
GRID_AXIS_FLAG = wp.constant(wp.int32(1 << 20))
FACE_AXIS_MASK = wp.constant(wp.uint8((1 << 3) - 1))
FACE_INNER_OFFSET_BIT = wp.constant(wp.uint8(3))
FACE_OUTER_OFFSET_BIT = wp.constant(wp.uint8(4))
_mat32 = wp.mat(shape=(3, 2), dtype=float)
@wp.func
def _add_axis_flag(ijk: wp.vec3i, axis: int):
coord = ijk[axis]
ijk[axis] = wp.select(coord < 0, coord | GRID_AXIS_FLAG, coord & (~GRID_AXIS_FLAG))
return ijk
@wp.func
def _extract_axis_flag(ijk: wp.vec3i):
for ax in range(3):
coord = ijk[ax]
if coord < 0:
if (ijk[ax] & GRID_AXIS_FLAG) == 0:
ijk[ax] = ijk[ax] | GRID_AXIS_FLAG
return ax, ijk
else:
if (ijk[ax] & GRID_AXIS_FLAG) != 0:
ijk[ax] = ijk[ax] & (~GRID_AXIS_FLAG)
return ax, ijk
return -1, ijk
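# The axis flag piggybacks on bit 20 of one ijk coordinate so that face/edge
# voxels along different axes hash to distinct keys in the same grid. Because
# negative coordinates are stored in two's complement, the flag is encoded as
# "bit set" for non-negative values and "bit cleared" for negative ones, which
# _extract_axis_flag reverses. Worked example (hypothetical voxel): flagging
# axis 1 of (3, 5, 2) stores (3, 5 | (1 << 20), 2).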
@wp.struct
class NanogridCellArg:
cell_grid: wp.uint64
cell_ijk: wp.array(dtype=wp.vec3i)
inverse_transform: wp.mat33
cell_volume: float
@wp.struct
class NanogridSideArg:
cell_arg: NanogridCellArg
face_ijk: wp.array(dtype=wp.vec3i)
face_flags: wp.array(dtype=wp.uint8)
face_areas: wp.vec3
class Nanogrid(Geometry):
dimension = 3
def __init__(self, grid: wp.Volume, temporary_store: Optional[cache.TemporaryStore] = None):
self._cell_grid = grid
self._cell_grid_info = grid.get_grid_info()
device = grid.device
cell_count = grid.get_voxel_count()
self._cell_ijk = wp.array(shape=(cell_count,), dtype=wp.vec3i, device=device)
grid.get_voxels(out=self._cell_ijk)
self._node_grid = _build_node_grid(self._cell_ijk, grid, temporary_store)
node_count = self._node_grid.get_voxel_count()
self._node_ijk = wp.array(shape=(node_count,), dtype=wp.vec3i, device=device)
self._node_grid.get_voxels(out=self._node_ijk)
self._face_grid = _build_face_grid(self._cell_ijk, grid, temporary_store)
face_count = self._face_grid.get_voxel_count()
self._face_ijk = wp.array(shape=(face_count,), dtype=wp.vec3i, device=device)
self._face_grid.get_voxels(out=self._face_ijk)
self._face_flags = wp.array(shape=(face_count,), dtype=wp.uint8, device=device)
boundary_face_mask = cache.borrow_temporary(temporary_store, shape=(face_count,), dtype=wp.int32, device=device)
wp.launch(
_build_face_flags,
dim=face_count,
device=device,
inputs=[grid.id, self._face_ijk, self._face_flags, boundary_face_mask.array],
)
boundary_face_indices, _ = utils.masked_indices(boundary_face_mask.array)
self._boundary_face_indices = boundary_face_indices.detach()
self._edge_grid = None
self._edge_ijk = None
def _build_edge_grid(self, temporary_store: Optional[cache.TemporaryStore] = None):
self._edge_grid = _build_edge_grid(self._cell_ijk, self._cell_grid, temporary_store)
edge_count = self._edge_grid.get_voxel_count()
self._edge_ijk = wp.array(shape=(edge_count,), dtype=wp.vec3i, device=self._edge_grid.device)
self._edge_grid.get_voxels(out=self._edge_ijk)
def cell_count(self):
return self._cell_ijk.shape[0]
def vertex_count(self):
return self._node_ijk.shape[0]
def side_count(self):
return self._face_ijk.shape[0]
def edge_count(self):
if self._edge_ijk is None:
self._build_edge_grid()
return self._edge_ijk.shape[0]
def boundary_side_count(self):
return self._boundary_face_indices.shape[0]
def reference_cell(self) -> Cube:
return Cube()
def reference_side(self) -> Square:
return Square()
CellArg = NanogridCellArg
@cache.cached_arg_value
def cell_arg_value(self, device) -> CellArg:
args = self.CellArg()
args.cell_grid = self._cell_grid.id
args.cell_ijk = self._cell_ijk
transform = np.array(self._cell_grid_info.transform_matrix).reshape(3, 3)
args.inverse_transform = wp.mat33f(np.linalg.inv(transform))
args.cell_volume = abs(np.linalg.det(transform))
return args
@wp.func
def cell_position(args: CellArg, s: Sample):
uvw = wp.vec3(args.cell_ijk[s.element_index]) + s.element_coords
return wp.volume_index_to_world(args.cell_grid, uvw)
@wp.func
def cell_deformation_gradient(args: CellArg, s: Sample):
return wp.inverse(args.inverse_transform)
@wp.func
def cell_inverse_deformation_gradient(args: CellArg, s: Sample):
return args.inverse_transform
@wp.func
def cell_lookup(args: CellArg, pos: wp.vec3):
uvw = wp.volume_world_to_index(args.cell_grid, pos)
ijk = wp.vec3i(int(wp.floor(uvw[0])), int(wp.floor(uvw[1])), int(wp.floor(uvw[2])))
element_index = wp.volume_lookup_index(args.cell_grid, ijk[0], ijk[1], ijk[2])
return wp.select(
element_index == -1,
make_free_sample(element_index, uvw - wp.vec3(ijk)),
make_free_sample(NULL_ELEMENT_INDEX, Coords(OUTSIDE)),
)
@wp.func
def cell_lookup(args: CellArg, pos: wp.vec3, guess: Sample):
return Nanogrid.cell_lookup(args, pos)
@wp.func
def cell_measure(args: CellArg, s: Sample):
return args.cell_volume
@wp.func
def cell_normal(args: CellArg, s: Sample):
return wp.vec3(0.0)
SideArg = NanogridSideArg
@cache.cached_arg_value
def side_arg_value(self, device) -> SideArg:
args = self.SideArg()
args.cell_arg = self.cell_arg_value(device)
args.face_ijk = self._face_ijk.to(device)
args.face_flags = self._face_flags.to(device)
transform = np.array(self._cell_grid_info.transform_matrix).reshape(3, 3)
args.face_areas = wp.vec3(
tuple(np.linalg.norm(np.cross(transform[:, k - 2], transform[:, k - 1])) for k in range(3))
)
return args
@wp.struct
class SideIndexArg:
boundary_face_indices: wp.array(dtype=int)
@cache.cached_arg_value
def side_index_arg_value(self, device) -> SideIndexArg:
args = self.SideIndexArg()
args.boundary_face_indices = self._boundary_face_indices.to(device)
return args
@wp.func
def boundary_side_index(args: SideIndexArg, boundary_side_index: int):
return args.boundary_face_indices[boundary_side_index]
@wp.func
def _side_to_cell_coords(axis: int, inner: float, side_coords: Coords):
uvw = wp.vec3()
uvw[axis] = inner
uvw[(axis + 1) % 3] = side_coords[0]
uvw[(axis + 2) % 3] = side_coords[1]
return uvw
@wp.func
def _get_face_axis(flags: wp.uint8):
return wp.int32(flags & FACE_AXIS_MASK)
@wp.func
def _get_face_inner_offset(flags: wp.uint8):
return wp.int32(flags >> FACE_INNER_OFFSET_BIT) & 1
@wp.func
def _get_face_outer_offset(flags: wp.uint8):
return wp.int32(flags >> FACE_OUTER_OFFSET_BIT) & 1
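    # Face flag layout (uint8): bits 0-2 hold the face's normal axis; bit 3
    # (inner offset) is set when the cell on the negative side of the face is
    # missing, so the inner cell is taken from the positive side; bit 4 (outer
    # offset) is set when the positive-side cell is missing. A face with both
    # neighbors present has both offset bits cleared and is an interior face.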
@wp.func
def side_position(args: SideArg, s: Sample):
ijk = args.face_ijk[s.element_index]
axis = Nanogrid._get_face_axis(args.face_flags[s.element_index])
uvw = wp.vec3(ijk) + Nanogrid._side_to_cell_coords(axis, 0.0, s.element_coords)
cell_grid = args.cell_arg.cell_grid
return wp.volume_index_to_world(cell_grid, uvw)
@wp.func
def _face_tangent_vecs(args: SideArg, axis: int, flip: int):
u_axis = utils.unit_element(wp.vec3(), (axis + 1 + flip) % 3)
v_axis = utils.unit_element(wp.vec3(), (axis + 2 - flip) % 3)
cell_grid = args.cell_arg.cell_grid
return wp.volume_index_to_world_dir(cell_grid, u_axis), wp.volume_index_to_world_dir(cell_grid, v_axis)
@wp.func
def side_deformation_gradient(args: SideArg, s: Sample):
flags = args.face_flags[s.element_index]
axis = Nanogrid._get_face_axis(flags)
flip = Nanogrid._get_face_inner_offset(flags)
v1, v2 = Nanogrid._face_tangent_vecs(args, axis, flip)
return _mat32(v1, v2)
@wp.func
def side_inner_inverse_deformation_gradient(args: SideArg, s: Sample):
return Nanogrid.cell_inverse_deformation_gradient(args.cell_arg, s)
@wp.func
def side_outer_inverse_deformation_gradient(args: SideArg, s: Sample):
return Nanogrid.cell_inverse_deformation_gradient(args.cell_arg, s)
@wp.func
def side_measure(args: SideArg, s: Sample):
axis = Nanogrid._get_face_axis(args.face_flags[s.element_index])
return args.face_areas[axis]
@wp.func
def side_measure_ratio(args: SideArg, s: Sample):
axis = Nanogrid._get_face_axis(args.face_flags[s.element_index])
return args.face_areas[axis] / args.cell_arg.cell_volume
@wp.func
def side_normal(args: SideArg, s: Sample):
flags = args.face_flags[s.element_index]
axis = Nanogrid._get_face_axis(flags)
flip = Nanogrid._get_face_inner_offset(flags)
v1, v2 = Nanogrid._face_tangent_vecs(args, axis, flip)
return wp.cross(v1, v2) / args.face_areas[axis]
@wp.func
def side_inner_cell_index(args: SideArg, side_index: ElementIndex):
ijk = args.face_ijk[side_index]
flags = args.face_flags[side_index]
axis = Nanogrid._get_face_axis(flags)
offset = Nanogrid._get_face_inner_offset(flags)
ijk[axis] += offset - 1
cell_grid = args.cell_arg.cell_grid
return wp.volume_lookup_index(cell_grid, ijk[0], ijk[1], ijk[2])
@wp.func
def side_outer_cell_index(args: SideArg, side_index: ElementIndex):
ijk = args.face_ijk[side_index]
flags = args.face_flags[side_index]
axis = Nanogrid._get_face_axis(flags)
offset = Nanogrid._get_face_outer_offset(flags)
ijk[axis] -= offset
cell_grid = args.cell_arg.cell_grid
return wp.volume_lookup_index(cell_grid, ijk[0], ijk[1], ijk[2])
@wp.func
def side_inner_cell_coords(args: SideArg, side_index: ElementIndex, side_coords: Coords):
flags = args.face_flags[side_index]
axis = Nanogrid._get_face_axis(flags)
offset = float(Nanogrid._get_face_inner_offset(flags))
return Nanogrid._side_to_cell_coords(axis, 1.0 - offset, side_coords)
@wp.func
def side_outer_cell_coords(args: SideArg, side_index: ElementIndex, side_coords: Coords):
flags = args.face_flags[side_index]
axis = Nanogrid._get_face_axis(flags)
offset = float(Nanogrid._get_face_outer_offset(flags))
return Nanogrid._side_to_cell_coords(axis, offset, side_coords)
@wp.func
def side_from_cell_coords(
args: SideArg,
side_index: ElementIndex,
element_index: ElementIndex,
element_coords: Coords,
):
flags = args.face_flags[side_index]
axis = Nanogrid._get_face_axis(flags)
cell_ijk = args.cell_arg.cell_ijk[element_index]
side_ijk = args.face_ijk[side_index]
on_side = float(side_ijk[axis] - cell_ijk[axis]) == element_coords[axis]
return wp.select(
on_side, Coords(OUTSIDE), Coords(element_coords[(axis + 1) % 3], element_coords[(axis + 2) % 3], 0.0)
)
@wp.func
def side_to_cell_arg(side_arg: SideArg):
return side_arg.cell_arg
@wp.kernel
def _cell_node_indices(
cell_ijk: wp.array(dtype=wp.vec3i),
node_ijk: wp.array2d(dtype=wp.vec3i),
):
cell, n = wp.tid()
node_ijk[cell, n] = cell_ijk[cell] + wp.vec3i((n & 4) >> 2, (n & 2) >> 1, n & 1)
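# Node n of a cell is addressed by the three bits of n: (n & 4) >> 2, (n & 2) >> 1
# and n & 1 select the +1 offset along x, y and z respectively, so n = 5 (0b101)
# maps to the cell corner at ijk + (1, 0, 1).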
@wp.kernel
def _cell_face_indices(
cell_ijk: wp.array(dtype=wp.vec3i),
node_ijk: wp.array2d(dtype=wp.vec3i),
):
cell = wp.tid()
ijk = cell_ijk[cell]
node_ijk[cell, 0] = _add_axis_flag(ijk, 0)
node_ijk[cell, 1] = _add_axis_flag(ijk, 1)
node_ijk[cell, 2] = _add_axis_flag(ijk, 2)
node_ijk[cell, 3] = _add_axis_flag(ijk + wp.vec3i(1, 0, 0), 0)
node_ijk[cell, 4] = _add_axis_flag(ijk + wp.vec3i(0, 1, 0), 1)
node_ijk[cell, 5] = _add_axis_flag(ijk + wp.vec3i(0, 0, 1), 2)
@wp.kernel
def _cell_edge_indices(
cell_ijk: wp.array(dtype=wp.vec3i),
edge_ijk: wp.array2d(dtype=wp.vec3i),
):
cell = wp.tid()
ijk = cell_ijk[cell]
edge_ijk[cell, 0] = _add_axis_flag(ijk, 0)
edge_ijk[cell, 1] = _add_axis_flag(ijk, 1)
edge_ijk[cell, 2] = _add_axis_flag(ijk, 2)
edge_ijk[cell, 3] = _add_axis_flag(ijk + wp.vec3i(0, 1, 0), 0)
edge_ijk[cell, 4] = _add_axis_flag(ijk + wp.vec3i(0, 0, 1), 1)
edge_ijk[cell, 5] = _add_axis_flag(ijk + wp.vec3i(1, 0, 0), 2)
edge_ijk[cell, 6] = _add_axis_flag(ijk + wp.vec3i(0, 1, 1), 0)
edge_ijk[cell, 7] = _add_axis_flag(ijk + wp.vec3i(1, 0, 1), 1)
edge_ijk[cell, 8] = _add_axis_flag(ijk + wp.vec3i(1, 1, 0), 2)
edge_ijk[cell, 9] = _add_axis_flag(ijk + wp.vec3i(0, 0, 1), 0)
edge_ijk[cell, 10] = _add_axis_flag(ijk + wp.vec3i(1, 0, 0), 1)
edge_ijk[cell, 11] = _add_axis_flag(ijk + wp.vec3i(0, 1, 0), 2)
def _build_node_grid(cell_ijk, grid: wp.Volume, temporary_store: cache.TemporaryStore):
cell_count = cell_ijk.shape[0]
cell_nodes = cache.borrow_temporary(temporary_store, shape=(cell_count, 8), dtype=wp.vec3i, device=cell_ijk.device)
wp.launch(
_cell_node_indices, dim=cell_nodes.array.shape, inputs=[cell_ijk, cell_nodes.array], device=cell_ijk.device
)
node_grid = wp.Volume.allocate_by_voxels(
cell_nodes.array.flatten(), voxel_size=grid.get_voxel_size()[0], device=cell_ijk.device
)
return node_grid
def _build_face_grid(cell_ijk, grid: wp.Volume, temporary_store: cache.TemporaryStore):
cell_count = cell_ijk.shape[0]
cell_faces = cache.borrow_temporary(temporary_store, shape=(cell_count, 6), dtype=wp.vec3i, device=cell_ijk.device)
wp.launch(_cell_face_indices, dim=cell_count, inputs=[cell_ijk, cell_faces.array], device=cell_ijk.device)
face_grid = wp.Volume.allocate_by_voxels(
cell_faces.array.flatten(), voxel_size=grid.get_voxel_size()[0], device=cell_ijk.device
)
return face_grid
def _build_edge_grid(cell_ijk, grid: wp.Volume, temporary_store: cache.TemporaryStore):
cell_count = cell_ijk.shape[0]
cell_edges = cache.borrow_temporary(temporary_store, shape=(cell_count, 12), dtype=wp.vec3i, device=cell_ijk.device)
wp.launch(_cell_edge_indices, dim=cell_count, inputs=[cell_ijk, cell_edges.array], device=cell_ijk.device)
edge_grid = wp.Volume.allocate_by_voxels(
cell_edges.array.flatten(), voxel_size=grid.get_voxel_size()[0], device=cell_ijk.device
)
return edge_grid
@wp.kernel
def _build_face_flags(
cell_grid: wp.uint64,
face_ijk: wp.array(dtype=wp.vec3i),
face_flags: wp.array(dtype=wp.uint8),
boundary_face_mask: wp.array(dtype=int),
):
face = wp.tid()
axis, ijk = _extract_axis_flag(face_ijk[face])
ijk_minus = ijk
ijk_minus[axis] -= 1
plus_cell_index = wp.volume_lookup_index(cell_grid, ijk[0], ijk[1], ijk[2])
minus_cell_index = wp.volume_lookup_index(cell_grid, ijk_minus[0], ijk_minus[1], ijk_minus[2])
plus_boundary = wp.uint8(wp.select(plus_cell_index == -1, 0, 1)) << FACE_OUTER_OFFSET_BIT
minus_boundary = wp.uint8(wp.select(minus_cell_index == -1, 0, 1)) << FACE_INNER_OFFSET_BIT
face_ijk[face] = ijk
face_flags[face] = wp.uint8(axis) | plus_boundary | minus_boundary
boundary_face_mask[face] = wp.select((plus_boundary | minus_boundary) == 0, 1, 0)
| 15,902 | Python | 33.875 | 120 | 0.629229 |
NVIDIA/warp/warp/fem/geometry/deformed_geometry.py | from typing import Any
import warp as wp
from warp.fem import cache
from warp.fem.types import Coords, ElementIndex, Sample, make_free_sample
from .geometry import Geometry
_mat32 = wp.mat(shape=(3, 2), dtype=float)
class DeformedGeometry(Geometry):
def __init__(self, field):
"""Constructs a Deformed Geometry from a displacement field defined over a base geometry"""
from warp.fem.field import DiscreteField
self.field: DiscreteField = field
self.base = self.field.space.geometry
self.dimension = self.base.dimension
if not wp.types.type_is_vector(field.dtype) or wp.types.type_length(field.dtype) != self.dimension:
raise ValueError("Invalid value type for position field")
self.CellArg = self.field.ElementEvalArg
self.field_trace = field.trace()
self.SideArg = self._make_side_arg()
self.SideIndexArg = self.base.SideIndexArg
self.cell_count = self.base.cell_count
self.vertex_count = self.base.vertex_count
self.side_count = self.base.side_count
self.boundary_side_count = self.base.boundary_side_count
self.reference_cell = self.base.reference_cell
self.reference_side = self.base.reference_side
self.side_index_arg_value = self.base.side_index_arg_value
self.cell_position = self._make_cell_position()
self.cell_deformation_gradient = self._make_cell_deformation_gradient()
self.cell_inverse_deformation_gradient = self._make_cell_inverse_deformation_gradient()
self.cell_measure = self._make_cell_measure()
self.boundary_side_index = self.base.boundary_side_index
self.side_to_cell_arg = self._make_side_to_cell_arg()
self.side_position = self._make_side_position()
self.side_deformation_gradient = self._make_side_deformation_gradient()
self.side_inner_cell_index = self._make_side_inner_cell_index()
self.side_outer_cell_index = self._make_side_outer_cell_index()
self.side_inner_cell_coords = self._make_side_inner_cell_coords()
self.side_outer_cell_coords = self._make_side_outer_cell_coords()
self.side_from_cell_coords = self._make_side_from_cell_coords()
self.side_inner_inverse_deformation_gradient = self._make_side_inner_inverse_deformation_gradient()
self.side_outer_inverse_deformation_gradient = self._make_side_outer_inverse_deformation_gradient()
self.side_measure = self._make_side_measure()
self.side_measure_ratio = self._make_side_measure_ratio()
self.side_normal = self._make_side_normal()
@property
def name(self):
return f"DefGeo_{self.field.name}"
# Geometry device interface
@cache.cached_arg_value
def cell_arg_value(self, device) -> "DeformedGeometry.CellArg":
args = self.CellArg()
args.elt_arg = self.base.cell_arg_value(device)
args.eval_arg = self.field.eval_arg_value(device)
return args
def _make_cell_position(self):
@cache.dynamic_func(suffix=self.name)
def cell_position(cell_arg: self.CellArg, s: Sample):
return self.field.eval_inner(cell_arg, s) + self.base.cell_position(cell_arg.elt_arg, s)
return cell_position
def _make_cell_deformation_gradient(self):
@cache.dynamic_func(suffix=self.name)
def cell_deformation_gradient(cell_arg: self.CellArg, s: Sample):
return self.field.eval_reference_grad_inner(cell_arg, s) + self.base.cell_deformation_gradient(
cell_arg.elt_arg, s
)
return cell_deformation_gradient
def _make_cell_inverse_deformation_gradient(self):
@cache.dynamic_func(suffix=self.name)
def cell_inverse_deformation_gradient(cell_arg: self.CellArg, s: Sample):
return wp.inverse(self.cell_deformation_gradient(cell_arg, s))
return cell_inverse_deformation_gradient
def _make_cell_measure(self):
REF_MEASURE = wp.constant(self.reference_cell().measure())
@cache.dynamic_func(suffix=self.name)
def cell_measure(args: self.CellArg, s: Sample):
return wp.abs(wp.determinant(self.cell_deformation_gradient(args, s))) * REF_MEASURE
return cell_measure
@wp.func
def cell_normal(args: Any, s: Sample):
return wp.vec2(0.0)
def _make_side_arg(self):
@cache.dynamic_struct(suffix=self.name)
class SideArg:
base_arg: self.base.SideArg
trace_arg: self.field_trace.EvalArg
field_arg: self.field.EvalArg
return SideArg
@cache.cached_arg_value
def side_arg_value(self, device) -> "DeformedGeometry.SideArg":
args = self.SideArg()
args.base_arg = self.base.side_arg_value(device)
args.field_arg = self.field.eval_arg_value(device)
args.trace_arg = self.field_trace.eval_arg_value(device)
return args
def _make_side_position(self):
@cache.dynamic_func(suffix=self.name)
def side_position(args: self.SideArg, s: Sample):
trace_arg = self.field_trace.ElementEvalArg(args.base_arg, args.trace_arg)
return self.field_trace.eval_inner(trace_arg, s) + self.base.side_position(args.base_arg, s)
return side_position
def _make_side_deformation_gradient(self):
@cache.dynamic_func(suffix=self.name)
def side_deformation_gradient(args: self.SideArg, s: Sample):
base_def_grad = self.base.side_deformation_gradient(args.base_arg, s)
trace_arg = self.field_trace.ElementEvalArg(args.base_arg, args.trace_arg)
Du = self.field_trace.eval_grad_inner(trace_arg, s)
return base_def_grad + Du * base_def_grad
return side_deformation_gradient
def _make_side_inner_inverse_deformation_gradient(self):
@cache.dynamic_func(suffix=self.name)
def side_inner_inverse_deformation_gradient(args: self.SideArg, s: Sample):
cell_index = self.side_inner_cell_index(args, s.element_index)
cell_coords = self.side_inner_cell_coords(args, s.element_index, s.element_coords)
cell_arg = self.side_to_cell_arg(args)
            return self.cell_inverse_deformation_gradient(cell_arg, make_free_sample(cell_index, cell_coords))

        return side_inner_inverse_deformation_gradient
def _make_side_outer_inverse_deformation_gradient(self):
@cache.dynamic_func(suffix=self.name)
def side_outer_inverse_deformation_gradient(args: self.SideArg, s: Sample):
cell_index = self.side_outer_cell_index(args, s.element_index)
cell_coords = self.side_outer_cell_coords(args, s.element_index, s.element_coords)
cell_arg = self.side_to_cell_arg(args)
            return self.cell_inverse_deformation_gradient(cell_arg, make_free_sample(cell_index, cell_coords))

        return side_outer_inverse_deformation_gradient
@wp.func
def _side_measure(F: wp.vec2):
return wp.length(F)
@wp.func
def _side_measure(F: _mat32):
Fcross = wp.vec3(
F[1, 0] * F[2, 1] - F[2, 0] * F[1, 1],
F[2, 0] * F[0, 1] - F[0, 0] * F[2, 1],
F[0, 0] * F[1, 1] - F[1, 0] * F[0, 1],
)
return wp.length(Fcross)
@wp.func
def _side_normal(F: wp.vec2):
return wp.normalize(wp.vec2(-F[1], F[0]))
@wp.func
def _side_normal(F: _mat32):
Fcross = wp.vec3(
F[1, 0] * F[2, 1] - F[2, 0] * F[1, 1],
F[2, 0] * F[0, 1] - F[0, 0] * F[2, 1],
F[0, 0] * F[1, 1] - F[1, 0] * F[0, 1],
)
return wp.normalize(Fcross)
def _make_side_measure(self):
REF_MEASURE = wp.constant(self.reference_side().measure())
@cache.dynamic_func(suffix=self.name)
def side_measure(args: self.SideArg, s: Sample):
F = self.side_deformation_gradient(args, s)
return DeformedGeometry._side_measure(F) * REF_MEASURE
return side_measure
def _make_side_measure_ratio(self):
@cache.dynamic_func(suffix=self.name)
def side_measure_ratio(args: self.SideArg, s: Sample):
inner = self.side_inner_cell_index(args, s.element_index)
outer = self.side_outer_cell_index(args, s.element_index)
inner_coords = self.side_inner_cell_coords(args, s.element_index, s.element_coords)
outer_coords = self.side_outer_cell_coords(args, s.element_index, s.element_coords)
cell_arg = self.side_to_cell_arg(args)
return self.side_measure(args, s) / wp.min(
self.cell_measure(cell_arg, make_free_sample(inner, inner_coords)),
self.cell_measure(cell_arg, make_free_sample(outer, outer_coords)),
)
return side_measure_ratio
def _make_side_normal(self):
@cache.dynamic_func(suffix=self.name)
def side_normal(args: self.SideArg, s: Sample):
F = self.side_deformation_gradient(args, s)
return DeformedGeometry._side_normal(F)
return side_normal
def _make_side_inner_cell_index(self):
@cache.dynamic_func(suffix=self.name)
def side_inner_cell_index(args: self.SideArg, side_index: ElementIndex):
return self.base.side_inner_cell_index(args.base_arg, side_index)
return side_inner_cell_index
def _make_side_outer_cell_index(self):
@cache.dynamic_func(suffix=self.name)
def side_outer_cell_index(args: self.SideArg, side_index: ElementIndex):
return self.base.side_outer_cell_index(args.base_arg, side_index)
return side_outer_cell_index
def _make_side_inner_cell_coords(self):
@cache.dynamic_func(suffix=self.name)
def side_inner_cell_coords(args: self.SideArg, side_index: ElementIndex, side_coords: Coords):
return self.base.side_inner_cell_coords(args.base_arg, side_index, side_coords)
return side_inner_cell_coords
def _make_side_outer_cell_coords(self):
@cache.dynamic_func(suffix=self.name)
def side_outer_cell_coords(args: self.SideArg, side_index: ElementIndex, side_coords: Coords):
return self.base.side_outer_cell_coords(args.base_arg, side_index, side_coords)
return side_outer_cell_coords
def _make_side_from_cell_coords(self):
@cache.dynamic_func(suffix=self.name)
def side_from_cell_coords(
args: self.SideArg,
side_index: ElementIndex,
cell_index: ElementIndex,
cell_coords: Coords,
):
return self.base.side_from_cell_coords(args.base_arg, side_index, cell_index, cell_coords)
return side_from_cell_coords
def _make_side_to_cell_arg(self):
@cache.dynamic_func(suffix=self.name)
def side_to_cell_arg(side_arg: self.SideArg):
return self.CellArg(self.base.side_to_cell_arg(side_arg.base_arg), side_arg.field_arg)
return side_to_cell_arg
| 10,929 | Python | 39.332103 | 110 | 0.639949 |
NVIDIA/warp/warp/fem/geometry/grid_2d.py | from typing import Optional
import warp as wp
from warp.fem.cache import cached_arg_value
from warp.fem.types import OUTSIDE, Coords, ElementIndex, Sample, make_free_sample
from .element import LinearEdge, Square
from .geometry import Geometry
@wp.struct
class Grid2DCellArg:
res: wp.vec2i
cell_size: wp.vec2
origin: wp.vec2
class Grid2D(Geometry):
"""Two-dimensional regular grid geometry"""
dimension = 2
Permutation = wp.types.matrix(shape=(2, 2), dtype=int)
ROTATION = wp.constant(Permutation(0, 1, 1, 0))
def __init__(self, res: wp.vec2i, bounds_lo: Optional[wp.vec2] = None, bounds_hi: Optional[wp.vec2] = None):
"""Constructs a dense 2D grid
Args:
res: Resolution of the grid along each dimension
bounds_lo: Position of the lower bound of the axis-aligned grid
            bounds_hi: Position of the upper bound of the axis-aligned grid
"""
if bounds_lo is None:
bounds_lo = wp.vec2(0.0)
if bounds_hi is None:
bounds_hi = wp.vec2(1.0)
self.bounds_lo = bounds_lo
self.bounds_hi = bounds_hi
self._res = res
@property
    def extents(self) -> wp.vec2:
        # Avoid using native sub due to the higher overhead of calling builtins from Python
return wp.vec2(
self.bounds_hi[0] - self.bounds_lo[0],
self.bounds_hi[1] - self.bounds_lo[1],
)
@property
def cell_size(self) -> wp.vec2:
ex = self.extents
return wp.vec2(
ex[0] / self.res[0],
ex[1] / self.res[1],
)
def cell_count(self):
return self.res[0] * self.res[1]
def vertex_count(self):
return (self.res[0] + 1) * (self.res[1] + 1)
def side_count(self):
return 2 * self.cell_count() + self.res[0] + self.res[1]
def boundary_side_count(self):
return 2 * (self.res[0] + self.res[1])
def reference_cell(self) -> Square:
return Square()
def reference_side(self) -> LinearEdge:
return LinearEdge()
@property
def res(self):
return self._res
@property
def origin(self):
return self.bounds_lo
@property
def strides(self):
return wp.vec2i(self.res[1], 1)
# Utility device functions
CellArg = Grid2DCellArg
Cell = wp.vec2i
@wp.func
def _to_2d_index(x_stride: int, index: int):
x = index // x_stride
y = index - x_stride * x
return wp.vec2i(x, y)
@wp.func
def _from_2d_index(x_stride: int, index: wp.vec2i):
return x_stride * index[0] + index[1]
@wp.func
def cell_index(res: wp.vec2i, cell: Cell):
return Grid2D._from_2d_index(res[1], cell)
@wp.func
def get_cell(res: wp.vec2i, cell_index: ElementIndex):
return Grid2D._to_2d_index(res[1], cell_index)
@wp.struct
class Side:
axis: int # normal; 0: horizontal, 1: vertical
origin: wp.vec2i # index of vertex at corner (0,0)
@wp.struct
class SideArg:
cell_count: int
axis_offsets: wp.vec2i
cell_arg: Grid2DCellArg
SideIndexArg = SideArg
@wp.func
def _rotate(axis: int, vec: wp.vec2i):
return wp.vec2i(
vec[Grid2D.ROTATION[axis, 0]],
vec[Grid2D.ROTATION[axis, 1]],
)
@wp.func
def _rotate(axis: int, vec: wp.vec2):
return wp.vec2(
vec[Grid2D.ROTATION[axis, 0]],
vec[Grid2D.ROTATION[axis, 1]],
)
@wp.func
def side_index(arg: SideArg, side: Side):
alt_axis = Grid2D.ROTATION[side.axis, 0]
if side.origin[0] == arg.cell_arg.res[alt_axis]:
# Upper-boundary side
longitude = side.origin[1]
return 2 * arg.cell_count + arg.axis_offsets[side.axis] + longitude
cell_index = Grid2D.cell_index(arg.cell_arg.res, Grid2D._rotate(side.axis, side.origin))
return side.axis * arg.cell_count + cell_index
@wp.func
def get_side(arg: SideArg, side_index: ElementIndex):
if side_index < 2 * arg.cell_count:
axis = side_index // arg.cell_count
cell_index = side_index - axis * arg.cell_count
origin = Grid2D._rotate(axis, Grid2D.get_cell(arg.cell_arg.res, cell_index))
return Grid2D.Side(axis, origin)
axis_side_index = side_index - 2 * arg.cell_count
if axis_side_index < arg.axis_offsets[1]:
axis = 0
else:
axis = 1
altitude = arg.cell_arg.res[Grid2D.ROTATION[axis, 0]]
longitude = axis_side_index - arg.axis_offsets[axis]
origin_loc = wp.vec2i(altitude, longitude)
return Grid2D.Side(axis, origin_loc)
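    # Side indexing layout: indices [0, cell_count) are axis-0 sides keyed by a
    # cell, [cell_count, 2 * cell_count) are axis-1 sides, and the remaining
    # res[0] + res[1] indices cover the upper-boundary sides of each axis. E.g.,
    # a (hypothetical) 2 x 3 grid has 2 * 6 + 2 + 3 = 17 sides in total.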
# Geometry device interface
@cached_arg_value
def cell_arg_value(self, device) -> CellArg:
args = self.CellArg()
args.res = self.res
args.cell_size = self.cell_size
args.origin = self.bounds_lo
return args
@wp.func
def cell_position(args: CellArg, s: Sample):
cell = Grid2D.get_cell(args.res, s.element_index)
return (
wp.vec2(
(float(cell[0]) + s.element_coords[0]) * args.cell_size[0],
(float(cell[1]) + s.element_coords[1]) * args.cell_size[1],
)
+ args.origin
)
@wp.func
def cell_deformation_gradient(args: CellArg, s: Sample):
return wp.diag(args.cell_size)
@wp.func
def cell_inverse_deformation_gradient(args: CellArg, s: Sample):
return wp.diag(wp.cw_div(wp.vec2(1.0), args.cell_size))
@wp.func
def cell_lookup(args: CellArg, pos: wp.vec2):
loc_pos = wp.cw_div(pos - args.origin, args.cell_size)
x = wp.clamp(loc_pos[0], 0.0, float(args.res[0]))
y = wp.clamp(loc_pos[1], 0.0, float(args.res[1]))
x_cell = wp.min(wp.floor(x), float(args.res[0]) - 1.0)
y_cell = wp.min(wp.floor(y), float(args.res[1]) - 1.0)
coords = Coords(x - x_cell, y - y_cell, 0.0)
cell_index = Grid2D.cell_index(args.res, Grid2D.Cell(int(x_cell), int(y_cell)))
return make_free_sample(cell_index, coords)
@wp.func
def cell_lookup(args: CellArg, pos: wp.vec2, guess: Sample):
return Grid2D.cell_lookup(args, pos)
@wp.func
def cell_measure(args: CellArg, s: Sample):
return args.cell_size[0] * args.cell_size[1]
@wp.func
def cell_normal(args: CellArg, s: Sample):
return wp.vec2(0.0)
@cached_arg_value
def side_arg_value(self, device) -> SideArg:
args = self.SideArg()
        args.axis_offsets = wp.vec2i(
            0,
            self.res[1],  # count of axis-0 upper-boundary sides, one per row along axis 1
        )
args.cell_count = self.cell_count()
args.cell_arg = self.cell_arg_value(device)
return args
def side_index_arg_value(self, device) -> SideIndexArg:
return self.side_arg_value(device)
@wp.func
def boundary_side_index(args: SideArg, boundary_side_index: int):
"""Boundary side to side index"""
axis_side_index = boundary_side_index // 2
border = boundary_side_index - 2 * axis_side_index
if axis_side_index < args.axis_offsets[1]:
axis = 0
else:
axis = 1
longitude = axis_side_index - args.axis_offsets[axis]
altitude = border * args.cell_arg.res[axis]
side = Grid2D.Side(axis, wp.vec2i(altitude, longitude))
return Grid2D.side_index(args, side)
@wp.func
def side_position(args: SideArg, s: Sample):
side = Grid2D.get_side(args, s.element_index)
coord = wp.select((side.origin[0] == 0) == (side.axis == 0), 1.0 - s.element_coords[0], s.element_coords[0])
local_pos = wp.vec2(
float(side.origin[0]),
float(side.origin[1]) + coord,
)
pos = args.cell_arg.origin + wp.cw_mul(Grid2D._rotate(side.axis, local_pos), args.cell_arg.cell_size)
return pos
@wp.func
def side_deformation_gradient(args: SideArg, s: Sample):
side = Grid2D.get_side(args, s.element_index)
sign = wp.select((side.origin[0] == 0) == (side.axis == 0), -1.0, 1.0)
return wp.cw_mul(Grid2D._rotate(side.axis, wp.vec2(0.0, sign)), args.cell_arg.cell_size)
@wp.func
def side_inner_inverse_deformation_gradient(args: SideArg, s: Sample):
return Grid2D.cell_inverse_deformation_gradient(args.cell_arg, s)
@wp.func
def side_outer_inverse_deformation_gradient(args: SideArg, s: Sample):
return Grid2D.cell_inverse_deformation_gradient(args.cell_arg, s)
@wp.func
def side_measure(args: SideArg, s: Sample):
side = Grid2D.get_side(args, s.element_index)
long_axis = Grid2D.ROTATION[side.axis, 1]
return args.cell_arg.cell_size[long_axis]
@wp.func
def side_measure_ratio(args: SideArg, s: Sample):
side = Grid2D.get_side(args, s.element_index)
alt_axis = Grid2D.ROTATION[side.axis, 0]
return 1.0 / args.cell_arg.cell_size[alt_axis]
@wp.func
def side_normal(args: SideArg, s: Sample):
side = Grid2D.get_side(args, s.element_index)
sign = wp.select(side.origin[0] == 0, 1.0, -1.0)
local_n = wp.vec2(sign, 0.0)
return Grid2D._rotate(side.axis, local_n)
@wp.func
def side_inner_cell_index(arg: SideArg, side_index: ElementIndex):
side = Grid2D.get_side(arg, side_index)
inner_alt = wp.select(side.origin[0] == 0, side.origin[0] - 1, 0)
inner_origin = wp.vec2i(inner_alt, side.origin[1])
cell = Grid2D._rotate(side.axis, inner_origin)
return Grid2D.cell_index(arg.cell_arg.res, cell)
@wp.func
def side_outer_cell_index(arg: SideArg, side_index: ElementIndex):
side = Grid2D.get_side(arg, side_index)
alt_axis = Grid2D.ROTATION[side.axis, 0]
outer_alt = wp.select(
side.origin[0] == arg.cell_arg.res[alt_axis], side.origin[0], arg.cell_arg.res[alt_axis] - 1
)
outer_origin = wp.vec2i(outer_alt, side.origin[1])
cell = Grid2D._rotate(side.axis, outer_origin)
return Grid2D.cell_index(arg.cell_arg.res, cell)
@wp.func
def side_inner_cell_coords(args: SideArg, side_index: ElementIndex, side_coords: Coords):
side = Grid2D.get_side(args, side_index)
inner_alt = wp.select(side.origin[0] == 0, 1.0, 0.0)
side_coord = wp.select((side.origin[0] == 0) == (side.axis == 0), 1.0 - side_coords[0], side_coords[0])
coords = Grid2D._rotate(side.axis, wp.vec2(inner_alt, side_coord))
return Coords(coords[0], coords[1], 0.0)
@wp.func
def side_outer_cell_coords(args: SideArg, side_index: ElementIndex, side_coords: Coords):
side = Grid2D.get_side(args, side_index)
alt_axis = Grid2D.ROTATION[side.axis, 0]
outer_alt = wp.select(side.origin[0] == args.cell_arg.res[alt_axis], 0.0, 1.0)
side_coord = wp.select((side.origin[0] == 0) == (side.axis == 0), 1.0 - side_coords[0], side_coords[0])
coords = Grid2D._rotate(side.axis, wp.vec2(outer_alt, side_coord))
return Coords(coords[0], coords[1], 0.0)
@wp.func
def side_from_cell_coords(
args: SideArg,
side_index: ElementIndex,
element_index: ElementIndex,
element_coords: Coords,
):
side = Grid2D.get_side(args, side_index)
cell = Grid2D.get_cell(args.cell_arg.res, element_index)
if float(side.origin[0] - cell[side.axis]) == element_coords[side.axis]:
long_axis = Grid2D.ROTATION[side.axis, 1]
axis_coord = element_coords[long_axis]
side_coord = wp.select((side.origin[0] == 0) == (side.axis == 0), 1.0 - axis_coord, axis_coord)
return Coords(side_coord, 0.0, 0.0)
return Coords(OUTSIDE)
@wp.func
def side_to_cell_arg(side_arg: SideArg):
return side_arg.cell_arg
| 11,950 | Python | 30.367454 | 116 | 0.591799 |
NVIDIA/warp/warp/fem/geometry/element.py | from typing import List, Tuple
from warp.fem.polynomial import Polynomial, quadrature_1d
from warp.fem.types import Coords
class Element:
    @staticmethod
    def measure() -> float:
"""Measure (area, volume, ...) of the reference element"""
raise NotImplementedError
@staticmethod
def instantiate_quadrature(order: int, family: Polynomial) -> Tuple[List[Coords], List[float]]:
"""Returns a quadrature of a given order for a prototypical element"""
raise NotImplementedError
def center(self) -> Tuple[float]:
coords, _ = self.instantiate_quadrature(order=0, family=None)
return coords[0]
def _point_count_from_order(order: int, family: Polynomial):
if family == Polynomial.GAUSS_LEGENDRE:
point_count = max(1, order // 2 + 1)
elif family == Polynomial.LOBATTO_GAUSS_LEGENDRE:
point_count = max(2, order // 2 + 2)
elif family == Polynomial.EQUISPACED_CLOSED:
point_count = max(2, 2 * (order // 2) + 1)
elif family == Polynomial.EQUISPACED_OPEN:
point_count = max(1, 2 * (order // 2) + 1)
return point_count
class Cube(Element):
@staticmethod
def measure() -> float:
return 1.0
@staticmethod
def instantiate_quadrature(order: int, family: Polynomial):
if family is None:
family = Polynomial.GAUSS_LEGENDRE
point_count = _point_count_from_order(order=order, family=family)
gauss_1d, weights_1d = quadrature_1d(point_count=point_count, family=family)
coords = [Coords(x, y, z) for x in gauss_1d for y in gauss_1d for z in gauss_1d]
weights = [wx * wy * wz for wx in weights_1d for wy in weights_1d for wz in weights_1d]
return coords, weights
class Square(Element):
@staticmethod
def measure() -> float:
return 1.0
@staticmethod
def instantiate_quadrature(order: int, family: Polynomial):
if family is None:
family = Polynomial.GAUSS_LEGENDRE
point_count = _point_count_from_order(order=order, family=family)
gauss_1d, weights_1d = quadrature_1d(point_count=point_count, family=family)
coords = [Coords(x, y, 0.0) for x in gauss_1d for y in gauss_1d]
weights = [wx * wy for wx in weights_1d for wy in weights_1d]
return coords, weights
class LinearEdge(Element):
@staticmethod
def measure() -> float:
return 1.0
@staticmethod
def instantiate_quadrature(order: int, family: Polynomial):
if family is None:
family = Polynomial.GAUSS_LEGENDRE
point_count = _point_count_from_order(order=order, family=family)
gauss_1d, weights_1d = quadrature_1d(point_count=point_count, family=family)
coords = [Coords(x, 0.0, 0.0) for x in gauss_1d]
return coords, weights_1d
class Triangle(Element):
@staticmethod
def measure() -> float:
return 0.5
@staticmethod
def instantiate_quadrature(order: int, family: Polynomial):
if family is not None:
# Duffy transformation from square to triangle
point_count = _point_count_from_order(order=order + 1, family=family)
gauss_1d, weights_1d = quadrature_1d(point_count=point_count, family=family)
coords = [Coords(1.0 - x - y + x * y, x, y * (1.0 - x)) for x in gauss_1d for y in gauss_1d]
# Scale weight by 2.0 so that they sum up to 1
weights = [2.0 * wx * (1.0 - x) * wy for x, wx in zip(gauss_1d, weights_1d) for wy in weights_1d]
return coords, weights
if order <= 1:
weights = [1.0]
coords = [Coords(1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0)]
elif order <= 2:
weights = [1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0]
coords = [
Coords(2.0 / 3.0, 1.0 / 6.0, 1.0 / 6.0),
Coords(1.0 / 6.0, 2.0 / 3.0, 1.0 / 6.0),
Coords(1.0 / 6.0, 1.0 / 6.0, 2.0 / 3.0),
]
elif order <= 3:
# Hillion 1977,
# "Numerical Integration on a Triangle"
weights = [
3.18041381743977225049491153185954e-01,
3.18041381743977225049491153185954e-01,
1.81958618256022719439357615556219e-01,
1.81958618256022719439357615556219e-01,
]
coords = [
Coords(
6.66390246014701426169324349757517e-01,
1.78558728263616461884311092944699e-01,
1.55051025721682111946364557297784e-01,
),
Coords(
1.78558728263616461884311092944699e-01,
6.66390246014701426169324349757517e-01,
1.55051025721682056435213326039957e-01,
),
Coords(
2.80019915499074012465996474929852e-01,
7.50311102226081383381739442484104e-02,
6.44948974278317876951405196450651e-01,
),
Coords(
7.50311102226081383381739442484104e-02,
2.80019915499074012465996474929852e-01,
6.44948974278317876951405196450651e-01,
),
]
elif order <= 4:
# Witherden and Vincent 2015,
# "On the identification of symmetric quadrature rules for finite element methods"
# https://doi.org/10.1016/j.camwa.2015.03.017
weights = [
2.23381589678011471811203136894619e-01,
2.23381589678011471811203136894619e-01,
2.23381589678011471811203136894619e-01,
1.09951743655321870773988734981685e-01,
1.09951743655321870773988734981685e-01,
1.09951743655321870773988734981685e-01,
]
coords = [
Coords(
4.45948490915964890213274429697776e-01,
4.45948490915964890213274429697776e-01,
1.08103018168070219573451140604448e-01,
),
Coords(
4.45948490915964890213274429697776e-01,
1.08103018168070219573451140604448e-01,
4.45948490915964890213274429697776e-01,
),
Coords(
1.08103018168070219573451140604448e-01,
4.45948490915964890213274429697776e-01,
4.45948490915964890213274429697776e-01,
),
Coords(
9.15762135097707430375635340169538e-02,
9.15762135097707430375635340169538e-02,
8.16847572980458513924872931966092e-01,
),
Coords(
9.15762135097707430375635340169538e-02,
8.16847572980458513924872931966092e-01,
9.15762135097707430375635340169538e-02,
),
Coords(
8.16847572980458513924872931966092e-01,
9.15762135097707430375635340169538e-02,
9.15762135097707430375635340169538e-02,
),
]
elif order <= 5:
weights = [
2.25000000000000005551115123125783e-01,
1.25939180544827139529573400977824e-01,
1.25939180544827139529573400977824e-01,
1.25939180544827139529573400977824e-01,
1.32394152788506191953388224646915e-01,
1.32394152788506191953388224646915e-01,
1.32394152788506191953388224646915e-01,
]
coords = [
Coords(
3.33333333333333314829616256247391e-01,
3.33333333333333314829616256247391e-01,
3.33333333333333314829616256247391e-01,
),
Coords(
1.01286507323456342888334802410100e-01,
1.01286507323456342888334802410100e-01,
7.97426985353087314223330395179801e-01,
),
Coords(
1.01286507323456342888334802410100e-01,
7.97426985353087314223330395179801e-01,
1.01286507323456342888334802410100e-01,
),
Coords(
7.97426985353087314223330395179801e-01,
1.01286507323456342888334802410100e-01,
1.01286507323456342888334802410100e-01,
),
Coords(
4.70142064105115109473587153843255e-01,
4.70142064105115109473587153843255e-01,
5.97158717897697810528256923134904e-02,
),
Coords(
4.70142064105115109473587153843255e-01,
5.97158717897697810528256923134904e-02,
4.70142064105115109473587153843255e-01,
),
Coords(
5.97158717897697810528256923134904e-02,
4.70142064105115109473587153843255e-01,
4.70142064105115109473587153843255e-01,
),
]
elif order <= 6:
weights = [
5.08449063702068326797700592578622e-02,
5.08449063702068326797700592578622e-02,
5.08449063702068326797700592578622e-02,
1.16786275726379396022736045779311e-01,
1.16786275726379396022736045779311e-01,
1.16786275726379396022736045779311e-01,
8.28510756183735846969184990484791e-02,
8.28510756183735846969184990484791e-02,
8.28510756183735846969184990484791e-02,
8.28510756183735846969184990484791e-02,
8.28510756183735846969184990484791e-02,
8.28510756183735846969184990484791e-02,
]
coords = [
Coords(
6.30890144915022266225435032538371e-02,
6.30890144915022266225435032538371e-02,
8.73821971016995546754912993492326e-01,
),
Coords(
6.30890144915022266225435032538371e-02,
8.73821971016995546754912993492326e-01,
6.30890144915022266225435032538371e-02,
),
Coords(
8.73821971016995546754912993492326e-01,
6.30890144915022266225435032538371e-02,
6.30890144915022266225435032538371e-02,
),
Coords(
2.49286745170910428726074314909056e-01,
2.49286745170910428726074314909056e-01,
5.01426509658179142547851370181888e-01,
),
Coords(
2.49286745170910428726074314909056e-01,
5.01426509658179142547851370181888e-01,
2.49286745170910428726074314909056e-01,
),
Coords(
5.01426509658179142547851370181888e-01,
2.49286745170910428726074314909056e-01,
2.49286745170910428726074314909056e-01,
),
Coords(
5.31450498448169383891581674106419e-02,
3.10352451033784393352732422499685e-01,
6.36502499121398668258109410089673e-01,
),
Coords(
5.31450498448169383891581674106419e-02,
6.36502499121398668258109410089673e-01,
3.10352451033784393352732422499685e-01,
),
Coords(
3.10352451033784393352732422499685e-01,
5.31450498448169383891581674106419e-02,
6.36502499121398668258109410089673e-01,
),
Coords(
3.10352451033784393352732422499685e-01,
6.36502499121398668258109410089673e-01,
5.31450498448169383891581674106419e-02,
),
Coords(
6.36502499121398668258109410089673e-01,
5.31450498448169383891581674106419e-02,
3.10352451033784393352732422499685e-01,
),
Coords(
6.36502499121398668258109410089673e-01,
3.10352451033784393352732422499685e-01,
5.31450498448169383891581674106419e-02,
),
]
else:
# Order 8
weights = [
1.44315607677787172136163462710101e-01,
9.50916342672846193195823616406415e-02,
9.50916342672846193195823616406415e-02,
9.50916342672846193195823616406415e-02,
1.03217370534718244634575512463925e-01,
1.03217370534718244634575512463925e-01,
1.03217370534718244634575512463925e-01,
3.24584976231980792960030157701112e-02,
3.24584976231980792960030157701112e-02,
3.24584976231980792960030157701112e-02,
2.72303141744349927466650740370824e-02,
2.72303141744349927466650740370824e-02,
2.72303141744349927466650740370824e-02,
2.72303141744349927466650740370824e-02,
2.72303141744349927466650740370824e-02,
2.72303141744349927466650740370824e-02,
]
coords = [
Coords(
3.33333333333333314829616256247391e-01,
3.33333333333333314829616256247391e-01,
3.33333333333333314829616256247391e-01,
),
Coords(
4.59292588292723125142913431773195e-01,
4.59292588292723125142913431773195e-01,
8.14148234145537497141731364536099e-02,
),
Coords(
4.59292588292723125142913431773195e-01,
8.14148234145537497141731364536099e-02,
4.59292588292723125142913431773195e-01,
),
Coords(
8.14148234145537497141731364536099e-02,
4.59292588292723125142913431773195e-01,
4.59292588292723125142913431773195e-01,
),
Coords(
1.70569307751760212976677166807349e-01,
1.70569307751760212976677166807349e-01,
6.58861384496479574046645666385302e-01,
),
Coords(
1.70569307751760212976677166807349e-01,
6.58861384496479574046645666385302e-01,
1.70569307751760212976677166807349e-01,
),
Coords(
6.58861384496479574046645666385302e-01,
1.70569307751760212976677166807349e-01,
1.70569307751760212976677166807349e-01,
),
Coords(
5.05472283170309566457945038564503e-02,
5.05472283170309566457945038564503e-02,
8.98905543365938086708410992287099e-01,
),
Coords(
5.05472283170309566457945038564503e-02,
8.98905543365938086708410992287099e-01,
5.05472283170309566457945038564503e-02,
),
Coords(
8.98905543365938086708410992287099e-01,
5.05472283170309566457945038564503e-02,
5.05472283170309566457945038564503e-02,
),
Coords(
8.39477740995758781039626228448469e-03,
2.63112829634638112352718053443823e-01,
7.28492392955404355348036915529519e-01,
),
Coords(
8.39477740995758781039626228448469e-03,
7.28492392955404355348036915529519e-01,
2.63112829634638112352718053443823e-01,
),
Coords(
2.63112829634638112352718053443823e-01,
8.39477740995758781039626228448469e-03,
7.28492392955404355348036915529519e-01,
),
Coords(
2.63112829634638112352718053443823e-01,
7.28492392955404355348036915529519e-01,
8.39477740995758781039626228448469e-03,
),
Coords(
7.28492392955404355348036915529519e-01,
8.39477740995758781039626228448469e-03,
2.63112829634638112352718053443823e-01,
),
Coords(
7.28492392955404355348036915529519e-01,
2.63112829634638112352718053443823e-01,
8.39477740995758781039626228448469e-03,
),
]
return coords, weights
class Tetrahedron(Element):
@staticmethod
def measure() -> float:
return 1.0 / 6.0
@staticmethod
def instantiate_quadrature(order: int, family: Polynomial):
if family is not None:
            # Duffy transformation from cube to tetrahedron
point_count = _point_count_from_order(order=order + 1, family=family)
gauss_1d, weights_1d = quadrature_1d(point_count=point_count, family=family)
coords = [
Coords(x, y * (1.0 - x), z * (1.0 - x) * (1.0 - y))
for x in gauss_1d
for y in gauss_1d
for z in gauss_1d
]
            # Scale weights by 6.0 so that they sum up to 1
weights = [
6.0 * wx * wy * wz * (1.0 - x) * (1.0 - x) * (1.0 - y)
for x, wx in zip(gauss_1d, weights_1d)
for y, wy in zip(gauss_1d, weights_1d)
for wz in weights_1d
]
return coords, weights
# Shunn and Ham 2012
# "Symmetric quadrature rules for tetrahedra based on a cubic close-packed lattice arrangement"
# https://doi.org/10.1016/j.cam.2012.03.032
# TODO: add Witherden and Vincent 2015,
if order <= 1:
weights = [1.0]
coords = [Coords(1.0 / 4.0, 1.0 / 4.0, 1.0 / 4.0)]
elif order <= 2:
weights = [1.0 / 4.0, 1.0 / 4.0, 1.0 / 4.0, 1.0 / 4.0]
coords = [
Coords(0.1381966011250110, 0.1381966011250110, 0.1381966011250110),
Coords(0.5854101966249680, 0.1381966011250110, 0.1381966011250110),
Coords(0.1381966011250110, 0.5854101966249680, 0.1381966011250110),
Coords(0.1381966011250110, 0.1381966011250110, 0.5854101966249680),
]
elif order <= 3:
weights = [
0.0476331348432089,
0.0476331348432089,
0.0476331348432089,
0.0476331348432089,
0.1349112434378610,
0.1349112434378610,
0.1349112434378610,
0.1349112434378610,
0.1349112434378610,
0.1349112434378610,
]
coords = [
Coords(0.0738349017262234, 0.0738349017262234, 0.0738349017262234),
Coords(0.7784952948213300, 0.0738349017262234, 0.0738349017262234),
Coords(0.0738349017262234, 0.7784952948213300, 0.0738349017262234),
Coords(0.0738349017262234, 0.0738349017262234, 0.7784952948213300),
Coords(0.4062443438840510, 0.0937556561159491, 0.0937556561159491),
Coords(0.0937556561159491, 0.4062443438840510, 0.0937556561159491),
Coords(0.0937556561159491, 0.0937556561159491, 0.4062443438840510),
Coords(0.4062443438840510, 0.4062443438840510, 0.0937556561159491),
Coords(0.4062443438840510, 0.0937556561159491, 0.4062443438840510),
Coords(0.0937556561159491, 0.4062443438840510, 0.4062443438840510),
]
elif order <= 4:
weights = [
0.0070670747944695,
0.0070670747944695,
0.0070670747944695,
0.0070670747944695,
0.0469986689718877,
0.0469986689718877,
0.0469986689718877,
0.0469986689718877,
0.0469986689718877,
0.0469986689718877,
0.0469986689718877,
0.0469986689718877,
0.0469986689718877,
0.0469986689718877,
0.0469986689718877,
0.0469986689718877,
0.1019369182898680,
0.1019369182898680,
0.1019369182898680,
0.1019369182898680,
]
coords = [
Coords(0.0323525947272439, 0.0323525947272439, 0.0323525947272439),
Coords(0.9029422158182680, 0.0323525947272439, 0.0323525947272439),
Coords(0.0323525947272439, 0.9029422158182680, 0.0323525947272439),
Coords(0.0323525947272439, 0.0323525947272439, 0.9029422158182680),
Coords(0.6165965330619370, 0.0603604415251421, 0.0603604415251421),
Coords(0.2626825838877790, 0.0603604415251421, 0.0603604415251421),
Coords(0.0603604415251421, 0.6165965330619370, 0.0603604415251421),
Coords(0.0603604415251421, 0.2626825838877790, 0.0603604415251421),
Coords(0.0603604415251421, 0.0603604415251421, 0.6165965330619370),
Coords(0.0603604415251421, 0.0603604415251421, 0.2626825838877790),
Coords(0.2626825838877790, 0.6165965330619370, 0.0603604415251421),
Coords(0.6165965330619370, 0.2626825838877790, 0.0603604415251421),
Coords(0.2626825838877790, 0.0603604415251421, 0.6165965330619370),
Coords(0.6165965330619370, 0.0603604415251421, 0.2626825838877790),
Coords(0.0603604415251421, 0.2626825838877790, 0.6165965330619370),
Coords(0.0603604415251421, 0.6165965330619370, 0.2626825838877790),
Coords(0.3097693042728620, 0.3097693042728620, 0.0706920871814129),
Coords(0.3097693042728620, 0.0706920871814129, 0.3097693042728620),
Coords(0.0706920871814129, 0.3097693042728620, 0.3097693042728620),
Coords(0.3097693042728620, 0.3097693042728620, 0.3097693042728620),
]
elif order <= 5:
weights = [
0.0021900463965388,
0.0021900463965388,
0.0021900463965388,
0.0021900463965388,
0.0143395670177665,
0.0143395670177665,
0.0143395670177665,
0.0143395670177665,
0.0143395670177665,
0.0143395670177665,
0.0143395670177665,
0.0143395670177665,
0.0143395670177665,
0.0143395670177665,
0.0143395670177665,
0.0143395670177665,
0.0250305395686746,
0.0250305395686746,
0.0250305395686746,
0.0250305395686746,
0.0250305395686746,
0.0250305395686746,
0.0479839333057554,
0.0479839333057554,
0.0479839333057554,
0.0479839333057554,
0.0479839333057554,
0.0479839333057554,
0.0479839333057554,
0.0479839333057554,
0.0479839333057554,
0.0479839333057554,
0.0479839333057554,
0.0479839333057554,
0.0931745731195340,
]
coords = [
Coords(0.0267367755543735, 0.0267367755543735, 0.0267367755543735),
Coords(0.9197896733368800, 0.0267367755543735, 0.0267367755543735),
Coords(0.0267367755543735, 0.9197896733368800, 0.0267367755543735),
Coords(0.0267367755543735, 0.0267367755543735, 0.9197896733368800),
Coords(0.7477598884818090, 0.0391022406356488, 0.0391022406356488),
Coords(0.1740356302468940, 0.0391022406356488, 0.0391022406356488),
Coords(0.0391022406356488, 0.7477598884818090, 0.0391022406356488),
Coords(0.0391022406356488, 0.1740356302468940, 0.0391022406356488),
Coords(0.0391022406356488, 0.0391022406356488, 0.7477598884818090),
Coords(0.0391022406356488, 0.0391022406356488, 0.1740356302468940),
Coords(0.1740356302468940, 0.7477598884818090, 0.0391022406356488),
Coords(0.7477598884818090, 0.1740356302468940, 0.0391022406356488),
Coords(0.1740356302468940, 0.0391022406356488, 0.7477598884818090),
Coords(0.7477598884818090, 0.0391022406356488, 0.1740356302468940),
Coords(0.0391022406356488, 0.1740356302468940, 0.7477598884818090),
Coords(0.0391022406356488, 0.7477598884818090, 0.1740356302468940),
Coords(0.4547545999844830, 0.0452454000155172, 0.0452454000155172),
Coords(0.0452454000155172, 0.4547545999844830, 0.0452454000155172),
Coords(0.0452454000155172, 0.0452454000155172, 0.4547545999844830),
Coords(0.4547545999844830, 0.4547545999844830, 0.0452454000155172),
Coords(0.4547545999844830, 0.0452454000155172, 0.4547545999844830),
Coords(0.0452454000155172, 0.4547545999844830, 0.4547545999844830),
Coords(0.2232010379623150, 0.2232010379623150, 0.0504792790607720),
Coords(0.5031186450145980, 0.2232010379623150, 0.0504792790607720),
Coords(0.2232010379623150, 0.5031186450145980, 0.0504792790607720),
Coords(0.2232010379623150, 0.0504792790607720, 0.2232010379623150),
Coords(0.5031186450145980, 0.0504792790607720, 0.2232010379623150),
Coords(0.2232010379623150, 0.0504792790607720, 0.5031186450145980),
Coords(0.0504792790607720, 0.2232010379623150, 0.2232010379623150),
Coords(0.0504792790607720, 0.5031186450145980, 0.2232010379623150),
Coords(0.0504792790607720, 0.2232010379623150, 0.5031186450145980),
Coords(0.5031186450145980, 0.2232010379623150, 0.2232010379623150),
Coords(0.2232010379623150, 0.5031186450145980, 0.2232010379623150),
Coords(0.2232010379623150, 0.2232010379623150, 0.5031186450145980),
Coords(0.2500000000000000, 0.2500000000000000, 0.2500000000000000),
]
elif order <= 6:
weights = [
0.0010373112336140,
0.0010373112336140,
0.0010373112336140,
0.0010373112336140,
0.0096016645399480,
0.0096016645399480,
0.0096016645399480,
0.0096016645399480,
0.0096016645399480,
0.0096016645399480,
0.0096016645399480,
0.0096016645399480,
0.0096016645399480,
0.0096016645399480,
0.0096016645399480,
0.0096016645399480,
0.0164493976798232,
0.0164493976798232,
0.0164493976798232,
0.0164493976798232,
0.0164493976798232,
0.0164493976798232,
0.0164493976798232,
0.0164493976798232,
0.0164493976798232,
0.0164493976798232,
0.0164493976798232,
0.0164493976798232,
0.0153747766513310,
0.0153747766513310,
0.0153747766513310,
0.0153747766513310,
0.0153747766513310,
0.0153747766513310,
0.0153747766513310,
0.0153747766513310,
0.0153747766513310,
0.0153747766513310,
0.0153747766513310,
0.0153747766513310,
0.0293520118375230,
0.0293520118375230,
0.0293520118375230,
0.0293520118375230,
0.0293520118375230,
0.0293520118375230,
0.0293520118375230,
0.0293520118375230,
0.0293520118375230,
0.0293520118375230,
0.0293520118375230,
0.0293520118375230,
0.0366291366405108,
0.0366291366405108,
0.0366291366405108,
0.0366291366405108,
]
coords = [
Coords(0.0149520651530592, 0.0149520651530592, 0.0149520651530592),
Coords(0.9551438045408220, 0.0149520651530592, 0.0149520651530592),
Coords(0.0149520651530592, 0.9551438045408220, 0.0149520651530592),
Coords(0.0149520651530592, 0.0149520651530592, 0.9551438045408220),
Coords(0.1518319491659370, 0.0340960211962615, 0.0340960211962615),
Coords(0.7799760084415400, 0.0340960211962615, 0.0340960211962615),
Coords(0.0340960211962615, 0.1518319491659370, 0.0340960211962615),
Coords(0.0340960211962615, 0.7799760084415400, 0.0340960211962615),
Coords(0.0340960211962615, 0.0340960211962615, 0.1518319491659370),
Coords(0.0340960211962615, 0.0340960211962615, 0.7799760084415400),
Coords(0.7799760084415400, 0.1518319491659370, 0.0340960211962615),
Coords(0.1518319491659370, 0.7799760084415400, 0.0340960211962615),
Coords(0.7799760084415400, 0.0340960211962615, 0.1518319491659370),
Coords(0.1518319491659370, 0.0340960211962615, 0.7799760084415400),
Coords(0.0340960211962615, 0.7799760084415400, 0.1518319491659370),
Coords(0.0340960211962615, 0.1518319491659370, 0.7799760084415400),
Coords(0.5526556431060170, 0.0462051504150017, 0.0462051504150017),
Coords(0.3549340560639790, 0.0462051504150017, 0.0462051504150017),
Coords(0.0462051504150017, 0.5526556431060170, 0.0462051504150017),
Coords(0.0462051504150017, 0.3549340560639790, 0.0462051504150017),
Coords(0.0462051504150017, 0.0462051504150017, 0.5526556431060170),
Coords(0.0462051504150017, 0.0462051504150017, 0.3549340560639790),
Coords(0.3549340560639790, 0.5526556431060170, 0.0462051504150017),
Coords(0.5526556431060170, 0.3549340560639790, 0.0462051504150017),
Coords(0.3549340560639790, 0.0462051504150017, 0.5526556431060170),
Coords(0.5526556431060170, 0.0462051504150017, 0.3549340560639790),
Coords(0.0462051504150017, 0.3549340560639790, 0.5526556431060170),
Coords(0.0462051504150017, 0.5526556431060170, 0.3549340560639790),
Coords(0.2281904610687610, 0.2281904610687610, 0.0055147549744775),
Coords(0.5381043228880020, 0.2281904610687610, 0.0055147549744775),
Coords(0.2281904610687610, 0.5381043228880020, 0.0055147549744775),
Coords(0.2281904610687610, 0.0055147549744775, 0.2281904610687610),
Coords(0.5381043228880020, 0.0055147549744775, 0.2281904610687610),
Coords(0.2281904610687610, 0.0055147549744775, 0.5381043228880020),
Coords(0.0055147549744775, 0.2281904610687610, 0.2281904610687610),
Coords(0.0055147549744775, 0.5381043228880020, 0.2281904610687610),
Coords(0.0055147549744775, 0.2281904610687610, 0.5381043228880020),
Coords(0.5381043228880020, 0.2281904610687610, 0.2281904610687610),
Coords(0.2281904610687610, 0.5381043228880020, 0.2281904610687610),
Coords(0.2281904610687610, 0.2281904610687610, 0.5381043228880020),
Coords(0.3523052600879940, 0.3523052600879940, 0.0992057202494530),
Coords(0.1961837595745600, 0.3523052600879940, 0.0992057202494530),
Coords(0.3523052600879940, 0.1961837595745600, 0.0992057202494530),
Coords(0.3523052600879940, 0.0992057202494530, 0.3523052600879940),
Coords(0.1961837595745600, 0.0992057202494530, 0.3523052600879940),
Coords(0.3523052600879940, 0.0992057202494530, 0.1961837595745600),
Coords(0.0992057202494530, 0.3523052600879940, 0.3523052600879940),
Coords(0.0992057202494530, 0.1961837595745600, 0.3523052600879940),
Coords(0.0992057202494530, 0.3523052600879940, 0.1961837595745600),
Coords(0.1961837595745600, 0.3523052600879940, 0.3523052600879940),
Coords(0.3523052600879940, 0.1961837595745600, 0.3523052600879940),
Coords(0.3523052600879940, 0.3523052600879940, 0.1961837595745600),
Coords(0.1344783347929940, 0.1344783347929940, 0.1344783347929940),
Coords(0.5965649956210170, 0.1344783347929940, 0.1344783347929940),
Coords(0.1344783347929940, 0.5965649956210170, 0.1344783347929940),
Coords(0.1344783347929940, 0.1344783347929940, 0.5965649956210170),
]
else:
raise NotImplementedError
return coords, weights
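# Hedged self-check (illustrative, not part of the public API): for every
# reference element above, the quadrature weights should sum to 1, so that
# integrals over the element are recovered by scaling with `measure()`.
if __name__ == "__main__":
    for element in (LinearEdge(), Square(), Cube(), Triangle(), Tetrahedron()):
        _coords, weights = element.instantiate_quadrature(order=4, family=None)
        assert abs(sum(weights) - 1.0) < 1.0e-6
        print(f"{type(element).__name__}: {len(weights)} points, weight sum = {sum(weights):.6f}")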
| 34,180 | Python | 44.635514 | 109 | 0.570246 |
NVIDIA/warp/warp/fem/quadrature/quadrature.py | from typing import Any
import warp as wp
from warp.fem import cache, domain
from warp.fem.space import FunctionSpace
from warp.fem.types import Coords, ElementIndex
from ..polynomial import Polynomial
class Quadrature:
"""Interface class for quadrature rules"""
@wp.struct
class Arg:
"""Structure containing arguments to be passed to device functions"""
pass
def __init__(self, domain: domain.GeometryDomain):
self._domain = domain
@property
def domain(self):
"""Domain over which this quadrature is defined"""
return self._domain
    def arg_value(self, device) -> "Arg":
        """
        Value of the argument to be passed to device functions
        """
        arg = Quadrature.Arg()
return arg
def total_point_count(self):
"""Total number of quadrature points over the domain"""
raise NotImplementedError()
def points_per_element(self):
"""Number of points per element if constant, or ``None`` if varying"""
return None
@staticmethod
def point_count(elt_arg: "domain.GeometryDomain.ElementArg", qp_arg: Arg, element_index: ElementIndex):
"""Number of quadrature points for a given element"""
raise NotImplementedError()
@staticmethod
def point_coords(
elt_arg: "domain.GeometryDomain.ElementArg", qp_arg: Arg, element_index: ElementIndex, qp_index: int
):
"""Coordinates in element of the element's qp_index'th quadrature point"""
raise NotImplementedError()
@staticmethod
def point_weight(
elt_arg: "domain.GeometryDomain.ElementArg", qp_arg: Arg, element_index: ElementIndex, qp_index: int
):
"""Weight of the element's qp_index'th quadrature point"""
raise NotImplementedError()
@staticmethod
def point_index(
elt_arg: "domain.GeometryDomain.ElementArg", qp_arg: Arg, element_index: ElementIndex, qp_index: int
):
"""Global index of the element's qp_index'th quadrature point"""
raise NotImplementedError()
def __str__(self) -> str:
return self.name
class RegularQuadrature(Quadrature):
"""Regular quadrature formula, using a constant set of quadrature points per element"""
def __init__(
self,
domain: domain.GeometryDomain,
order: int,
family: Polynomial = None,
):
super().__init__(domain)
self.family = family
self.order = order
self._element_quadrature = domain.reference_element().instantiate_quadrature(order, family)
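        # Bake the reference-element points and weights into Warp constants so
        # that the device functions generated below can read them without any
        # extra kernel arguments.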
self._N = wp.constant(len(self.points))
WeightVec = wp.vec(length=self._N, dtype=wp.float32)
CoordMat = wp.mat(shape=(self._N, 3), dtype=wp.float32)
self._POINTS = wp.constant(CoordMat(self.points))
self._WEIGHTS = wp.constant(WeightVec(self.weights))
self.point_count = self._make_point_count()
self.point_index = self._make_point_index()
self.point_coords = self._make_point_coords()
self.point_weight = self._make_point_weight()
@property
def name(self):
return f"{self.__class__.__name__}_{self.domain.name}_{self.family}_{self.order}"
def total_point_count(self):
return len(self.points) * self.domain.geometry_element_count()
def points_per_element(self):
return self._N
@property
def points(self):
return self._element_quadrature[0]
@property
def weights(self):
return self._element_quadrature[1]
def _make_point_count(self):
N = self._N
@cache.dynamic_func(suffix=self.name)
def point_count(elt_arg: self.domain.ElementArg, qp_arg: self.Arg, element_index: ElementIndex):
return N
return point_count
def _make_point_coords(self):
POINTS = self._POINTS
@cache.dynamic_func(suffix=self.name)
def point_coords(elt_arg: self.domain.ElementArg, qp_arg: self.Arg, element_index: ElementIndex, qp_index: int):
return Coords(POINTS[qp_index, 0], POINTS[qp_index, 1], POINTS[qp_index, 2])
return point_coords
def _make_point_weight(self):
WEIGHTS = self._WEIGHTS
@cache.dynamic_func(suffix=self.name)
def point_weight(elt_arg: self.domain.ElementArg, qp_arg: self.Arg, element_index: ElementIndex, qp_index: int):
return WEIGHTS[qp_index]
return point_weight
def _make_point_index(self):
N = self._N
@cache.dynamic_func(suffix=self.name)
def point_index(elt_arg: self.domain.ElementArg, qp_arg: self.Arg, element_index: ElementIndex, qp_index: int):
return N * element_index + qp_index
return point_index
class NodalQuadrature(Quadrature):
"""Quadrature using space node points as quadrature points
    Note that in contrast to the `nodal=True` flag for :func:`integrate`, this quadrature does not make any assumption
    about orthogonality of shape functions, and is thus safe to use for arbitrary integrands.
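
    Example (hedged sketch; assumes ``warp.fem`` is imported as ``fem``, and that
    ``geo`` and ``domain`` are an existing geometry and cell domain)::

        space = fem.make_polynomial_space(geo, degree=2)
        quadrature = NodalQuadrature(domain, space)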
"""
def __init__(self, domain: domain.GeometryDomain, space: FunctionSpace):
super().__init__(domain)
self._space = space
self.Arg = self._make_arg()
self.point_count = self._make_point_count()
self.point_index = self._make_point_index()
self.point_coords = self._make_point_coords()
self.point_weight = self._make_point_weight()
@property
def name(self):
return f"{self.__class__.__name__}_{self._space.name}"
def total_point_count(self):
return self._space.node_count()
def points_per_element(self):
return self._space.topology.NODES_PER_ELEMENT
def _make_arg(self):
@cache.dynamic_struct(suffix=self.name)
class Arg:
space_arg: self._space.SpaceArg
topo_arg: self._space.topology.TopologyArg
return Arg
@cache.cached_arg_value
def arg_value(self, device):
arg = self.Arg()
arg.space_arg = self._space.space_arg_value(device)
arg.topo_arg = self._space.topology.topo_arg_value(device)
return arg
def _make_point_count(self):
N = self._space.topology.NODES_PER_ELEMENT
@cache.dynamic_func(suffix=self.name)
def point_count(elt_arg: self.domain.ElementArg, qp_arg: self.Arg, element_index: ElementIndex):
return N
return point_count
def _make_point_coords(self):
@cache.dynamic_func(suffix=self.name)
def point_coords(elt_arg: self.domain.ElementArg, qp_arg: self.Arg, element_index: ElementIndex, qp_index: int):
return self._space.node_coords_in_element(elt_arg, qp_arg.space_arg, element_index, qp_index)
return point_coords
def _make_point_weight(self):
@cache.dynamic_func(suffix=self.name)
def point_weight(elt_arg: self.domain.ElementArg, qp_arg: self.Arg, element_index: ElementIndex, qp_index: int):
return self._space.node_quadrature_weight(elt_arg, qp_arg.space_arg, element_index, qp_index)
return point_weight
def _make_point_index(self):
@cache.dynamic_func(suffix=self.name)
def point_index(elt_arg: self.domain.ElementArg, qp_arg: self.Arg, element_index: ElementIndex, qp_index: int):
return self._space.topology.element_node_index(elt_arg, qp_arg.topo_arg, element_index, qp_index)
return point_index
class ExplicitQuadrature(Quadrature):
"""Quadrature using explicit per-cell points and weights. The number of quadrature points per cell is assumed
to be constant and deduced from the shape of the points and weights arrays.
Args:
domain: Domain of definition of the quadrature formula
        points: 2d array of shape ``(domain.geometry_element_count(), points_per_cell)`` containing the coordinates of each quadrature point.
        weights: 2d array of shape ``(domain.geometry_element_count(), points_per_cell)`` containing the weight for each quadrature point.
See also: :class:`PicQuadrature`
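
    Example (hedged sketch; the point locations and weights are made up, and
    ``domain`` is assumed to be a cell domain with a single element)::

        points = wp.array([[Coords(0.25, 0.5, 0.0), Coords(0.75, 0.5, 0.0)]], dtype=Coords)
        weights = wp.array([[0.5, 0.5]], dtype=float)
        quadrature = ExplicitQuadrature(domain, points, weights)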
"""
@wp.struct
class Arg:
points_per_cell: int
points: wp.array2d(dtype=Coords)
weights: wp.array2d(dtype=float)
def __init__(
self, domain: domain.GeometryDomain, points: "wp.array2d(dtype=Coords)", weights: "wp.array2d(dtype=float)"
):
super().__init__(domain)
if points.shape != weights.shape:
raise ValueError("Points and weights arrays must have the same shape")
self._points_per_cell = points.shape[1]
self._points = points
self._weights = weights
@property
def name(self):
return f"{self.__class__.__name__}"
def total_point_count(self):
return self._weights.size
def points_per_element(self):
return self._points_per_cell
@cache.cached_arg_value
def arg_value(self, device):
arg = self.Arg()
arg.points_per_cell = self._points_per_cell
arg.points = self._points.to(device)
arg.weights = self._weights.to(device)
return arg
@wp.func
def point_count(elt_arg: Any, qp_arg: Arg, element_index: ElementIndex):
return qp_arg.points_per_cell
@wp.func
def point_coords(elt_arg: Any, qp_arg: Arg, element_index: ElementIndex, qp_index: int):
return qp_arg.points[element_index, qp_index]
@wp.func
def point_weight(elt_arg: Any, qp_arg: Arg, element_index: ElementIndex, qp_index: int):
return qp_arg.weights[element_index, qp_index]
@wp.func
def point_index(elt_arg: Any, qp_arg: Arg, element_index: ElementIndex, qp_index: int):
return qp_arg.points_per_cell * element_index + qp_index
| 9,746 | Python | 31.929054 | 141 | 0.644162 |
NVIDIA/warp/warp/fem/quadrature/__init__.py | from .pic_quadrature import PicQuadrature
from .quadrature import ExplicitQuadrature, NodalQuadrature, Quadrature, RegularQuadrature
| 133 | Python | 43.666652 | 90 | 0.87218 |
NVIDIA/warp/warp/fem/quadrature/pic_quadrature.py | from typing import Any, Optional, Tuple, Union
import warp as wp
from warp.fem.cache import TemporaryStore, borrow_temporary, cached_arg_value, dynamic_kernel
from warp.fem.domain import GeometryDomain
from warp.fem.types import Coords, ElementIndex, make_free_sample
from warp.fem.utils import compress_node_indices
from .quadrature import Quadrature
wp.set_module_options({"enable_backward": False})
class PicQuadrature(Quadrature):
"""Particle-based quadrature formula, using a global set of points unevenly spread out over geometry elements.
Useful for Particle-In-Cell and derived methods.
Args:
domain: Underlying domain for the quadrature
positions: Either an array containing the world positions of all particles, or a tuple of arrays containing
the cell indices and coordinates for each particle. Note that the former requires the underlying geometry to
define a global :meth:`Geometry.cell_lookup` method; currently this is only available for :class:`Grid2D` and :class:`Grid3D`.
        measures: Array containing the measure (area/volume) of each particle, used to define the integration weights.
            If ``None``, defaults to the cell measure divided by the number of particles in the cell.
temporary_store: shared pool from which to allocate temporary arrays
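
    Example (hedged sketch; assumes ``warp.fem`` is imported as ``fem``, ``grid``
    is a :class:`Grid2D`, and ``positions`` is a ``wp.vec2`` array of particle
    positions)::

        domain = fem.Cells(geometry=grid)
        quadrature = PicQuadrature(domain, positions)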
"""
def __init__(
self,
domain: GeometryDomain,
positions: Union[
"wp.array(dtype=wp.vecXd)",
Tuple[
"wp.array(dtype=ElementIndex)",
"wp.array(dtype=Coords)",
],
],
measures: Optional["wp.array(dtype=float)"] = None,
temporary_store: TemporaryStore = None,
):
super().__init__(domain)
self._bin_particles(positions, measures, temporary_store)
@property
def name(self):
return f"{self.__class__.__name__}"
@Quadrature.domain.setter
def domain(self, domain: GeometryDomain):
# Allow changing the quadrature domain as long as underlying geometry and element kind are the same
if self.domain is not None and (
domain.geometry != self.domain.geometry or domain.element_kind != self.domain.element_kind
):
raise RuntimeError(
"Cannot change the domain to that of a different Geometry and/or using different element kinds."
)
self._domain = domain
@wp.struct
class Arg:
cell_particle_offsets: wp.array(dtype=int)
cell_particle_indices: wp.array(dtype=int)
particle_fraction: wp.array(dtype=float)
particle_coords: wp.array(dtype=Coords)
@cached_arg_value
def arg_value(self, device) -> Arg:
arg = PicQuadrature.Arg()
arg.cell_particle_offsets = self._cell_particle_offsets.array.to(device)
arg.cell_particle_indices = self._cell_particle_indices.array.to(device)
arg.particle_fraction = self._particle_fraction.to(device)
arg.particle_coords = self._particle_coords.to(device)
return arg
def total_point_count(self):
return self._particle_coords.shape[0]
def active_cell_count(self):
"""Number of cells containing at least one particle"""
return self._cell_count
@wp.func
def point_count(elt_arg: Any, qp_arg: Arg, element_index: ElementIndex):
return qp_arg.cell_particle_offsets[element_index + 1] - qp_arg.cell_particle_offsets[element_index]
@wp.func
def point_coords(elt_arg: Any, qp_arg: Arg, element_index: ElementIndex, index: int):
particle_index = qp_arg.cell_particle_indices[qp_arg.cell_particle_offsets[element_index] + index]
return qp_arg.particle_coords[particle_index]
@wp.func
def point_weight(elt_arg: Any, qp_arg: Arg, element_index: ElementIndex, index: int):
particle_index = qp_arg.cell_particle_indices[qp_arg.cell_particle_offsets[element_index] + index]
return qp_arg.particle_fraction[particle_index]
@wp.func
def point_index(elt_arg: Any, qp_arg: Arg, element_index: ElementIndex, index: int):
particle_index = qp_arg.cell_particle_indices[qp_arg.cell_particle_offsets[element_index] + index]
return particle_index
def fill_element_mask(self, mask: "wp.array(dtype=int)"):
"""Fills a mask array such that all non-empty elements are set to 1, all empty elements to zero.
Args:
mask: Int warp array with size at least equal to `self.domain.geometry_element_count()`
"""
wp.launch(
kernel=PicQuadrature._fill_mask_kernel,
dim=self.domain.geometry_element_count(),
device=mask.device,
inputs=[self._cell_particle_offsets.array, mask],
)
@wp.kernel
def _fill_mask_kernel(
element_particle_offsets: wp.array(dtype=int),
element_mask: wp.array(dtype=int),
):
i = wp.tid()
element_mask[i] = wp.select(element_particle_offsets[i] == element_particle_offsets[i + 1], 1, 0)
@wp.kernel
def _compute_uniform_fraction(
cell_index: wp.array(dtype=ElementIndex),
cell_particle_offsets: wp.array(dtype=int),
cell_fraction: wp.array(dtype=float),
):
p = wp.tid()
cell = cell_index[p]
cell_particle_count = cell_particle_offsets[cell + 1] - cell_particle_offsets[cell]
cell_fraction[p] = 1.0 / float(cell_particle_count)
def _bin_particles(self, positions, measures, temporary_store: TemporaryStore):
if wp.types.is_array(positions):
# Initialize from positions
@dynamic_kernel(suffix=f"{self.domain.name}")
def bin_particles(
cell_arg_value: self.domain.ElementArg,
positions: wp.array(dtype=positions.dtype),
cell_index: wp.array(dtype=ElementIndex),
cell_coords: wp.array(dtype=Coords),
):
p = wp.tid()
sample = self.domain.element_lookup(cell_arg_value, positions[p])
cell_index[p] = sample.element_index
cell_coords[p] = sample.element_coords
device = positions.device
cell_index_temp = borrow_temporary(temporary_store, shape=positions.shape, dtype=int, device=device)
cell_index = cell_index_temp.array
self._particle_coords_temp = borrow_temporary(
temporary_store, shape=positions.shape, dtype=Coords, device=device
)
self._particle_coords = self._particle_coords_temp.array
wp.launch(
dim=positions.shape[0],
kernel=bin_particles,
inputs=[
self.domain.element_arg_value(device),
positions,
cell_index,
self._particle_coords,
],
device=device,
)
else:
cell_index, self._particle_coords = positions
if cell_index.shape != self._particle_coords.shape:
raise ValueError("Cell index and coordinates arrays must have the same shape")
cell_index_temp = None
self._particle_coords_temp = None
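        # Build a CSR-like mapping from cells to particles: per-cell offsets
        # into a contiguous array of particle indices.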
self._cell_particle_offsets, self._cell_particle_indices, self._cell_count, _ = compress_node_indices(
self.domain.geometry_element_count(), cell_index
)
self._compute_fraction(cell_index, measures, temporary_store)
def _compute_fraction(self, cell_index, measures, temporary_store: TemporaryStore):
device = cell_index.device
self._particle_fraction_temp = borrow_temporary(
temporary_store, shape=cell_index.shape, dtype=float, device=device
)
self._particle_fraction = self._particle_fraction_temp.array
if measures is None:
# Split fraction uniformly over all particles in cell
wp.launch(
dim=cell_index.shape,
kernel=PicQuadrature._compute_uniform_fraction,
inputs=[
cell_index,
self._cell_particle_offsets.array,
self._particle_fraction,
],
device=device,
)
else:
# Fraction from particle measure
if measures.shape != cell_index.shape:
raise ValueError("Measures should be an 1d array or length equal to particle count")
@dynamic_kernel(suffix=f"{self.domain.name}")
def compute_fraction(
cell_arg_value: self.domain.ElementArg,
measures: wp.array(dtype=float),
cell_index: wp.array(dtype=ElementIndex),
cell_coords: wp.array(dtype=Coords),
cell_fraction: wp.array(dtype=float),
):
p = wp.tid()
sample = make_free_sample(cell_index[p], cell_coords[p])
cell_fraction[p] = measures[p] / self.domain.element_measure(cell_arg_value, sample)
wp.launch(
dim=measures.shape[0],
kernel=compute_fraction,
inputs=[
self.domain.element_arg_value(device),
measures,
cell_index,
self._particle_coords,
self._particle_fraction,
],
device=device,
)
| 9,515 | Python | 38 | 135 | 0.607252 |
NVIDIA/warp/warp/tests/test_rounding.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
compare_to_numpy = False
print_results = False
@wp.kernel
def test_kernel(
x: wp.array(dtype=float),
x_round: wp.array(dtype=float),
x_rint: wp.array(dtype=float),
x_trunc: wp.array(dtype=float),
x_cast: wp.array(dtype=float),
x_floor: wp.array(dtype=float),
x_ceil: wp.array(dtype=float),
x_frac: wp.array(dtype=float),
):
tid = wp.tid()
x_round[tid] = wp.round(x[tid])
x_rint[tid] = wp.rint(x[tid])
x_trunc[tid] = wp.trunc(x[tid])
x_cast[tid] = float(int(x[tid]))
x_floor[tid] = wp.floor(x[tid])
x_ceil[tid] = wp.ceil(x[tid])
x_frac[tid] = wp.frac(x[tid])
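# Note: as exercised by the golden table below, wp.round() rounds halfway
# cases away from zero (4.5 -> 5.0) while wp.rint() rounds them to the
# nearest even value (4.5 -> 4.0); wp.trunc() and the float(int(x)) cast
# both round toward zero.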
def test_rounding(test, device):
nx = np.array(
[
4.9,
4.5,
4.1,
3.9,
3.5,
3.1,
2.9,
2.5,
2.1,
1.9,
1.5,
1.1,
0.9,
0.5,
0.1,
-0.1,
-0.5,
-0.9,
-1.1,
-1.5,
-1.9,
-2.1,
-2.5,
-2.9,
-3.1,
-3.5,
-3.9,
-4.1,
-4.5,
-4.9,
],
dtype=np.float32,
)
x = wp.array(nx, device=device)
N = len(x)
x_round = wp.empty(N, dtype=float, device=device)
x_rint = wp.empty(N, dtype=float, device=device)
x_trunc = wp.empty(N, dtype=float, device=device)
x_cast = wp.empty(N, dtype=float, device=device)
x_floor = wp.empty(N, dtype=float, device=device)
x_ceil = wp.empty(N, dtype=float, device=device)
x_frac = wp.empty(N, dtype=float, device=device)
wp.launch(
kernel=test_kernel, dim=N, inputs=[x, x_round, x_rint, x_trunc, x_cast, x_floor, x_ceil, x_frac], device=device
)
wp.synchronize()
nx_round = x_round.numpy().reshape(N)
nx_rint = x_rint.numpy().reshape(N)
nx_trunc = x_trunc.numpy().reshape(N)
nx_cast = x_cast.numpy().reshape(N)
nx_floor = x_floor.numpy().reshape(N)
nx_ceil = x_ceil.numpy().reshape(N)
nx_frac = x_frac.numpy().reshape(N)
tab = np.stack([nx, nx_round, nx_rint, nx_trunc, nx_cast, nx_floor, nx_ceil, nx_frac], axis=1)
golden = np.array(
[
[4.9, 5.0, 5.0, 4.0, 4.0, 4.0, 5.0, 0.9],
[4.5, 5.0, 4.0, 4.0, 4.0, 4.0, 5.0, 0.5],
[4.1, 4.0, 4.0, 4.0, 4.0, 4.0, 5.0, 0.1],
[3.9, 4.0, 4.0, 3.0, 3.0, 3.0, 4.0, 0.9],
[3.5, 4.0, 4.0, 3.0, 3.0, 3.0, 4.0, 0.5],
[3.1, 3.0, 3.0, 3.0, 3.0, 3.0, 4.0, 0.1],
[2.9, 3.0, 3.0, 2.0, 2.0, 2.0, 3.0, 0.9],
[2.5, 3.0, 2.0, 2.0, 2.0, 2.0, 3.0, 0.5],
[2.1, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 0.1],
[1.9, 2.0, 2.0, 1.0, 1.0, 1.0, 2.0, 0.9],
[1.5, 2.0, 2.0, 1.0, 1.0, 1.0, 2.0, 0.5],
[1.1, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 0.1],
[0.9, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.9],
[0.5, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.5],
[0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.1],
[-0.1, -0.0, -0.0, -0.0, 0.0, -1.0, -0.0, -0.1],
[-0.5, -1.0, -0.0, -0.0, 0.0, -1.0, -0.0, -0.5],
[-0.9, -1.0, -1.0, -0.0, 0.0, -1.0, -0.0, -0.9],
[-1.1, -1.0, -1.0, -1.0, -1.0, -2.0, -1.0, -0.1],
[-1.5, -2.0, -2.0, -1.0, -1.0, -2.0, -1.0, -0.5],
[-1.9, -2.0, -2.0, -1.0, -1.0, -2.0, -1.0, -0.9],
[-2.1, -2.0, -2.0, -2.0, -2.0, -3.0, -2.0, -0.1],
[-2.5, -3.0, -2.0, -2.0, -2.0, -3.0, -2.0, -0.5],
[-2.9, -3.0, -3.0, -2.0, -2.0, -3.0, -2.0, -0.9],
[-3.1, -3.0, -3.0, -3.0, -3.0, -4.0, -3.0, -0.1],
[-3.5, -4.0, -4.0, -3.0, -3.0, -4.0, -3.0, -0.5],
[-3.9, -4.0, -4.0, -3.0, -3.0, -4.0, -3.0, -0.9],
[-4.1, -4.0, -4.0, -4.0, -4.0, -5.0, -4.0, -0.1],
[-4.5, -5.0, -4.0, -4.0, -4.0, -5.0, -4.0, -0.5],
[-4.9, -5.0, -5.0, -4.0, -4.0, -5.0, -4.0, -0.9],
],
dtype=np.float32,
)
assert_np_equal(tab, golden, tol=1e-6)
if print_results:
np.set_printoptions(formatter={"float": lambda x: "{:6.1f}".format(x).replace(".0", ".")})
print("----------------------------------------------")
print(" %5s %5s %5s %5s %5s %5s %5s" % ("x ", "round", "rint", "trunc", "cast", "floor", "ceil"))
print(tab)
print("----------------------------------------------")
if compare_to_numpy:
nx_round = np.round(nx)
nx_rint = np.rint(nx)
nx_trunc = np.trunc(nx)
nx_fix = np.fix(nx)
nx_floor = np.floor(nx)
nx_ceil = np.ceil(nx)
nx_frac = np.modf(nx)[0]
tab = np.stack([nx, nx_round, nx_rint, nx_trunc, nx_fix, nx_floor, nx_ceil, nx_frac], axis=1)
print(" %5s %5s %5s %5s %5s %5s %5s" % ("x ", "round", "rint", "trunc", "fix", "floor", "ceil"))
print(tab)
print("----------------------------------------------")
class TestRounding(unittest.TestCase):
pass
devices = get_test_devices()
add_function_test(TestRounding, "test_rounding", test_rounding, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 5,784 | Python | 31.5 | 119 | 0.449343 |
NVIDIA/warp/warp/tests/test_launch.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
dim_x = wp.constant(2)
dim_y = wp.constant(2)
dim_z = wp.constant(2)
dim_w = wp.constant(2)
@wp.kernel
def kernel1d(a: wp.array(dtype=int, ndim=1)):
i = wp.tid()
wp.expect_eq(a[i], i)
@wp.kernel
def kernel2d(a: wp.array(dtype=int, ndim=2)):
i, j = wp.tid()
wp.expect_eq(a[i, j], i * dim_y + j)
@wp.kernel
def kernel3d(a: wp.array(dtype=int, ndim=3)):
i, j, k = wp.tid()
wp.expect_eq(a[i, j, k], i * dim_y * dim_z + j * dim_z + k)
@wp.kernel
def kernel4d(a: wp.array(dtype=int, ndim=4)):
i, j, k, l = wp.tid()
wp.expect_eq(a[i, j, k, l], i * dim_y * dim_z * dim_w + j * dim_z * dim_w + k * dim_w + l)
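# The expected values above follow the row-major (C-style) linearization
# produced by np.arange().reshape(); e.g. in the 4d case the flat index of
# element (i, j, k, l) is ((i * dim_y + j) * dim_z + k) * dim_w + l.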
def test1d(test, device):
a = np.arange(0, dim_x).reshape(dim_x)
wp.launch(kernel1d, dim=a.shape, inputs=[wp.array(a, dtype=int, device=device)], device=device)
def test2d(test, device):
a = np.arange(0, dim_x * dim_y).reshape(dim_x, dim_y)
wp.launch(kernel2d, dim=a.shape, inputs=[wp.array(a, dtype=int, device=device)], device=device)
def test3d(test, device):
a = np.arange(0, dim_x * dim_y * dim_z).reshape(dim_x, dim_y, dim_z)
wp.launch(kernel3d, dim=a.shape, inputs=[wp.array(a, dtype=int, device=device)], device=device)
def test4d(test, device):
a = np.arange(0, dim_x * dim_y * dim_z * dim_w).reshape(dim_x, dim_y, dim_z, dim_w)
wp.launch(kernel4d, dim=a.shape, inputs=[wp.array(a, dtype=int, device=device)], device=device)
@wp.struct
class Params:
a: wp.array(dtype=int)
i: int
f: float
@wp.kernel
def kernel_cmd(params: Params, i: int, f: float, v: wp.vec3, m: wp.mat33, out: wp.array(dtype=int)):
tid = wp.tid()
wp.expect_eq(params.i, i)
wp.expect_eq(params.f, f)
wp.expect_eq(i, int(f))
wp.expect_eq(v[0], f)
wp.expect_eq(v[1], f)
wp.expect_eq(v[2], f)
wp.expect_eq(m[0, 0], f)
wp.expect_eq(m[1, 1], f)
wp.expect_eq(m[2, 2], f)
out[tid] = tid + i
def test_launch_cmd(test, device):
n = 1
ref = np.arange(0, n)
out = wp.zeros(n, dtype=int, device=device)
params = Params()
params.i = 1
params.f = 1.0
v = wp.vec3(params.f, params.f, params.f)
m = wp.mat33(params.f, 0.0, 0.0, 0.0, params.f, 0.0, 0.0, 0.0, params.f)
# standard launch
wp.launch(kernel_cmd, dim=n, inputs=[params, params.i, params.f, v, m, out], device=device)
assert_np_equal(out.numpy(), ref + params.i)
# cmd launch
out.zero_()
cmd = wp.launch(kernel_cmd, dim=n, inputs=[params, params.i, params.f, v, m, out], device=device, record_cmd=True)
cmd.launch()
assert_np_equal(out.numpy(), ref + params.i)
def test_launch_cmd_set_param(test, device):
n = 1
ref = np.arange(0, n)
params = Params()
v = wp.vec3()
m = wp.mat33()
cmd = wp.launch(kernel_cmd, dim=n, inputs=[params, 0, 0.0, v, m, None], device=device, record_cmd=True)
# cmd param modification
out = wp.zeros(n, dtype=int, device=device)
params.i = 13
params.f = 13.0
v = wp.vec3(params.f, params.f, params.f)
m = wp.mat33(params.f, 0.0, 0.0, 0.0, params.f, 0.0, 0.0, 0.0, params.f)
cmd.set_param_at_index(0, params)
cmd.set_param_at_index(1, params.i)
cmd.set_param_at_index(2, params.f)
cmd.set_param_at_index(3, v)
cmd.set_param_at_index(4, m)
cmd.set_param_by_name("out", out)
cmd.launch()
assert_np_equal(out.numpy(), ref + params.i)
    # test changing params directly after launch:
    # because we now cache the ctypes object inside the wp.struct
    # instance, the command buffer will be automatically updated
params.i = 14
params.f = 14.0
v = wp.vec3(params.f, params.f, params.f)
m = wp.mat33(params.f, 0.0, 0.0, 0.0, params.f, 0.0, 0.0, 0.0, params.f)
# this is the line we explicitly leave out to
# ensure that param changes are reflected in the launch
# launch.set_param_at_index(0, params)
cmd.set_param_at_index(1, params.i)
cmd.set_param_at_index(2, params.f)
cmd.set_param_at_index(3, v)
cmd.set_param_at_index(4, m)
cmd.set_param_by_name("out", out)
cmd.launch()
assert_np_equal(out.numpy(), ref + params.i)
def test_launch_cmd_set_ctype(test, device):
n = 1
ref = np.arange(0, n)
params = Params()
v = wp.vec3()
m = wp.mat33()
cmd = wp.launch(kernel_cmd, dim=n, inputs=[params, 0, 0.0, v, m, None], device=device, record_cmd=True)
# cmd param modification
out = wp.zeros(n, dtype=int, device=device)
# cmd param modification
out.zero_()
params.i = 13
params.f = 13.0
v = wp.vec3(params.f, params.f, params.f)
m = wp.mat33(params.f, 0.0, 0.0, 0.0, params.f, 0.0, 0.0, 0.0, params.f)
cmd.set_param_at_index_from_ctype(0, params.__ctype__())
cmd.set_param_at_index_from_ctype(1, params.i)
cmd.set_param_at_index_from_ctype(2, params.f)
cmd.set_param_at_index_from_ctype(3, v)
cmd.set_param_at_index_from_ctype(4, m)
cmd.set_param_by_name_from_ctype("out", out.__ctype__())
cmd.launch()
assert_np_equal(out.numpy(), ref + params.i)
@wp.kernel
def arange(out: wp.array(dtype=int)):
tid = wp.tid()
out[tid] = tid
def test_launch_cmd_set_dim(test, device):
n = 10
ref = np.arange(0, n, dtype=int)
out = wp.zeros(n, dtype=int, device=device)
cmd = wp.launch(arange, dim=n, inputs=[out], device=device, record_cmd=True)
cmd.set_dim(5)
cmd.launch()
# check first half the array is filled while rest is still zero
assert_np_equal(out.numpy()[0:5], ref[0:5])
assert_np_equal(out.numpy()[5:], np.zeros(5))
out.zero_()
cmd.set_dim(10)
cmd.launch()
# check the whole array was filled
assert_np_equal(out.numpy(), ref)
def test_launch_cmd_empty(test, device):
n = 10
ref = np.arange(0, n, dtype=int)
out = wp.zeros(n, dtype=int, device=device)
cmd = wp.Launch(arange, device)
cmd.set_dim(5)
cmd.set_param_by_name("out", out)
cmd.launch()
# check first half the array is filled while rest is still zero
assert_np_equal(out.numpy()[0:5], ref[0:5])
assert_np_equal(out.numpy()[5:], np.zeros(5))
out.zero_()
cmd.set_dim(10)
cmd.launch()
# check the whole array was filled
assert_np_equal(out.numpy(), ref)
@wp.kernel
def kernel_mul(
values: wp.array(dtype=int),
coeff: int,
out: wp.array(dtype=int),
):
tid = wp.tid()
out[tid] = values[tid] * coeff
def test_launch_tuple_args(test, device):
values = wp.array(np.arange(0, 4), dtype=int, device=device)
coeff = 3
out = wp.empty_like(values)
wp.launch(
kernel_mul,
dim=len(values),
inputs=(
values,
coeff,
),
outputs=(out,),
device=device,
)
assert_np_equal(out.numpy(), np.array((0, 3, 6, 9)))
wp.launch(
kernel_mul,
dim=len(values),
inputs=(
values,
coeff,
out,
),
device=device,
)
assert_np_equal(out.numpy(), np.array((0, 3, 6, 9)))
wp.launch(
kernel_mul,
dim=len(values),
outputs=(
values,
coeff,
out,
),
device=device,
)
assert_np_equal(out.numpy(), np.array((0, 3, 6, 9)))
devices = get_test_devices()
class TestLaunch(unittest.TestCase):
pass
add_function_test(TestLaunch, "test_launch_1d", test1d, devices=devices)
add_function_test(TestLaunch, "test_launch_2d", test2d, devices=devices)
add_function_test(TestLaunch, "test_launch_3d", test3d, devices=devices)
add_function_test(TestLaunch, "test_launch_4d", test4d, devices=devices)
add_function_test(TestLaunch, "test_launch_cmd", test_launch_cmd, devices=devices)
add_function_test(TestLaunch, "test_launch_cmd_set_param", test_launch_cmd_set_param, devices=devices)
add_function_test(TestLaunch, "test_launch_cmd_set_ctype", test_launch_cmd_set_ctype, devices=devices)
add_function_test(TestLaunch, "test_launch_cmd_set_dim", test_launch_cmd_set_dim, devices=devices)
add_function_test(TestLaunch, "test_launch_cmd_empty", test_launch_cmd_empty, devices=devices)
add_function_test(TestLaunch, "test_launch_tuple_args", test_launch_tuple_args, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 8,862 | Python | 24.107649 | 118 | 0.619499 |
NVIDIA/warp/warp/tests/test_large.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import unittest
from typing import Any
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def conditional_sum(result: wp.array(dtype=wp.uint64)):
i, j, k = wp.tid()
if i == 0:
wp.atomic_add(result, 0, wp.uint64(1))
def test_large_launch_large_kernel(test, device):
"""Test tid() on kernel launch of 2**33 threads.
The function conditional sum will add 1 to result for every thread that has an i index of 0.
Due to the size of the grid, this test is not run on CPUs
"""
test_result = wp.zeros(shape=(1,), dtype=wp.uint64, device=device)
large_dim_length = 2**16
half_result = large_dim_length * large_dim_length
wp.launch(kernel=conditional_sum, dim=[2, large_dim_length, large_dim_length], inputs=[test_result], device=device)
test.assertEqual(test_result.numpy()[0], half_result)
@wp.kernel
def count_elements(result: wp.array(dtype=wp.uint64)):
wp.atomic_add(result, 0, wp.uint64(1))
def test_large_launch_max_blocks(test, device):
# Loop over 1000x1x1 elements using a grid of 256 threads
test_result = wp.zeros(shape=(1,), dtype=wp.uint64, device=device)
wp.launch(count_elements, (1000,), inputs=[test_result], max_blocks=1, device=device)
test.assertEqual(test_result.numpy()[0], 1000)
    # Loop over 2x50x10 elements using a grid of 256 threads, using the tid() index to count half the elements
test_result.zero_()
wp.launch(
conditional_sum,
(
2,
50,
10,
),
inputs=[test_result],
max_blocks=1,
device=device,
)
test.assertEqual(test_result.numpy()[0], 500)
def test_large_launch_very_large_kernel(test, device):
"""Due to the size of the grid, this test is not run on CPUs"""
# Dim is chosen to be larger than the maximum CUDA one-dimensional grid size (total threads)
dim = (2**31 - 1) * 256 + 1
test_result = wp.zeros(shape=(1,), dtype=wp.uint64, device=device)
wp.launch(count_elements, (dim,), inputs=[test_result], device=device)
test.assertEqual(test_result.numpy()[0], dim)
def test_large_arrays_slow(test, device):
# The goal of this test is to use arrays just large enough to know
# if there's a flaw in handling arrays with more than 2**31-1 elements
# Unfortunately, it takes a long time to run so it won't be run automatically
# without changes to support how frequently a test may be run
total_elements = 2**31 + 8
# 2-D to 4-D arrays: test zero_, fill_, then zero_ for scalar data types:
for total_dims in range(2, 5):
dim_x = math.ceil(total_elements ** (1 / total_dims))
shape_tuple = tuple([dim_x] * total_dims)
for wptype in wp.types.scalar_types:
a1 = wp.zeros(shape_tuple, dtype=wptype, device=device)
assert_np_equal(a1.numpy(), np.zeros_like(a1.numpy()))
a1.fill_(127)
assert_np_equal(a1.numpy(), 127 * np.ones_like(a1.numpy()))
a1.zero_()
assert_np_equal(a1.numpy(), np.zeros_like(a1.numpy()))
@wp.kernel
def check_array_equal_value(data: wp.array2d(dtype=Any), expect: Any):
i, j = wp.tid()
wp.expect_eq(data[i, j], expect)
def test_large_arrays_fast(test, device):
# A truncated version of test_large_arrays_slow meant to catch basic errors
    # Make it so that a (dim_x, dim_x) array has more than 2**31 elements
dim_x = math.ceil(math.sqrt(2**31))
a1 = wp.zeros((dim_x, dim_x), dtype=wp.int8, device=device)
a1.fill_(127)
wp.launch(check_array_equal_value, a1.shape, inputs=[a1, wp.int8(127)], device=device)
a1.zero_()
wp.launch(check_array_equal_value, a1.shape, inputs=[a1, wp.int8(0)], device=device)
def test_large_array_excessive_zeros(test, device):
# Tests the allocation of an array with length exceeding 2**31-1 in a dimension
with test.assertRaisesRegex(
ValueError, "Array shapes must not exceed the maximum representable value of a signed 32-bit integer"
):
_ = wp.zeros((2**31), dtype=int, device=device)
def test_large_array_excessive_numpy(test, device):
# Tests the allocation of an array from a numpy array with length exceeding 2**31-1 in a dimension
large_np_array = np.empty((2**31), dtype=int)
with test.assertRaisesRegex(
ValueError, "Array shapes must not exceed the maximum representable value of a signed 32-bit integer"
):
_ = wp.array(large_np_array, device=device)
devices = get_test_devices()
class TestLarge(unittest.TestCase):
pass
add_function_test(
TestLarge,
"test_large_launch_large_kernel",
test_large_launch_large_kernel,
devices=get_selected_cuda_test_devices(),
)
add_function_test(TestLarge, "test_large_launch_max_blocks", test_large_launch_max_blocks, devices=devices)
add_function_test(
TestLarge,
"test_large_launch_very_large_kernel",
test_large_launch_very_large_kernel,
devices=get_selected_cuda_test_devices(),
)
add_function_test(TestLarge, "test_large_arrays_fast", test_large_arrays_fast, devices=devices)
add_function_test(TestLarge, "test_large_array_excessive_zeros", test_large_array_excessive_zeros, devices=devices)
add_function_test(TestLarge, "test_large_array_excessive_numpy", test_large_array_excessive_numpy, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 5,873 | Python | 33.552941 | 119 | 0.680742 |
NVIDIA/warp/warp/tests/test_examples.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Test Warp examples with unittest.
This module tests the Warp examples registered in it using the unittest
framework. When registering tests with add_example_test(), three optional
dictionaries can be provided: test_options, test_options_cuda, and
test_options_cpu. These are added to the command-line arguments in order, so
if a parameter is specified in both test_options and test_options_cuda, the
one in test_options_cuda takes precedence due to how argparse works.

Generally the test_options[_cpu,_cuda] dictionaries should be used to prevent
graphical windows from being opened by the example ({"headless": True}) and to
override example defaults so the example can run in less than ten seconds.
Use {"usd_required": True} and {"torch_required": True} to skip running the test
if usd-core or torch are not found in the Python environment.
Use the "num_frames" and "train_iters" keys to control the number of steps.
"""
import os
import subprocess
import sys
import unittest
from typing import Any, Dict, Optional, Type
import warp as wp
import warp.tests.unittest_utils
from warp.tests.unittest_utils import (
USD_AVAILABLE,
get_selected_cuda_test_devices,
get_test_devices,
sanitize_identifier,
)
def _build_command_line_options(test_options: Dict[str, Any]) -> list:
"""Helper function to build command-line options from the test options dictionary."""
additional_options = []
for key, value in test_options.items():
if key == "headless" and value:
additional_options.extend(["--headless"])
else:
# Just add --key value
additional_options.extend(["--" + key, str(value)])
return additional_options
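# A minimal sketch of the expected mapping (illustrative input, not from a test):
# _build_command_line_options({"headless": True, "num_frames": 2})
# -> ["--headless", "--num_frames", "2"]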
def _merge_options(base_options: Dict[str, Any], device_options: Dict[str, Any]) -> Dict[str, Any]:
"""Helper function to merge base test options with device-specific test options."""
merged_options = base_options.copy()
# Update options with device-specific dictionary, overwriting existing keys with the more-specific values
merged_options.update(device_options)
return merged_options
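# For example:
# _merge_options({"num_frames": 100, "headless": True}, {"num_frames": 10})
# -> {"num_frames": 10, "headless": True}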
def add_example_test(
cls: Type,
name: str,
devices: Optional[list] = None,
test_options: Optional[Dict[str, Any]] = None,
test_options_cpu: Optional[Dict[str, Any]] = None,
test_options_cuda: Optional[Dict[str, Any]] = None,
):
"""Registers a Warp example to run on ``devices`` as a TestCase."""
if test_options is None:
test_options = {}
if test_options_cpu is None:
test_options_cpu = {}
if test_options_cuda is None:
test_options_cuda = {}
def run(test, device):
if wp.get_device(device).is_cuda:
options = _merge_options(test_options, test_options_cuda)
else:
options = _merge_options(test_options, test_options_cpu)
# Mark the test as skipped if Torch is not installed but required
torch_required = options.pop("torch_required", False)
if torch_required:
try:
import torch
if wp.get_device(device).is_cuda and not torch.cuda.is_available():
# Ensure torch has CUDA support
test.skipTest("Torch not compiled with CUDA support")
except Exception as e:
test.skipTest(f"{e}")
# Mark the test as skipped if USD is not installed but required
usd_required = options.pop("usd_required", False)
if usd_required and not USD_AVAILABLE:
test.skipTest("Requires usd-core")
# Find the current Warp cache
warp_cache_path = wp.config.kernel_cache_dir
env_vars = os.environ.copy()
if warp_cache_path is not None:
env_vars["WARP_CACHE_PATH"] = warp_cache_path
if warp.tests.unittest_utils.coverage_enabled:
import tempfile
# Generate a random coverage data file name - file is deleted along with containing directory
with tempfile.NamedTemporaryFile(
dir=warp.tests.unittest_utils.coverage_temp_dir, delete=False
) as coverage_file:
pass
command = ["coverage", "run", f"--data-file={coverage_file.name}"]
if warp.tests.unittest_utils.coverage_branch:
command.append("--branch")
else:
command = [sys.executable]
# Append Warp commands
command.extend(["-m", f"warp.examples.{name}", "--device", str(device)])
stage_path = (
options.pop(
"stage_path",
os.path.join(os.path.dirname(__file__), f"outputs/{name}_{sanitize_identifier(device)}.usd"),
)
if USD_AVAILABLE
else "None"
)
if stage_path:
command.extend(["--stage_path", stage_path])
try:
os.remove(stage_path)
except OSError:
pass
command.extend(_build_command_line_options(options))
# with wp.ScopedTimer(f"{name}_{sanitize_identifier(device)}"):
# Run the script as a subprocess
result = subprocess.run(command, capture_output=True, text=True, env=env_vars)
# Check the return code (0 is standard for success)
test.assertEqual(
result.returncode,
0,
msg=f"Failed with return code {result.returncode}, command: {' '.join(command)}\n\nOutput:\n{result.stdout}\n{result.stderr}",
)
# If the test succeeded, try to clean up the output by default
if stage_path and result.returncode == 0:
try:
os.remove(stage_path)
except OSError:
pass
from warp.tests.unittest_utils import add_function_test
add_function_test(cls, f"test_{name}", run, devices=devices, check_output=False)
cuda_test_devices = get_selected_cuda_test_devices(mode="basic") # Don't test on multiple GPUs to save time
test_devices = get_test_devices(mode="basic")
# NOTE: To give the parallel test runner more opportunities to parallelize test cases,
# we break up the tests into multiple TestCase classes
class TestCoreExamples(unittest.TestCase):
pass
# Exclude unless we can run headless somehow
# add_example_test(TestCoreExamples, name="example_render_opengl")
add_example_test(TestCoreExamples, name="core.example_dem", devices=test_devices, test_options_cpu={"num_frames": 2})
add_example_test(
TestCoreExamples,
name="core.example_fluid",
devices=test_devices,
test_options={"num_frames": 100, "headless": True},
)
add_example_test(
TestCoreExamples,
name="core.example_graph_capture",
devices=test_devices,
test_options={"headless": True},
test_options_cpu={"num_frames": 100},
)
add_example_test(TestCoreExamples, name="core.example_marching_cubes", devices=cuda_test_devices)
add_example_test(TestCoreExamples, name="core.example_mesh", devices=test_devices, test_options={"usd_required": True})
add_example_test(
TestCoreExamples, name="core.example_mesh_intersect", devices=test_devices, test_options={"usd_required": True}
)
add_example_test(TestCoreExamples, name="core.example_nvdb", devices=test_devices)
add_example_test(
TestCoreExamples,
name="core.example_raycast",
devices=test_devices,
test_options={"usd_required": True, "headless": True},
)
add_example_test(
TestCoreExamples,
name="core.example_raymarch",
devices=test_devices,
test_options={"height": 512, "width": 1024, "headless": True},
)
add_example_test(TestCoreExamples, name="core.example_sph", devices=test_devices, test_options_cpu={"num_frames": 1})
add_example_test(
TestCoreExamples,
name="core.example_torch",
devices=test_devices,
test_options={"headless": True, "num_frames": 1000, "torch_required": True},
)
add_example_test(TestCoreExamples, name="core.example_wave", devices=test_devices)
class TestOptimExamples(unittest.TestCase):
pass
add_example_test(
TestOptimExamples, name="optim.example_bounce", devices=test_devices, test_options_cpu={"train_iters": 3}
)
add_example_test(
TestOptimExamples,
name="optim.example_drone",
devices=test_devices,
test_options={"headless": True},
test_options_cpu={"num_frames": 10},
)
add_example_test(
TestOptimExamples, name="optim.example_cloth_throw", devices=test_devices, test_options_cpu={"train_iters": 3}
)
add_example_test(
TestOptimExamples,
name="optim.example_diffray",
devices=test_devices,
test_options={"usd_required": True, "headless": True},
test_options_cpu={"train_iters": 2},
)
add_example_test(TestOptimExamples, name="optim.example_inverse_kinematics", devices=test_devices)
add_example_test(
TestOptimExamples,
name="optim.example_inverse_kinematics_torch",
devices=test_devices,
test_options={"torch_required": True},
)
add_example_test(TestOptimExamples, name="optim.example_spring_cage", devices=test_devices)
add_example_test(
TestOptimExamples,
name="optim.example_trajectory",
devices=test_devices,
test_options={"headless": True, "train_iters": 50},
)
# NOTE: This example uses CUTLASS and will run orders of magnitude slower when Warp is built in debug mode
add_example_test(
TestOptimExamples,
name="optim.example_walker",
devices=test_devices,
test_options={"usd_required": True},
test_options_cuda={
"train_iters": 1 if warp.context.runtime.core.is_debug_enabled() else 3,
"num_frames": 1 if warp.context.runtime.core.is_debug_enabled() else 60,
},
test_options_cpu={"train_iters": 1, "num_frames": 30},
)
class TestSimExamples(unittest.TestCase):
pass
add_example_test(TestSimExamples, name="sim.example_cartpole", devices=test_devices)
add_example_test(
TestSimExamples,
name="sim.example_cloth",
devices=test_devices,
test_options={"usd_required": True},
test_options_cpu={"num_frames": 10},
)
add_example_test(
TestSimExamples, name="sim.example_granular", devices=test_devices, test_options_cpu={"num_frames": 10}
)
add_example_test(TestSimExamples, name="sim.example_granular_collision_sdf", devices=cuda_test_devices)
add_example_test(TestSimExamples, name="sim.example_jacobian_ik", devices=test_devices)
add_example_test(TestSimExamples, name="sim.example_particle_chain", devices=test_devices)
add_example_test(
TestSimExamples, name="sim.example_quadruped", devices=test_devices, test_options_cpu={"num_frames": 100}
)
add_example_test(TestSimExamples, name="sim.example_rigid_chain", devices=test_devices)
add_example_test(
TestSimExamples,
name="sim.example_rigid_contact",
devices=test_devices,
test_options={"usd_required": True},
test_options_cpu={"num_frames": 3},
)
add_example_test(
TestSimExamples, name="sim.example_rigid_soft_contact", devices=test_devices, test_options_cpu={"num_frames": 10}
)
add_example_test(TestSimExamples, name="sim.example_rigid_force", devices=test_devices)
add_example_test(TestSimExamples, name="sim.example_rigid_gyroscopic", devices=test_devices)
add_example_test(
TestSimExamples, name="sim.example_soft_body", devices=test_devices, test_options_cpu={"num_frames": 10}
)
class TestFemExamples(unittest.TestCase):
pass
class TestFemDiffusionExamples(unittest.TestCase):
pass
add_example_test(
TestFemDiffusionExamples,
name="fem.example_diffusion_mgpu",
devices=get_selected_cuda_test_devices(mode="basic"),
test_options={"headless": True},
)
add_example_test(
TestFemExamples,
name="fem.example_apic_fluid",
devices=get_selected_cuda_test_devices(),
test_options={"num_frames": 5, "voxel_size": 2.0},
)
# The following examples do not need CUDA
add_example_test(
TestFemDiffusionExamples,
name="fem.example_diffusion",
devices=test_devices,
test_options={"resolution": 10, "mesh": "tri", "headless": True},
)
add_example_test(
TestFemDiffusionExamples, name="fem.example_diffusion_3d", devices=test_devices, test_options={"headless": True}
)
add_example_test(
TestFemExamples,
name="fem.example_deformed_geometry",
devices=test_devices,
test_options={"resolution": 10, "mesh": "tri", "headless": True},
)
add_example_test(
TestFemExamples,
name="fem.example_convection_diffusion",
devices=test_devices,
test_options={"resolution": 20, "headless": True},
)
add_example_test(
TestFemExamples,
name="fem.example_burgers",
devices=test_devices,
test_options={"resolution": 20, "num_frames": 25, "degree": 1, "headless": True},
)
add_example_test(
TestFemExamples,
name="fem.example_convection_diffusion_dg",
devices=test_devices,
test_options={"resolution": 20, "num_frames": 25, "mesh": "quad", "headless": True},
)
add_example_test(
TestFemExamples,
name="fem.example_mixed_elasticity",
devices=test_devices,
test_options={"nonconforming_stresses": True, "mesh": "quad", "headless": True},
)
add_example_test(
TestFemExamples, name="fem.example_stokes_transfer", devices=test_devices, test_options={"headless": True}
)
add_example_test(
TestFemExamples,
name="fem.example_stokes",
devices=test_devices,
test_options={"resolution": 10, "nonconforming_pressures": True, "headless": True},
)
add_example_test(
TestFemExamples,
name="fem.example_navier_stokes",
devices=test_devices,
test_options={"num_frames": 101, "resolution": 10, "tri_mesh": True, "headless": True},
)
if __name__ == "__main__":
# force rebuild of all kernels
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=True)
| 14,108 | Python | 33.923267 | 138 | 0.683513 |
NVIDIA/warp/warp/tests/test_generics.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
from typing import Any
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
@wp.func
def generic_adder(a: Any, b: Any):
return a + b
@wp.kernel
def test_generic_adder():
wp.expect_eq(generic_adder(17, 25), 42)
wp.expect_eq(generic_adder(7.0, 10.0), 17.0)
v1 = wp.vec3(1.0, 2.0, 3.0)
v2 = wp.vec3(10.0, 20.0, 30.0)
wp.expect_eq(generic_adder(v1, v2), wp.vec3(11.0, 22.0, 33.0))
# regular functions for floats
@wp.func
def specialized_func(a: float, b: float):
return a * b
@wp.func
def specialized_func(a: float, b: float, c: float):
return a * b * c
# generic forms
@wp.func
def specialized_func(a: Any, b: Any):
return a + b
@wp.func
def specialized_func(a: Any, b: Any, c: Any):
return a + b + c
# specializations for ints
@wp.func
def specialized_func(a: int, b: int):
return a - b
@wp.func
def specialized_func(a: int, b: int, c: int):
return a - b - c
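# When resolving a call, Warp prefers an overload whose argument types match
# exactly (the float and int forms above) over the generic Any forms, which
# act as a fallback for other types such as vectors.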
@wp.kernel
def test_specialized_func():
# subtraction with int args
wp.expect_eq(specialized_func(17, 25), -8)
wp.expect_eq(specialized_func(17, 25, 10), -18)
# multiplication with float args
wp.expect_eq(specialized_func(7.0, 10.0), 70.0)
wp.expect_eq(specialized_func(7.0, 10.0, 2.0), 140.0)
# addition with vector args
v1 = wp.vec3(1.0, 2.0, 3.0)
v2 = wp.vec3(10.0, 20.0, 30.0)
v3 = wp.vec3(100.0, 200.0, 300.0)
wp.expect_eq(specialized_func(v1, v2), wp.vec3(11.0, 22.0, 33.0))
wp.expect_eq(specialized_func(v1, v2, v3), wp.vec3(111.0, 222.0, 333.0))
# generic array kernel, version 1 (Any)
@wp.kernel
def generic_array_kernel_v1(a: Any, b: Any, c: Any):
tid = wp.tid()
sum = a[tid] + b[tid] # test direct access
c[tid] = generic_adder(sum, sum) # test generic function
wp.overload(generic_array_kernel_v1, [wp.array(dtype=int), wp.array(dtype=int), wp.array(dtype=int)])
wp.overload(generic_array_kernel_v1, [wp.array(dtype=float), wp.array(dtype=float), wp.array(dtype=float)])
wp.overload(generic_array_kernel_v1, [wp.array(dtype=wp.vec3), wp.array(dtype=wp.vec3), wp.array(dtype=wp.vec3)])
# generic array kernel, version 2 (generic dtype)
@wp.kernel
def generic_array_kernel_v2(a: wp.array(dtype=Any), b: wp.array(dtype=Any), c: wp.array(dtype=Any)):
tid = wp.tid()
sum = a[tid] + b[tid] # test direct access
c[tid] = generic_adder(sum, sum) # test generic function
wp.overload(generic_array_kernel_v2, [wp.array(dtype=int), wp.array(dtype=int), wp.array(dtype=int)])
wp.overload(generic_array_kernel_v2, [wp.array(dtype=float), wp.array(dtype=float), wp.array(dtype=float)])
wp.overload(generic_array_kernel_v2, [wp.array(dtype=wp.vec3), wp.array(dtype=wp.vec3), wp.array(dtype=wp.vec3)])
# generic array kernel, version 3 (unspecified dtype)
@wp.kernel
def generic_array_kernel_v3(a: wp.array(), b: wp.array(), c: wp.array()):
tid = wp.tid()
sum = a[tid] + b[tid] # test direct access
c[tid] = generic_adder(sum, sum) # test generic function
wp.overload(generic_array_kernel_v3, [wp.array(dtype=int), wp.array(dtype=int), wp.array(dtype=int)])
wp.overload(generic_array_kernel_v3, [wp.array(dtype=float), wp.array(dtype=float), wp.array(dtype=float)])
wp.overload(generic_array_kernel_v3, [wp.array(dtype=wp.vec3), wp.array(dtype=wp.vec3), wp.array(dtype=wp.vec3)])
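# All three annotation styles above (Any, wp.array(dtype=Any), and wp.array())
# declare equivalent generic kernels; the explicit wp.overload() calls
# pre-instantiate them for the concrete array types exercised below.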
def test_generic_array_kernel(test, device):
with wp.ScopedDevice(device):
n = 10
ai = wp.array(data=np.ones(n, dtype=np.int32))
ci = wp.empty(10, dtype=int)
af = wp.array(data=np.ones(n, dtype=np.float32))
cf = wp.empty(10, dtype=float)
a3 = wp.array(data=np.ones((n, 3), dtype=np.float32), dtype=wp.vec3)
c3 = wp.empty(n, dtype=wp.vec3)
wp.launch(generic_array_kernel_v1, dim=n, inputs=[af, af, cf])
wp.launch(generic_array_kernel_v1, dim=n, inputs=[ai, ai, ci])
wp.launch(generic_array_kernel_v1, dim=n, inputs=[a3, a3, c3])
assert_np_equal(ci.numpy(), np.full((n,), 4, dtype=np.int32))
assert_np_equal(cf.numpy(), np.full((n,), 4.0, dtype=np.float32))
assert_np_equal(c3.numpy(), np.full((n, 3), 4.0, dtype=np.float32))
wp.launch(generic_array_kernel_v2, dim=n, inputs=[af, af, cf])
wp.launch(generic_array_kernel_v2, dim=n, inputs=[ai, ai, ci])
wp.launch(generic_array_kernel_v2, dim=n, inputs=[a3, a3, c3])
assert_np_equal(ci.numpy(), np.full((n,), 4, dtype=np.int32))
assert_np_equal(cf.numpy(), np.full((n,), 4.0, dtype=np.float32))
assert_np_equal(c3.numpy(), np.full((n, 3), 4.0, dtype=np.float32))
wp.launch(generic_array_kernel_v3, dim=n, inputs=[af, af, cf])
wp.launch(generic_array_kernel_v3, dim=n, inputs=[ai, ai, ci])
wp.launch(generic_array_kernel_v3, dim=n, inputs=[a3, a3, c3])
assert_np_equal(ci.numpy(), np.full((n,), 4, dtype=np.int32))
assert_np_equal(cf.numpy(), np.full((n,), 4.0, dtype=np.float32))
assert_np_equal(c3.numpy(), np.full((n, 3), 4.0, dtype=np.float32))
# kernel that adds any scalar value to an array
@wp.kernel
def generic_accumulator_kernel(a: wp.array(dtype=wp.float64), value: Any):
tid = wp.tid()
a[tid] = a[tid] + wp.float64(value)
# overload named args
wp.overload(generic_accumulator_kernel, {"value": int})
wp.overload(generic_accumulator_kernel, {"value": float})
wp.overload(generic_accumulator_kernel, {"value": wp.float64})
wp.overload(generic_accumulator_kernel, {"value": wp.bool})
def test_generic_accumulator_kernel(test, device):
with wp.ScopedDevice(device):
n = 10
a = wp.zeros(n, dtype=wp.float64)
wp.launch(generic_accumulator_kernel, dim=a.size, inputs=[a, 25])
wp.launch(generic_accumulator_kernel, dim=a.size, inputs=[a, 17.0])
wp.launch(generic_accumulator_kernel, dim=a.size, inputs=[a, wp.float64(8.0)])
wp.launch(generic_accumulator_kernel, dim=a.size, inputs=[a, wp.bool(True)])
assert_np_equal(a.numpy(), np.full((n,), 51.0, dtype=np.float64))
# generic kernel used to automatically generate overloads from launch args
@wp.kernel
def generic_fill(a: wp.array(dtype=Any), value: Any):
tid = wp.tid()
a[tid] = value
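# No explicit overloads are declared here: launching with concrete argument
# types (see test_generic_fill below) instantiates the required overloads
# automatically.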
def test_generic_fill(test, device):
with wp.ScopedDevice(device):
n = 10
ai = wp.zeros(n, dtype=int)
af = wp.zeros(n, dtype=float)
a3 = wp.zeros(n, dtype=wp.vec3)
ab = wp.zeros(n, dtype=wp.bool)
wp.launch(generic_fill, dim=ai.size, inputs=[ai, 42])
wp.launch(generic_fill, dim=af.size, inputs=[af, 17.0])
wp.launch(generic_fill, dim=a3.size, inputs=[a3, wp.vec3(5.0, 5.0, 5.0)])
wp.launch(generic_fill, dim=ab.size, inputs=[ab, wp.bool(True)])
assert_np_equal(ai.numpy(), np.full((n,), 42, dtype=np.int32))
assert_np_equal(af.numpy(), np.full((n,), 17.0, dtype=np.float32))
assert_np_equal(a3.numpy(), np.full((n, 3), 5.0, dtype=np.float32))
assert_np_equal(ab.numpy(), np.full((n,), True, dtype=np.bool_))
# generic kernel used to create and launch explicit overloads
@wp.kernel
def generic_fill_v2(a: wp.array(dtype=Any), value: Any):
tid = wp.tid()
a[tid] = value
vec3b_type = wp.vec(3, wp.bool)
# create explicit overloads to be launched directly
fill_int = wp.overload(generic_fill_v2, [wp.array(dtype=int), int])
fill_float = wp.overload(generic_fill_v2, [wp.array(dtype=float), float])
fill_vec3 = wp.overload(generic_fill_v2, [wp.array(dtype=wp.vec3), wp.vec3])
fill_vec3b = wp.overload(generic_fill_v2, [wp.array(dtype=vec3b_type), vec3b_type])
def test_generic_fill_overloads(test, device):
with wp.ScopedDevice(device):
n = 10
ai = wp.zeros(n, dtype=int)
af = wp.zeros(n, dtype=float)
a3 = wp.zeros(n, dtype=wp.vec3)
a3b = wp.zeros(n, dtype=vec3b_type)
wp.launch(fill_int, dim=ai.size, inputs=[ai, 42])
wp.launch(fill_float, dim=af.size, inputs=[af, 17.0])
wp.launch(fill_vec3, dim=a3.size, inputs=[a3, wp.vec3(5.0, 5.0, 5.0)])
wp.launch(fill_vec3b, dim=a3b.size, inputs=[a3b, vec3b_type([True, True, True])])
assert_np_equal(ai.numpy(), np.full((n,), 42, dtype=np.int32))
assert_np_equal(af.numpy(), np.full((n,), 17.0, dtype=np.float32))
assert_np_equal(a3.numpy(), np.full((n, 3), 5.0, dtype=np.float32))
assert_np_equal(a3b.numpy(), np.full((n, 3), True, dtype=np.bool_))
# custom vector/matrix types
my_vec5 = wp.vec(length=5, dtype=wp.float32)
my_mat55 = wp.mat(shape=(5, 5), dtype=wp.float32)
@wp.kernel
def generic_transform(v: Any, m: Any, expected: Any):
result = wp.mul(m, v)
wp.expect_eq(result, expected)
# use overload decorator syntax
@wp.overload
def generic_transform(v: wp.vec2, m: wp.mat22, expected: wp.vec2): # fmt: skip
...
@wp.overload
def generic_transform(v: wp.vec3, m: wp.mat33, expected: wp.vec3): # fmt: skip
...
@wp.overload
def generic_transform(v: wp.vec4, m: wp.mat44, expected: wp.vec4): # fmt: skip
...
@wp.overload
def generic_transform(v: my_vec5, m: my_mat55, expected: my_vec5): # fmt: skip
...
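# The decorated stubs above only declare overload signatures; their `...`
# bodies are ignored and the generic_transform implementation is reused for
# each instantiation.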
def test_generic_transform_kernel(test, device):
with wp.ScopedDevice(device):
v2 = wp.vec2(1, 2)
m22 = wp.mat22(2, 0, 0, 2)
e2 = wp.vec2(2, 4)
v3 = wp.vec3(1, 2, 3)
m33 = wp.mat33(2, 0, 0, 0, 2, 0, 0, 0, 2)
e3 = wp.vec3(2, 4, 6)
v4 = wp.vec4(1, 2, 3, 4)
m44 = wp.mat44(2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2)
e4 = wp.vec4(2, 4, 6, 8)
v5 = my_vec5(1, 2, 3, 4, 5)
m55 = my_mat55(2, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 2)
e5 = my_vec5(2, 4, 6, 8, 10)
wp.launch(generic_transform, dim=1, inputs=[v2, m22, e2])
wp.launch(generic_transform, dim=1, inputs=[v3, m33, e3])
wp.launch(generic_transform, dim=1, inputs=[v4, m44, e4])
wp.launch(generic_transform, dim=1, inputs=[v5, m55, e5])
wp.synchronize()
@wp.kernel
def generic_transform_array(v: wp.array(), m: wp.array(), result: wp.array()):
tid = wp.tid()
result[tid] = wp.mul(m[tid], v[tid])
wp.overload(generic_transform_array, [wp.array(dtype=wp.vec2), wp.array(dtype=wp.mat22), wp.array(dtype=wp.vec2)])
wp.overload(generic_transform_array, [wp.array(dtype=wp.vec3), wp.array(dtype=wp.mat33), wp.array(dtype=wp.vec3)])
wp.overload(generic_transform_array, [wp.array(dtype=wp.vec4), wp.array(dtype=wp.mat44), wp.array(dtype=wp.vec4)])
wp.overload(generic_transform_array, [wp.array(dtype=my_vec5), wp.array(dtype=my_mat55), wp.array(dtype=my_vec5)])
def test_generic_transform_array_kernel(test, device):
with wp.ScopedDevice(device):
n = 10
a2_data = np.tile(np.arange(2, dtype=np.float32), (n, 1))
a3_data = np.tile(np.arange(3, dtype=np.float32), (n, 1))
a4_data = np.tile(np.arange(4, dtype=np.float32), (n, 1))
a5_data = np.tile(np.arange(5, dtype=np.float32), (n, 1))
m22_data = np.tile((np.identity(2, dtype=np.float32) * 2), (n, 1, 1))
m33_data = np.tile((np.identity(3, dtype=np.float32) * 2), (n, 1, 1))
m44_data = np.tile((np.identity(4, dtype=np.float32) * 2), (n, 1, 1))
m55_data = np.tile((np.identity(5, dtype=np.float32) * 2), (n, 1, 1))
a2 = wp.array(data=a2_data, dtype=wp.vec2)
a3 = wp.array(data=a3_data, dtype=wp.vec3)
a4 = wp.array(data=a4_data, dtype=wp.vec4)
a5 = wp.array(data=a5_data, dtype=my_vec5)
m22 = wp.array(data=m22_data, dtype=wp.mat22)
m33 = wp.array(data=m33_data, dtype=wp.mat33)
m44 = wp.array(data=m44_data, dtype=wp.mat44)
m55 = wp.array(data=m55_data, dtype=my_mat55)
b2 = wp.zeros_like(a2)
b3 = wp.zeros_like(a3)
b4 = wp.zeros_like(a4)
b5 = wp.zeros_like(a5)
wp.launch(generic_transform_array, dim=n, inputs=[a2, m22, b2])
wp.launch(generic_transform_array, dim=n, inputs=[a3, m33, b3])
wp.launch(generic_transform_array, dim=n, inputs=[a4, m44, b4])
wp.launch(generic_transform_array, dim=n, inputs=[a5, m55, b5])
assert_np_equal(b2.numpy(), a2_data * 2)
assert_np_equal(b3.numpy(), a3_data * 2)
assert_np_equal(b4.numpy(), a4_data * 2)
assert_np_equal(b5.numpy(), a5_data * 2)
@wp.struct
class Foo:
x: float
y: float
z: float
@wp.struct
class Bar:
x: wp.vec3
y: wp.vec3
z: wp.vec3
@wp.kernel
def test_generic_struct_kernel(s: Any):
# test member access for generic structs
wp.expect_eq(s.x + s.y, s.z)
wp.overload(test_generic_struct_kernel, [Foo])
wp.overload(test_generic_struct_kernel, [Bar])
@wp.kernel
def test_generic_type_cast_kernel(a: Any, b: Any):
a = type(a)(b)
c = type(generic_adder(b, b))(a)
wp.expect_eq(b, c)
wp.overload(test_generic_type_cast_kernel, [wp.float32, wp.float64])
wp.overload(test_generic_type_cast_kernel, [wp.float32, wp.int32])
wp.overload(test_generic_type_cast_kernel, [wp.vec3f, wp.vec3d])
wp.overload(test_generic_type_cast_kernel, [wp.mat22f, wp.mat22d])
def test_generic_type_cast(test, device):
with wp.ScopedDevice(device):
wp.launch(test_generic_type_cast_kernel, dim=1, inputs=[1.0, 2.0])
wp.launch(test_generic_type_cast_kernel, dim=1, inputs=[2.0, -5])
wp.launch(test_generic_type_cast_kernel, dim=1, inputs=[wp.vec3f(1.0, 2.0, 3.0), wp.vec3d(4.0, 5.0, 6.0)])
wp.launch(test_generic_type_cast_kernel, dim=1, inputs=[wp.mat22f(0.0), wp.mat22d(np.eye(2))])
wp.synchronize()
@wp.kernel
def test_generic_scalar_construction_kernel(a: wp.array(dtype=Any)):
zero = type(a[0])(0)
copy = a.dtype(a[0])
copy += zero
wp.expect_eq(copy, a[0])
wp.overload(test_generic_scalar_construction_kernel, [wp.array(dtype=wp.int32)])
wp.overload(test_generic_scalar_construction_kernel, [wp.array(dtype=wp.float64)])
def test_generic_scalar_construction(test, device):
with wp.ScopedDevice(device):
wp.launch(test_generic_scalar_construction_kernel, dim=1, inputs=[wp.array([1.0], dtype=wp.int32)])
wp.launch(test_generic_scalar_construction_kernel, dim=1, inputs=[wp.array([-5], dtype=wp.float64)])
wp.synchronize()
@wp.kernel
def test_generic_type_construction_kernel(a: wp.array(dtype=Any)):
zero = type(a[0])()
copy = type(a).dtype(a[0]) * a.dtype.dtype(1.0)
copy += zero
wp.expect_eq(copy, a[0])
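# In the kernel above, type(a) is the array type, type(a).dtype its element
# type (e.g. vec3f), and a.dtype.dtype the element's scalar type (e.g. float32).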
wp.overload(test_generic_type_construction_kernel, [wp.array(dtype=wp.vec3f)])
wp.overload(test_generic_type_construction_kernel, [wp.array(dtype=wp.mat22d)])
def test_generic_type_construction(test, device):
with wp.ScopedDevice(device):
wp.launch(test_generic_type_construction_kernel, dim=1, inputs=[wp.array([1.0, 2.0, 3.0], dtype=wp.vec3f)])
wp.launch(test_generic_type_construction_kernel, dim=1, inputs=[wp.array([np.eye(2)], dtype=wp.mat22d)])
wp.synchronize()
@wp.kernel
def test_generic_struct_construction_kernel(a: Any):
b = type(a)(a.x, a.y, a.z)
wp.expect_eq(a.x, b.x)
wp.expect_eq(a.y, b.y)
wp.expect_eq(a.z, b.z)
wp.overload(test_generic_struct_construction_kernel, [Foo])
wp.overload(test_generic_struct_construction_kernel, [Bar])
@wp.kernel
def test_generic_type_as_argument_kernel(a: Any):
vec = wp.vector(length=2, dtype=type(a))
matrix = wp.identity(n=vec.length, dtype=vec.dtype) * a
wp.expect_eq(wp.trace(matrix), type(a)(2.0) * a)
wp.overload(test_generic_type_as_argument_kernel, [wp.float32])
wp.overload(test_generic_type_as_argument_kernel, [wp.float64])
def test_generic_type_as_argument(test, device):
with wp.ScopedDevice(device):
wp.launch(test_generic_type_as_argument_kernel, dim=1, inputs=[2.0])
wp.launch(test_generic_type_as_argument_kernel, dim=1, inputs=[-1.0])
wp.synchronize()
def test_type_operator_misspell(test, device):
@wp.kernel
def kernel():
i = wp.tid()
_ = typez(i)(0)
with test.assertRaisesRegex(KeyError, r"Referencing undefined symbol: typez"):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_type_attribute_error(test, device):
@wp.kernel
def kernel():
a = wp.vec3(0.0)
_ = a.dtype.shape
with test.assertRaisesRegex(AttributeError, r"`shape` is not an attribute of '<class 'warp.types.float32'>'"):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
class TestGenerics(unittest.TestCase):
pass
devices = get_test_devices()
add_kernel_test(TestGenerics, name="test_generic_adder", kernel=test_generic_adder, dim=1, devices=devices)
add_kernel_test(TestGenerics, name="test_specialized_func", kernel=test_specialized_func, dim=1, devices=devices)
add_function_test(TestGenerics, "test_generic_array_kernel", test_generic_array_kernel, devices=devices)
add_function_test(TestGenerics, "test_generic_accumulator_kernel", test_generic_accumulator_kernel, devices=devices)
add_function_test(TestGenerics, "test_generic_fill", test_generic_fill, devices=devices)
add_function_test(TestGenerics, "test_generic_fill_overloads", test_generic_fill_overloads, devices=devices)
add_function_test(TestGenerics, "test_generic_transform_kernel", test_generic_transform_kernel, devices=devices)
add_function_test(
TestGenerics, "test_generic_transform_array_kernel", test_generic_transform_array_kernel, devices=devices
)
add_function_test(TestGenerics, "test_generic_type_cast", test_generic_type_cast, devices=devices)
add_function_test(TestGenerics, "test_generic_type_construction", test_generic_type_construction, devices=devices)
add_function_test(TestGenerics, "test_generic_scalar_construction", test_generic_scalar_construction, devices=devices)
add_function_test(TestGenerics, "test_generic_type_as_argument", test_generic_type_as_argument, devices=devices)
foo = Foo()
foo.x = 17.0
foo.y = 25.0
foo.z = 42.0
bar = Bar()
bar.x = wp.vec3(1, 2, 3)
bar.y = wp.vec3(10, 20, 30)
bar.z = wp.vec3(11, 22, 33)
add_kernel_test(
TestGenerics,
name="test_generic_struct_kernel",
kernel=test_generic_struct_kernel,
dim=1,
inputs=[foo],
devices=devices,
)
add_kernel_test(
TestGenerics,
name="test_generic_struct_kernel",
kernel=test_generic_struct_kernel,
dim=1,
inputs=[bar],
devices=devices,
)
add_kernel_test(
TestGenerics,
name="test_generic_struct_construction_kernel",
kernel=test_generic_struct_construction_kernel,
dim=1,
inputs=[foo],
devices=devices,
)
add_kernel_test(
TestGenerics,
name="test_generic_struct_construction_kernel",
kernel=test_generic_struct_construction_kernel,
dim=1,
inputs=[bar],
devices=devices,
)
add_function_test(TestGenerics, "test_type_operator_misspell", test_type_operator_misspell, devices=devices)
add_function_test(TestGenerics, "test_type_attribute_error", test_type_attribute_error, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 19,605 | Python | 33.396491 | 118 | 0.649222 |
NVIDIA/warp/warp/tests/test_fp16.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def load_store_half(f32: wp.array(dtype=wp.float32), f16: wp.array(dtype=wp.float16)):
tid = wp.tid()
# check conversion from f32->f16
a = wp.float16(f32[tid])
b = f16[tid]
wp.expect_eq(a, b)
# check stores
f16[tid] = a
def test_fp16_conversion(test, device):
s = [1.0, 2.0, 3.0, -3.14159]
np_f32 = np.array(s, dtype=np.float32)
np_f16 = np.array(s, dtype=np.float16)
wp_f32 = wp.array(s, dtype=wp.float32, device=device)
wp_f16 = wp.array(s, dtype=wp.float16, device=device)
assert_np_equal(np_f32, wp_f32.numpy())
assert_np_equal(np_f16, wp_f16.numpy())
wp.launch(load_store_half, dim=len(s), inputs=[wp_f32, wp_f16], device=device)
# check that stores worked
assert_np_equal(np_f16, wp_f16.numpy())
@wp.kernel
def value_load_store_half(f16_value: wp.float16, f16_array: wp.array(dtype=wp.float16)):
wp.expect_eq(f16_value, f16_array[0])
# check stores
f16_array[0] = f16_value
def test_fp16_kernel_parameter(test, device):
"""Test the ability to pass in fp16 into kernels as parameters"""
s = [1.0, 2.0, 3.0, -3.14159]
for test_val in s:
np_f16 = np.array([test_val], dtype=np.float16)
wp_f16 = wp.array([test_val], dtype=wp.float16, device=device)
wp.launch(value_load_store_half, (1,), inputs=[wp.float16(test_val), wp_f16], device=device)
# check that stores worked
assert_np_equal(np_f16, wp_f16.numpy())
# Do the same thing but pass in test_val as a Python float to test automatic conversion
wp_f16 = wp.array([test_val], dtype=wp.float16, device=device)
wp.launch(value_load_store_half, (1,), inputs=[test_val, wp_f16], device=device)
assert_np_equal(np_f16, wp_f16.numpy())
@wp.kernel
def mul_half(input: wp.array(dtype=wp.float16), output: wp.array(dtype=wp.float16)):
tid = wp.tid()
# convert to compute type fp32
x = wp.float(input[tid]) * 2.0
# store back as fp16
output[tid] = wp.float16(x)
def test_fp16_grad(test, device):
rng = np.random.default_rng(123)
    # Checks that gradients are correctly propagated for fp16 arrays,
    # even when intermediate calculations are performed in, e.g., fp32.
s = rng.random(size=15).astype(np.float16)
input = wp.array(s, dtype=wp.float16, device=device, requires_grad=True)
output = wp.zeros_like(input)
tape = wp.Tape()
with tape:
wp.launch(mul_half, dim=len(s), inputs=[input, output], device=device)
ones = wp.array(np.ones(len(output)), dtype=wp.float16, device=device)
tape.backward(grads={output: ones})
assert_np_equal(input.grad.numpy(), np.ones(len(s)) * 2.0)
class TestFp16(unittest.TestCase):
pass
devices = []
if wp.is_cpu_available():
devices.append("cpu")
for cuda_device in get_selected_cuda_test_devices():
if cuda_device.arch >= 70:
devices.append(cuda_device)
add_function_test(TestFp16, "test_fp16_conversion", test_fp16_conversion, devices=devices)
add_function_test(TestFp16, "test_fp16_grad", test_fp16_grad, devices=devices)
add_function_test(TestFp16, "test_fp16_kernel_parameter", test_fp16_kernel_parameter, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 3,824 | Python | 28.651163 | 102 | 0.675209 |
NVIDIA/warp/warp/tests/test_intersect.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def intersect_tri(
v0: wp.vec3, v1: wp.vec3, v2: wp.vec3, u0: wp.vec3, u1: wp.vec3, u2: wp.vec3, result: wp.array(dtype=int)
):
tid = wp.tid()
result[0] = wp.intersect_tri_tri(v0, v1, v2, u0, u1, u2)
def test_intersect_tri(test, device):
points_intersect = [
wp.vec3(0.0, 0.0, 0.0),
wp.vec3(1.0, 0.0, 0.0),
wp.vec3(0.0, 0.0, 1.0),
wp.vec3(0.5, -0.5, 0.0),
wp.vec3(0.5, -0.5, 1.0),
wp.vec3(0.5, 0.5, 0.0),
]
points_separated = [
wp.vec3(0.0, 0.0, 0.0),
wp.vec3(1.0, 0.0, 0.0),
wp.vec3(0.0, 0.0, 1.0),
wp.vec3(-0.5, -0.5, 0.0),
wp.vec3(-0.5, -0.5, 1.0),
wp.vec3(-0.5, 0.5, 0.0),
]
result = wp.zeros(1, dtype=int, device=device)
wp.launch(intersect_tri, dim=1, inputs=[*points_intersect, result], device=device)
assert_np_equal(result.numpy(), np.array([1]))
wp.launch(intersect_tri, dim=1, inputs=[*points_separated, result], device=device)
assert_np_equal(result.numpy(), np.array([0]))
devices = get_test_devices()
class TestIntersect(unittest.TestCase):
pass
add_function_test(TestIntersect, "test_intersect_tri", test_intersect_tri, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=False)
| 1,855 | Python | 27.121212 | 109 | 0.638814 |
NVIDIA/warp/warp/tests/test_torch.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def op_kernel(x: wp.array(dtype=float), y: wp.array(dtype=float)):
tid = wp.tid()
y[tid] = 0.5 - x[tid] * 2.0
@wp.kernel
def inc(a: wp.array(dtype=float)):
tid = wp.tid()
a[tid] = a[tid] + 1.0
@wp.kernel
def arange(start: int, step: int, a: wp.array(dtype=int)):
tid = wp.tid()
a[tid] = start + step * tid
# copy elements between non-contiguous 1d arrays of float
@wp.kernel
def copy1d_float_kernel(dst: wp.array(dtype=float), src: wp.array(dtype=float)):
i = wp.tid()
dst[i] = src[i]
# copy elements between non-contiguous 2d arrays of float
@wp.kernel
def copy2d_float_kernel(dst: wp.array2d(dtype=float), src: wp.array2d(dtype=float)):
i, j = wp.tid()
dst[i, j] = src[i, j]
# copy elements between non-contiguous 3d arrays of float
@wp.kernel
def copy3d_float_kernel(dst: wp.array3d(dtype=float), src: wp.array3d(dtype=float)):
i, j, k = wp.tid()
dst[i, j, k] = src[i, j, k]
# copy elements between non-contiguous 2d arrays of vec3
@wp.kernel
def copy2d_vec3_kernel(dst: wp.array2d(dtype=wp.vec3), src: wp.array2d(dtype=wp.vec3)):
i, j = wp.tid()
dst[i, j] = src[i, j]
# copy elements between non-contiguous 2d arrays of mat22
@wp.kernel
def copy2d_mat22_kernel(dst: wp.array2d(dtype=wp.mat22), src: wp.array2d(dtype=wp.mat22)):
i, j = wp.tid()
dst[i, j] = src[i, j]
def test_dtype_from_torch(test, device):
import torch
def test_conversions(torch_type, warp_type):
test.assertEqual(wp.dtype_from_torch(torch_type), warp_type)
test_conversions(torch.float16, wp.float16)
test_conversions(torch.float32, wp.float32)
test_conversions(torch.float64, wp.float64)
test_conversions(torch.int8, wp.int8)
test_conversions(torch.int16, wp.int16)
test_conversions(torch.int32, wp.int32)
test_conversions(torch.int64, wp.int64)
test_conversions(torch.uint8, wp.uint8)
test_conversions(torch.bool, wp.bool)
def test_dtype_to_torch(test, device):
import torch
def test_conversions(warp_type, torch_type):
test.assertEqual(wp.dtype_to_torch(warp_type), torch_type)
test_conversions(wp.float16, torch.float16)
test_conversions(wp.float32, torch.float32)
test_conversions(wp.float64, torch.float64)
test_conversions(wp.int8, torch.int8)
test_conversions(wp.int16, torch.int16)
test_conversions(wp.int32, torch.int32)
test_conversions(wp.int64, torch.int64)
test_conversions(wp.uint8, torch.uint8)
test_conversions(wp.uint16, torch.int16)
test_conversions(wp.uint32, torch.int32)
test_conversions(wp.uint64, torch.int64)
test_conversions(wp.bool, torch.bool)
def test_device_conversion(test, device):
torch_device = wp.device_to_torch(device)
warp_device = wp.device_from_torch(torch_device)
test.assertEqual(warp_device, device)
def test_torch_zerocopy(test, device):
import torch
a = wp.zeros(10, dtype=wp.float32, device=device)
t = wp.to_torch(a)
assert a.ptr == t.data_ptr()
torch_device = wp.device_to_torch(device)
t = torch.zeros(10, dtype=torch.float32, device=torch_device)
a = wp.from_torch(t)
assert a.ptr == t.data_ptr()
def test_from_torch(test, device):
import torch
torch_device = wp.device_to_torch(device)
# automatically determine warp dtype
def wrap_scalar_tensor_implicit(torch_dtype, expected_warp_dtype):
t = torch.zeros(10, dtype=torch_dtype, device=torch_device)
a = wp.from_torch(t)
assert a.dtype == expected_warp_dtype
assert a.shape == tuple(t.shape)
wrap_scalar_tensor_implicit(torch.float64, wp.float64)
wrap_scalar_tensor_implicit(torch.float32, wp.float32)
wrap_scalar_tensor_implicit(torch.float16, wp.float16)
wrap_scalar_tensor_implicit(torch.int64, wp.int64)
wrap_scalar_tensor_implicit(torch.int32, wp.int32)
wrap_scalar_tensor_implicit(torch.int16, wp.int16)
wrap_scalar_tensor_implicit(torch.int8, wp.int8)
wrap_scalar_tensor_implicit(torch.uint8, wp.uint8)
wrap_scalar_tensor_implicit(torch.bool, wp.bool)
# explicitly specify warp dtype
def wrap_scalar_tensor_explicit(torch_dtype, expected_warp_dtype):
t = torch.zeros(10, dtype=torch_dtype, device=torch_device)
a = wp.from_torch(t, expected_warp_dtype)
assert a.dtype == expected_warp_dtype
assert a.shape == tuple(t.shape)
wrap_scalar_tensor_explicit(torch.float64, wp.float64)
wrap_scalar_tensor_explicit(torch.float32, wp.float32)
wrap_scalar_tensor_explicit(torch.float16, wp.float16)
wrap_scalar_tensor_explicit(torch.int64, wp.int64)
wrap_scalar_tensor_explicit(torch.int64, wp.uint64)
wrap_scalar_tensor_explicit(torch.int32, wp.int32)
wrap_scalar_tensor_explicit(torch.int32, wp.uint32)
wrap_scalar_tensor_explicit(torch.int16, wp.int16)
wrap_scalar_tensor_explicit(torch.int16, wp.uint16)
wrap_scalar_tensor_explicit(torch.int8, wp.int8)
wrap_scalar_tensor_explicit(torch.int8, wp.uint8)
wrap_scalar_tensor_explicit(torch.uint8, wp.uint8)
wrap_scalar_tensor_explicit(torch.uint8, wp.int8)
wrap_scalar_tensor_explicit(torch.bool, wp.uint8)
wrap_scalar_tensor_explicit(torch.bool, wp.int8)
wrap_scalar_tensor_explicit(torch.bool, wp.bool)
def wrap_vec_tensor(n, desired_warp_dtype):
t = torch.zeros((10, n), dtype=torch.float32, device=torch_device)
a = wp.from_torch(t, desired_warp_dtype)
assert a.dtype == desired_warp_dtype
assert a.shape == (10,)
wrap_vec_tensor(2, wp.vec2)
wrap_vec_tensor(3, wp.vec3)
wrap_vec_tensor(4, wp.vec4)
wrap_vec_tensor(6, wp.spatial_vector)
wrap_vec_tensor(7, wp.transform)
def wrap_mat_tensor(n, m, desired_warp_dtype):
t = torch.zeros((10, n, m), dtype=torch.float32, device=torch_device)
a = wp.from_torch(t, desired_warp_dtype)
assert a.dtype == desired_warp_dtype
assert a.shape == (10,)
wrap_mat_tensor(2, 2, wp.mat22)
wrap_mat_tensor(3, 3, wp.mat33)
wrap_mat_tensor(4, 4, wp.mat44)
wrap_mat_tensor(6, 6, wp.spatial_matrix)
def wrap_vec_tensor_with_grad(n, desired_warp_dtype):
t = torch.zeros((10, n), dtype=torch.float32, device=torch_device)
a = wp.from_torch(t, desired_warp_dtype, requires_grad=True)
assert a.dtype == desired_warp_dtype
assert a.shape == (10,)
wrap_vec_tensor_with_grad(2, wp.vec2)
wrap_vec_tensor_with_grad(3, wp.vec3)
wrap_vec_tensor_with_grad(4, wp.vec4)
wrap_vec_tensor_with_grad(6, wp.spatial_vector)
wrap_vec_tensor_with_grad(7, wp.transform)
def wrap_mat_tensor_with_grad(n, m, desired_warp_dtype):
t = torch.zeros((10, n, m), dtype=torch.float32, device=torch_device)
a = wp.from_torch(t, desired_warp_dtype, requires_grad=True)
assert a.dtype == desired_warp_dtype
assert a.shape == (10,)
wrap_mat_tensor_with_grad(2, 2, wp.mat22)
wrap_mat_tensor_with_grad(3, 3, wp.mat33)
wrap_mat_tensor_with_grad(4, 4, wp.mat44)
wrap_mat_tensor_with_grad(6, 6, wp.spatial_matrix)
def test_to_torch(test, device):
import torch
def wrap_scalar_array(warp_dtype, expected_torch_dtype):
a = wp.zeros(10, dtype=warp_dtype, device=device)
t = wp.to_torch(a)
assert t.dtype == expected_torch_dtype
assert tuple(t.shape) == a.shape
wrap_scalar_array(wp.float64, torch.float64)
wrap_scalar_array(wp.float32, torch.float32)
wrap_scalar_array(wp.float16, torch.float16)
wrap_scalar_array(wp.int64, torch.int64)
wrap_scalar_array(wp.int32, torch.int32)
wrap_scalar_array(wp.int16, torch.int16)
wrap_scalar_array(wp.int8, torch.int8)
wrap_scalar_array(wp.uint8, torch.uint8)
wrap_scalar_array(wp.bool, torch.bool)
# not supported by torch
# wrap_scalar_array(wp.uint64, torch.int64)
# wrap_scalar_array(wp.uint32, torch.int32)
# wrap_scalar_array(wp.uint16, torch.int16)
def wrap_vec_array(n, warp_dtype):
a = wp.zeros(10, dtype=warp_dtype, device=device)
t = wp.to_torch(a)
assert t.dtype == torch.float32
assert tuple(t.shape) == (10, n)
wrap_vec_array(2, wp.vec2)
wrap_vec_array(3, wp.vec3)
wrap_vec_array(4, wp.vec4)
wrap_vec_array(6, wp.spatial_vector)
wrap_vec_array(7, wp.transform)
def wrap_mat_array(n, m, warp_dtype):
a = wp.zeros(10, dtype=warp_dtype, device=device)
t = wp.to_torch(a)
assert t.dtype == torch.float32
assert tuple(t.shape) == (10, n, m)
wrap_mat_array(2, 2, wp.mat22)
wrap_mat_array(3, 3, wp.mat33)
wrap_mat_array(4, 4, wp.mat44)
wrap_mat_array(6, 6, wp.spatial_matrix)
def test_from_torch_slices(test, device):
import torch
torch_device = wp.device_to_torch(device)
# 1D slice, contiguous
t_base = torch.arange(10, dtype=torch.float32, device=torch_device)
t = t_base[2:9]
a = wp.from_torch(t)
assert a.ptr == t.data_ptr()
assert a.is_contiguous
assert a.shape == tuple(t.shape)
assert_np_equal(a.numpy(), t.cpu().numpy())
# 1D slice with non-contiguous stride
t_base = torch.arange(10, dtype=torch.float32, device=torch_device)
t = t_base[2:9:2]
a = wp.from_torch(t)
assert a.ptr == t.data_ptr()
assert not a.is_contiguous
assert a.shape == tuple(t.shape)
# copy contents to contiguous array
a_contiguous = wp.empty_like(a)
wp.launch(copy1d_float_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
# 2D slices (non-contiguous)
t_base = torch.arange(24, dtype=torch.float32, device=torch_device).reshape((4, 6))
t = t_base[1:3, 2:5]
a = wp.from_torch(t)
assert a.ptr == t.data_ptr()
assert not a.is_contiguous
assert a.shape == tuple(t.shape)
# copy contents to contiguous array
a_contiguous = wp.empty_like(a)
wp.launch(copy2d_float_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
# 3D slices (non-contiguous)
t_base = torch.arange(36, dtype=torch.float32, device=torch_device).reshape((4, 3, 3))
t = t_base[::2, 0:1, 1:2]
a = wp.from_torch(t)
assert a.ptr == t.data_ptr()
assert not a.is_contiguous
assert a.shape == tuple(t.shape)
# copy contents to contiguous array
a_contiguous = wp.empty_like(a)
wp.launch(copy3d_float_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
# 2D slices of vec3 (inner contiguous, outer non-contiguous)
t_base = torch.arange(150, dtype=torch.float32, device=torch_device).reshape((10, 5, 3))
t = t_base[1:7:2, 2:5]
a = wp.from_torch(t, dtype=wp.vec3)
assert a.ptr == t.data_ptr()
assert not a.is_contiguous
assert a.shape == tuple(t.shape[:-1])
# copy contents to contiguous array
a_contiguous = wp.empty_like(a)
wp.launch(copy2d_vec3_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
# 2D slices of mat22 (inner contiguous, outer non-contiguous)
t_base = torch.arange(200, dtype=torch.float32, device=torch_device).reshape((10, 5, 2, 2))
t = t_base[1:7:2, 2:5]
a = wp.from_torch(t, dtype=wp.mat22)
assert a.ptr == t.data_ptr()
assert not a.is_contiguous
assert a.shape == tuple(t.shape[:-2])
# copy contents to contiguous array
a_contiguous = wp.empty_like(a)
wp.launch(copy2d_mat22_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
def test_from_torch_zero_strides(test, device):
import torch
torch_device = wp.device_to_torch(device)
t_base = torch.arange(9, dtype=torch.float32, device=torch_device).reshape((3, 3))
# expand outermost dimension
t = t_base.unsqueeze(0).expand(3, -1, -1)
a = wp.from_torch(t)
assert a.ptr == t.data_ptr()
assert not a.is_contiguous
assert a.shape == tuple(t.shape)
a_contiguous = wp.empty_like(a)
wp.launch(copy3d_float_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
# expand middle dimension
t = t_base.unsqueeze(1).expand(-1, 3, -1)
a = wp.from_torch(t)
assert a.ptr == t.data_ptr()
assert not a.is_contiguous
assert a.shape == tuple(t.shape)
a_contiguous = wp.empty_like(a)
wp.launch(copy3d_float_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
# expand innermost dimension
t = t_base.unsqueeze(2).expand(-1, -1, 3)
a = wp.from_torch(t)
assert a.ptr == t.data_ptr()
assert not a.is_contiguous
assert a.shape == tuple(t.shape)
a_contiguous = wp.empty_like(a)
wp.launch(copy3d_float_kernel, dim=a.shape, inputs=[a_contiguous, a], device=device)
assert_np_equal(a_contiguous.numpy(), t.cpu().numpy())
def test_torch_mgpu_from_torch(test, device):
import torch
n = 32
t0 = torch.arange(0, n, 1, dtype=torch.int32, device="cuda:0")
t1 = torch.arange(0, n * 2, 2, dtype=torch.int32, device="cuda:1")
a0 = wp.from_torch(t0, dtype=wp.int32)
a1 = wp.from_torch(t1, dtype=wp.int32)
assert a0.device == "cuda:0"
assert a1.device == "cuda:1"
expected0 = np.arange(0, n, 1)
expected1 = np.arange(0, n * 2, 2)
assert_np_equal(a0.numpy(), expected0)
assert_np_equal(a1.numpy(), expected1)
def test_torch_mgpu_to_torch(test, device):
n = 32
with wp.ScopedDevice("cuda:0"):
a0 = wp.empty(n, dtype=wp.int32)
wp.launch(arange, dim=a0.size, inputs=[0, 1, a0])
with wp.ScopedDevice("cuda:1"):
a1 = wp.empty(n, dtype=wp.int32)
wp.launch(arange, dim=a1.size, inputs=[0, 2, a1])
t0 = wp.to_torch(a0)
t1 = wp.to_torch(a1)
assert str(t0.device) == "cuda:0"
assert str(t1.device) == "cuda:1"
expected0 = np.arange(0, n, 1, dtype=np.int32)
expected1 = np.arange(0, n * 2, 2, dtype=np.int32)
assert_np_equal(t0.cpu().numpy(), expected0)
assert_np_equal(t1.cpu().numpy(), expected1)
def test_torch_mgpu_interop(test, device):
import torch
n = 1024 * 1024
with torch.cuda.device(0):
t0 = torch.arange(n, dtype=torch.float32, device="cuda")
a0 = wp.from_torch(t0)
wp.launch(inc, dim=a0.size, inputs=[a0], stream=wp.stream_from_torch())
with torch.cuda.device(1):
t1 = torch.arange(n, dtype=torch.float32, device="cuda")
a1 = wp.from_torch(t1)
wp.launch(inc, dim=a1.size, inputs=[a1], stream=wp.stream_from_torch())
assert a0.device == "cuda:0"
assert a1.device == "cuda:1"
expected = np.arange(n, dtype=int) + 1
# ensure the torch tensors were modified by warp
assert_np_equal(t0.cpu().numpy(), expected)
assert_np_equal(t1.cpu().numpy(), expected)
def test_torch_autograd(test, device):
"""Test torch autograd with a custom Warp op"""
import torch
# custom autograd op
class TestFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
# allocate output array
y = torch.empty_like(x)
ctx.x = x
ctx.y = y
wp.launch(kernel=op_kernel, dim=len(x), inputs=[wp.from_torch(x)], outputs=[wp.from_torch(y)])
return y
@staticmethod
def backward(ctx, adj_y):
# adjoints should be allocated as zero initialized
adj_x = torch.zeros_like(ctx.x).contiguous()
adj_y = adj_y.contiguous()
wp_x = wp.from_torch(ctx.x, grad=adj_x)
wp_y = wp.from_torch(ctx.y, grad=adj_y)
wp.launch(
kernel=op_kernel,
dim=len(ctx.x),
# fwd inputs
inputs=[wp_x],
outputs=[wp_y],
# adj inputs (already stored in input/output arrays, passing null pointers)
adj_inputs=[None],
adj_outputs=[None],
adjoint=True,
)
return adj_x
# run autograd on given device
with wp.ScopedDevice(device):
torch_device = wp.device_to_torch(device)
# input data
x = torch.ones(16, dtype=torch.float32, device=torch_device, requires_grad=True)
# execute op
y = TestFunc.apply(x)
# compute grads
l = y.sum()
l.backward()
passed = (x.grad == -2.0).all()
assert passed.item()
def test_torch_graph_torch_stream(test, device):
"""Capture Torch graph on Torch stream"""
wp.load_module(device=device)
import torch
torch_device = wp.device_to_torch(device)
n = 1024 * 1024
t = torch.zeros(n, dtype=torch.float32, device=torch_device)
a = wp.from_torch(t)
g = torch.cuda.CUDAGraph()
# create a device-specific torch stream to use for capture
# (otherwise torch.cuda.graph reuses its capture stream, which can be problematic if it's from a different device)
torch_stream = torch.cuda.Stream(device=torch_device)
# make warp use the same stream
warp_stream = wp.stream_from_torch(torch_stream)
# capture graph
with wp.ScopedStream(warp_stream), torch.cuda.graph(g, stream=torch_stream):
wp.capture_begin(force_module_load=False, external=True)
try:
t += 1.0
wp.launch(inc, dim=n, inputs=[a])
t += 1.0
wp.launch(inc, dim=n, inputs=[a])
finally:
wp.capture_end()
# replay graph
num_iters = 10
for _i in range(num_iters):
g.replay()
passed = (t == num_iters * 4.0).all()
assert passed.item()
def test_torch_graph_warp_stream(test, device):
"""Capture Torch graph on Warp stream"""
import torch
torch_device = wp.device_to_torch(device)
n = 1024 * 1024
t = torch.zeros(n, dtype=torch.float32, device=torch_device)
a = wp.from_torch(t)
g = torch.cuda.CUDAGraph()
# make torch use the warp stream from the given device
torch_stream = wp.stream_to_torch(device)
# capture graph
with wp.ScopedDevice(device), torch.cuda.graph(g, stream=torch_stream):
wp.capture_begin(force_module_load=False, external=True)
try:
t += 1.0
wp.launch(inc, dim=n, inputs=[a])
t += 1.0
wp.launch(inc, dim=n, inputs=[a])
finally:
wp.capture_end()
# replay graph
num_iters = 10
for _i in range(num_iters):
g.replay()
passed = (t == num_iters * 4.0).all()
assert passed.item()
def test_warp_graph_warp_stream(test, device):
"""Capture Warp graph on Warp stream"""
import torch
torch_device = wp.device_to_torch(device)
n = 1024 * 1024
t = torch.zeros(n, dtype=torch.float32, device=torch_device)
a = wp.from_torch(t)
# make torch use the warp stream from the given device
torch_stream = wp.stream_to_torch(device)
# capture graph
with wp.ScopedDevice(device), torch.cuda.stream(torch_stream):
wp.capture_begin(force_module_load=False)
try:
t += 1.0
wp.launch(inc, dim=n, inputs=[a])
t += 1.0
wp.launch(inc, dim=n, inputs=[a])
finally:
g = wp.capture_end()
# replay graph
num_iters = 10
for _i in range(num_iters):
wp.capture_launch(g)
passed = (t == num_iters * 4.0).all()
assert passed.item()
def test_warp_graph_torch_stream(test, device):
"""Capture Warp graph on Torch stream"""
wp.load_module(device=device)
import torch
torch_device = wp.device_to_torch(device)
n = 1024 * 1024
t = torch.zeros(n, dtype=torch.float32, device=torch_device)
a = wp.from_torch(t)
# create a device-specific torch stream to use for capture
# (the default torch stream is not suitable for graph capture)
torch_stream = torch.cuda.Stream(device=torch_device)
# make warp use the same stream
warp_stream = wp.stream_from_torch(torch_stream)
# capture graph
with wp.ScopedStream(warp_stream), torch.cuda.stream(torch_stream):
wp.capture_begin(force_module_load=False)
try:
t += 1.0
wp.launch(inc, dim=n, inputs=[a])
t += 1.0
wp.launch(inc, dim=n, inputs=[a])
finally:
g = wp.capture_end()
# replay graph
num_iters = 10
for _i in range(num_iters):
wp.capture_launch(g)
passed = (t == num_iters * 4.0).all()
assert passed.item()
class TestTorch(unittest.TestCase):
pass
test_devices = get_test_devices()
try:
import torch
# check which Warp devices work with Torch
# CUDA devices may fail if Torch was not compiled with CUDA support
torch_compatible_devices = []
torch_compatible_cuda_devices = []
for d in test_devices:
try:
t = torch.arange(10, device=wp.device_to_torch(d))
t += 1
torch_compatible_devices.append(d)
if d.is_cuda:
torch_compatible_cuda_devices.append(d)
except Exception as e:
print(f"Skipping Torch tests on device '{d}' due to exception: {e}")
add_function_test(TestTorch, "test_dtype_from_torch", test_dtype_from_torch, devices=None)
add_function_test(TestTorch, "test_dtype_to_torch", test_dtype_to_torch, devices=None)
if torch_compatible_devices:
add_function_test(TestTorch, "test_device_conversion", test_device_conversion, devices=torch_compatible_devices)
add_function_test(TestTorch, "test_from_torch", test_from_torch, devices=torch_compatible_devices)
add_function_test(TestTorch, "test_from_torch_slices", test_from_torch_slices, devices=torch_compatible_devices)
add_function_test(
TestTorch,
"test_from_torch_zero_strides",
test_from_torch_zero_strides,
devices=torch_compatible_devices,
)
add_function_test(TestTorch, "test_to_torch", test_to_torch, devices=torch_compatible_devices)
add_function_test(TestTorch, "test_torch_zerocopy", test_torch_zerocopy, devices=torch_compatible_devices)
add_function_test(TestTorch, "test_torch_autograd", test_torch_autograd, devices=torch_compatible_devices)
if torch_compatible_cuda_devices:
add_function_test(
TestTorch,
"test_torch_graph_torch_stream",
test_torch_graph_torch_stream,
devices=torch_compatible_cuda_devices,
)
add_function_test(
TestTorch,
"test_torch_graph_warp_stream",
test_torch_graph_warp_stream,
devices=torch_compatible_cuda_devices,
)
add_function_test(
TestTorch,
"test_warp_graph_warp_stream",
test_warp_graph_warp_stream,
devices=torch_compatible_cuda_devices,
)
add_function_test(
TestTorch,
"test_warp_graph_torch_stream",
test_warp_graph_torch_stream,
devices=torch_compatible_cuda_devices,
)
# multi-GPU tests
if len(torch_compatible_cuda_devices) > 1:
add_function_test(TestTorch, "test_torch_mgpu_from_torch", test_torch_mgpu_from_torch)
add_function_test(TestTorch, "test_torch_mgpu_to_torch", test_torch_mgpu_to_torch)
add_function_test(TestTorch, "test_torch_mgpu_interop", test_torch_mgpu_interop)
except Exception as e:
print(f"Skipping Torch tests due to exception: {e}")
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 24,392 | Python | 31.874663 | 120 | 0.638775 |
NVIDIA/warp/warp/tests/test_mesh.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
# fmt: off
POINT_POSITIONS = (
( 0.5, -0.5, 0.5),
(-0.5, -0.5, 0.5),
( 0.5, 0.5, 0.5),
(-0.5, 0.5, 0.5),
(-0.5, -0.5, -0.5),
( 0.5, -0.5, -0.5),
(-0.5, 0.5, -0.5),
( 0.5, 0.5, -0.5),
)
# Right-handed winding order. This corresponds to USD's convention (among others).
RIGHT_HANDED_FACE_VERTEX_INDICES = (
0, 3, 1,
0, 2, 3,
4, 7, 5,
4, 6, 7,
6, 2, 7,
6, 3, 2,
5, 1, 4,
5, 0, 1,
5, 2, 0,
5, 7, 2,
1, 6, 4,
1, 3, 6,
)
# Left-handed winding order. This corresponds to Houdini's convention (among others).
LEFT_HANDED_FACE_VERTEX_INDICES = (
0, 1, 3,
0, 3, 2,
4, 5, 7,
4, 7, 6,
6, 7, 2,
6, 2, 3,
5, 4, 1,
5, 1, 0,
5, 0, 2,
5, 2, 7,
1, 4, 6,
1, 6, 3,
)
# fmt: on
POINT_COUNT = 8
VERTEX_COUNT = 36
FACE_COUNT = 12
@wp.kernel(enable_backward=False)
def read_points_kernel(
mesh_id: wp.uint64,
out_points: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
mesh = wp.mesh_get(mesh_id)
out_points[tid] = mesh.points[tid]
@wp.kernel(enable_backward=False)
def read_indices_kernel(
mesh_id: wp.uint64,
out_indices: wp.array(dtype=int),
):
tid = wp.tid()
mesh = wp.mesh_get(mesh_id)
out_indices[tid * 3 + 0] = mesh.indices[tid * 3 + 0]
out_indices[tid * 3 + 1] = mesh.indices[tid * 3 + 1]
out_indices[tid * 3 + 2] = mesh.indices[tid * 3 + 2]
def test_mesh_read_properties(test, device):
points = wp.array(POINT_POSITIONS, dtype=wp.vec3, device=device)
indices = wp.array(RIGHT_HANDED_FACE_VERTEX_INDICES, dtype=int, device=device)
mesh = wp.Mesh(points=points, indices=indices)
assert mesh.points.size == POINT_COUNT
assert mesh.indices.size == VERTEX_COUNT
    assert mesh.indices.size // 3 == FACE_COUNT
out_points = wp.empty(POINT_COUNT, dtype=wp.vec3, device=device)
wp.launch(read_points_kernel, dim=POINT_COUNT, inputs=[mesh.id], outputs=[out_points], device=device)
assert_np_equal(out_points.numpy(), np.array(POINT_POSITIONS))
out_indices = wp.empty(VERTEX_COUNT, dtype=int, device=device)
wp.launch(read_indices_kernel, dim=FACE_COUNT, inputs=[mesh.id], outputs=[out_indices], device=device)
assert_np_equal(out_indices.numpy(), np.array(RIGHT_HANDED_FACE_VERTEX_INDICES))
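# mesh_query_point returns a signed result: negative when the query point lies
# inside a watertight mesh with right-handed (outward-facing) winding, positive
# outside. Reversing the winding flips the sign, which the tests below verify.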
@wp.kernel(enable_backward=False)
def query_point_kernel(
mesh_id: wp.uint64,
expected_sign: float,
):
point = wp.vec3(0.1, 0.2, 0.3)
expected_pos = wp.vec3(0.1, 0.2, 0.5)
sign = float(0.0)
face = int(0)
bary_u = float(0.0)
bary_v = float(0.0)
wp.mesh_query_point(mesh_id, point, 1e6, sign, face, bary_u, bary_v)
pos = wp.mesh_eval_position(mesh_id, face, bary_u, bary_v)
wp.expect_eq(wp.sign(sign), expected_sign)
wp.expect_eq(face, 1)
wp.expect_near(wp.length(pos - expected_pos), 0.0)
def test_mesh_query_point(test, device):
points = wp.array(POINT_POSITIONS, dtype=wp.vec3, device=device)
indices = wp.array(RIGHT_HANDED_FACE_VERTEX_INDICES, dtype=int, device=device)
mesh = wp.Mesh(points=points, indices=indices)
expected_sign = -1.0
wp.launch(query_point_kernel, dim=1, inputs=[mesh.id, expected_sign], device=device)
indices = wp.array(LEFT_HANDED_FACE_VERTEX_INDICES, dtype=int, device=device)
mesh = wp.Mesh(points=points, indices=indices)
expected_sign = 1.0
wp.launch(query_point_kernel, dim=1, inputs=[mesh.id, expected_sign], device=device)
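# mesh_query_ray reports the hit distance t, barycentric coordinates, hit sign,
# surface normal, and face index; the expected values below were precomputed
# for this unit cube and ray.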
@wp.kernel(enable_backward=False)
def query_ray_kernel(
mesh_id: wp.uint64,
expected_sign: float,
):
start = wp.vec3(0.1, 0.2, 0.3)
dir = wp.normalize(wp.vec3(-1.2, 2.3, -3.4))
expected_t = 0.557828
expected_pos = wp.vec3(-0.0565217, 0.5, -0.143478)
t = float(0.0)
bary_u = float(0.0)
bary_v = float(0.0)
sign = float(0.0)
normal = wp.vec3(0.0, 0.0, 0.0)
face = int(0)
wp.mesh_query_ray(
mesh_id,
start,
dir,
1e6,
t,
bary_u,
bary_v,
sign,
normal,
face,
)
pos = wp.mesh_eval_position(mesh_id, face, bary_u, bary_v)
wp.expect_near(t, expected_t)
wp.expect_near(t, wp.length(pos - start), 1e-6)
wp.expect_eq(wp.sign(sign), expected_sign)
wp.expect_eq(face, 4)
wp.expect_near(wp.length(pos - expected_pos), 0.0, 1e-6)
def test_mesh_query_ray(test, device):
points = wp.array(POINT_POSITIONS, dtype=wp.vec3, device=device)
indices = wp.array(RIGHT_HANDED_FACE_VERTEX_INDICES, dtype=int, device=device)
mesh = wp.Mesh(points=points, indices=indices)
expected_sign = -1.0
wp.launch(
query_ray_kernel,
dim=1,
inputs=[
mesh.id,
expected_sign,
],
device=device,
)
indices = wp.array(LEFT_HANDED_FACE_VERTEX_INDICES, dtype=int, device=device)
mesh = wp.Mesh(points=points, indices=indices)
expected_sign = 1.0
wp.launch(
query_ray_kernel,
dim=1,
inputs=[
mesh.id,
expected_sign,
],
device=device,
)
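# Mesh.refit() rebuilds the BVH bounds after point updates; this test checks
# that the refit can be recorded into a CUDA graph and replayed.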
def test_mesh_refit_graph(test, device):
points = wp.array(POINT_POSITIONS, dtype=wp.vec3, device=device)
indices = wp.array(RIGHT_HANDED_FACE_VERTEX_INDICES, dtype=int, device=device)
mesh = wp.Mesh(points=points, indices=indices)
wp.capture_begin(device, force_module_load=False)
try:
mesh.refit()
finally:
graph = wp.capture_end(device)
# replay
num_iters = 10
for _ in range(num_iters):
wp.capture_launch(graph)
wp.synchronize_device(device)
def test_mesh_exceptions(test, device):
# points and indices must be on same device
with test.assertRaises(RuntimeError):
points = wp.array(POINT_POSITIONS, dtype=wp.vec3, device="cpu")
indices = wp.array(RIGHT_HANDED_FACE_VERTEX_INDICES, dtype=int, device=device)
wp.Mesh(points=points, indices=indices)
# points must be vec3
with test.assertRaises(RuntimeError):
points = wp.array(POINT_POSITIONS, dtype=wp.vec3d, device=device)
indices = wp.array(RIGHT_HANDED_FACE_VERTEX_INDICES, dtype=int, device=device)
wp.Mesh(points=points, indices=indices)
# velocities must be vec3
with test.assertRaises(RuntimeError):
points = wp.array(POINT_POSITIONS, dtype=wp.vec3, device=device)
velocities = wp.zeros(points.shape, dtype=wp.vec3d, device=device)
indices = wp.array(RIGHT_HANDED_FACE_VERTEX_INDICES, dtype=int, device=device)
wp.Mesh(points=points, indices=indices, velocities=velocities)
# indices must be int32
with test.assertRaises(RuntimeError):
points = wp.array(POINT_POSITIONS, dtype=wp.vec3, device=device)
indices = wp.array(RIGHT_HANDED_FACE_VERTEX_INDICES, dtype=wp.int64, device=device)
wp.Mesh(points=points, indices=indices)
# indices must be 1d
with test.assertRaises(RuntimeError):
points = wp.array(POINT_POSITIONS, dtype=wp.vec3, device=device)
indices = wp.array(RIGHT_HANDED_FACE_VERTEX_INDICES, dtype=int, device=device)
indices = indices.reshape((3, -1))
wp.Mesh(points=points, indices=indices)
devices = get_test_devices()
class TestMesh(unittest.TestCase):
pass
add_function_test(TestMesh, "test_mesh_read_properties", test_mesh_read_properties, devices=devices)
add_function_test(TestMesh, "test_mesh_query_point", test_mesh_query_point, devices=devices)
add_function_test(TestMesh, "test_mesh_query_ray", test_mesh_query_ray, devices=devices)
add_function_test(TestMesh, "test_mesh_refit_graph", test_mesh_refit_graph, devices=get_selected_cuda_test_devices())
add_function_test(TestMesh, "test_mesh_exceptions", test_mesh_exceptions, devices=get_selected_cuda_test_devices())
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 8,386 | Python | 28.741135 | 117 | 0.641426 |
NVIDIA/warp/warp/tests/test_lvalue.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
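# These tests exercise Warp's lvalue handling: in-place updates of array
# elements, struct members, and vector components, plus their adjoints.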
@wp.kernel
def rmw_array_kernel(foos: wp.array(dtype=wp.uint32)):
i = wp.tid()
foos[i] += wp.uint32(1)
def test_rmw_array(test, device):
arr = wp.zeros((10,), dtype=wp.uint32, device=device)
wp.launch(kernel=rmw_array_kernel, dim=(10,), inputs=[arr], device=device)
assert_np_equal(arr.numpy(), np.ones(10))
@wp.struct
class RmwFoo:
field: wp.uint32
@wp.kernel
def rmw_array_struct_kernel(foos: wp.array(dtype=RmwFoo)):
i = wp.tid()
foos[i].field += wp.uint32(1)
def test_rmw_array_struct(test, device):
foos = wp.zeros((10,), dtype=RmwFoo, device=device)
wp.launch(kernel=rmw_array_struct_kernel, dim=(10,), inputs=[foos], device=device)
expected = RmwFoo()
expected.field = 1
for f in foos.list():
test.assertEqual(f.field, expected.field)
@wp.func
def lookup(foos: wp.array(dtype=wp.uint32), index: int):
return foos[index]
@wp.kernel
def lookup_kernel(foos: wp.array(dtype=wp.uint32)):
i = wp.tid()
x = lookup(foos, i)
foos[i] = x + wp.uint32(1)
def test_lookup(test, device):
arr = wp.zeros((10,), dtype=wp.uint32, device=device)
wp.launch(kernel=lookup_kernel, dim=(10,), inputs=[arr], device=device)
assert_np_equal(arr.numpy(), np.ones(10))
@wp.func
def lookup3(foos: wp.array(dtype=wp.float32), index: int):
return foos[index]
@wp.kernel
def grad_kernel(foos: wp.array(dtype=wp.float32), bars: wp.array(dtype=wp.float32)):
i = wp.tid()
x = lookup3(foos, i)
bars[i] = x * wp.float32(i) + 1.0
def test_grad(test, device):
num = 10
data = np.linspace(20, 20 + num, num, endpoint=False, dtype=np.float32)
input = wp.array(data, device=device, requires_grad=True)
output = wp.zeros(num, dtype=wp.float32, device=device)
ones = wp.array(np.ones(len(output)), dtype=wp.float32, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel=grad_kernel, dim=(num,), inputs=[input], outputs=[output], device=device)
tape.backward(grads={output: ones})
# test forward results
for i, f in enumerate(output.list()):
test.assertEqual(f, data[i] * i + 1)
# test backward results
for i, f in enumerate(tape.gradients[input].list()):
test.assertEqual(f, i)
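# lookup2 checks that a local initialized from an array element behaves as a
# value: reassigning the local must not write back through to the array.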
@wp.func
def lookup2(foos: wp.array(dtype=wp.uint32), index: int):
if index % 2 == 0:
x = foos[index]
x = wp.uint32(0)
return x
else:
return foos[index]
@wp.kernel
def lookup2_kernel(foos: wp.array(dtype=wp.uint32)):
i = wp.tid()
x = lookup2(foos, i)
foos[i] = x + wp.uint32(1)
def test_lookup2(test, device):
arr = wp.zeros((10,), dtype=wp.uint32, device=device)
wp.launch(kernel=lookup2_kernel, dim=(10,), inputs=[arr], device=device)
assert_np_equal(arr.numpy(), np.ones(10))
@wp.kernel
def unary_kernel(foos: wp.array(dtype=wp.uint32)):
i = wp.tid()
    foos[i] = wp.uint32(-1)  # wraps to 0xFFFFFFFF
    x = -foos[i]  # unsigned negation wraps back around to 1
foos[i] = x
def test_unary(test, device):
arr = wp.zeros((10,), dtype=wp.uint32, device=device)
wp.launch(kernel=unary_kernel, dim=(10,), inputs=[arr], device=device)
assert_np_equal(arr.numpy(), np.ones(10))
@wp.kernel
def rvalue_kernel(foos: wp.array(dtype=wp.uint32)):
i = wp.tid()
if foos[i] < wp.uint32(1):
foos[i] = wp.uint32(1)
def test_rvalue(test, device):
arr = wp.zeros((10,), dtype=wp.uint32, device=device)
wp.launch(kernel=rvalue_kernel, dim=(10,), inputs=[arr], device=device)
assert_np_equal(arr.numpy(), np.ones(10))
# Tests, among other things, that assigning an array element to a new variable
# copies the value rather than creating a reference back into the array
@wp.kernel
def intermediate_kernel(foos: wp.array(dtype=wp.uint32)):
i = wp.tid()
x = foos[i]
x = x + wp.uint32(1)
foos[i] = x
def test_intermediate(test, device):
arr = wp.zeros((10,), dtype=wp.uint32, device=device)
wp.launch(kernel=intermediate_kernel, dim=(10,), inputs=[arr], device=device)
assert_np_equal(arr.numpy(), np.ones(10))
@wp.kernel
def array_kernel(foos: wp.array(dtype=wp.uint32)):
i = wp.tid()
foos[i] = wp.uint32(1)
def test_array_assign(test, device):
arr = wp.zeros((10,), dtype=wp.uint32, device=device)
wp.launch(kernel=array_kernel, dim=(10,), inputs=[arr], device=device)
assert_np_equal(arr.numpy(), np.ones(10))
@wp.func
def increment(arg: wp.uint32):
return arg + wp.uint32(1)
@wp.kernel
def array_call_kernel(foos: wp.array(dtype=wp.uint32)):
i = wp.tid()
foos[i] = increment(foos[i])
def test_array_call_assign(test, device):
arr = wp.zeros((10,), dtype=wp.uint32, device=device)
    wp.launch(kernel=array_call_kernel, dim=(10,), inputs=[arr], device=device)
assert_np_equal(arr.numpy(), np.ones(10))
@wp.struct
class Foo:
field: wp.uint32
@wp.kernel
def array_struct_kernel(foos: wp.array(dtype=Foo)):
i = wp.tid()
foos[i].field = wp.uint32(1)
def test_array_struct_assign(test, device):
foos = wp.zeros((10,), dtype=Foo, device=device)
wp.launch(kernel=array_struct_kernel, dim=(10,), inputs=[foos], device=device)
expected = Foo()
expected.field = 1
test.assertEqual(expected.field, 1)
for f in foos.list():
test.assertEqual(f.field, 1)
@wp.struct
class Bar:
field: wp.uint32
@wp.struct
class Baz:
bar: Bar
@wp.kernel
def array_struct_struct_kernel(foos: wp.array(dtype=Baz)):
i = wp.tid()
foos[i].bar.field = wp.uint32(1)
def test_array_struct_struct_assign(test, device):
foos = wp.zeros((10,), dtype=Baz, device=device)
wp.launch(kernel=array_struct_struct_kernel, dim=(10,), inputs=[foos], device=device)
expected = Baz()
expected.bar.field = 1
test.assertEqual(expected.bar.field, 1)
for f in foos.list():
test.assertEqual(f.bar.field, 1)
@wp.struct
class S:
a: wp.uint32
b: wp.float32
@wp.struct
class F:
x: wp.float32
s: S
y: wp.int32
@wp.kernel
def complex_kernel(foos: wp.array(dtype=F)):
i = wp.tid()
foos[i].x += wp.float32(1.0)
foos[i].y = wp.int32(2)
foos[i].s.b += wp.float32(3.0)
foos[i].s.a = wp.uint32(foos[i].y)
def test_complex(test, device):
foos = wp.zeros((10,), dtype=F, device=device)
wp.launch(kernel=complex_kernel, dim=(10,), inputs=[foos], device=device)
expected = F()
expected.x = 1.0
expected.y = 2
expected.s.b = 3.0
expected.s.a = expected.y
for f in foos.list():
test.assertEqual(f.x, expected.x)
test.assertEqual(f.y, expected.y)
test.assertEqual(f.s.a, expected.s.a)
test.assertEqual(f.s.b, expected.s.b)
@wp.struct
class Svec:
a: wp.uint32
b: wp.vec2f
@wp.struct
class Fvec:
x: wp.vec2f
s: Svec
y: wp.int32
@wp.kernel
def swizzle_kernel(foos: wp.array(dtype=Fvec)):
i = wp.tid()
foos[i].x += wp.vec2f(1.0, 2.0)
foos[i].y = wp.int32(3)
foos[i].s.b = wp.vec2f(4.0, 5.0)
foos[i].s.b.y = wp.float32(6.0)
foos[i].s.b.x = foos[i].x.y
foos[i].s.a = wp.uint32(foos[i].y)
def test_swizzle(test, device):
foos = wp.zeros((10,), dtype=Fvec, device=device)
wp.launch(kernel=swizzle_kernel, dim=(10,), inputs=[foos], device=device)
expected = Fvec()
expected.x = wp.vec2f(1.0, 2.0)
expected.y = 3
expected.s.b = wp.vec2f(4.0, 5.0)
expected.s.b.y = 6.0
expected.s.b.x = expected.x.y
expected.s.a = expected.y
for f in foos.list():
test.assertEqual(f.x, expected.x)
test.assertEqual(f.y, expected.y)
test.assertEqual(f.s.a, expected.s.a)
test.assertEqual(f.s.b, expected.s.b)
@wp.kernel
def slice_kernel(a: wp.array2d(dtype=wp.vec3), b: wp.array2d(dtype=wp.vec3), c: wp.array2d(dtype=wp.vec3)):
tid = wp.tid()
c[tid][0] = a[tid][0] + b[tid][0]
def test_slice(test, device):
a = wp.full((1, 1), value=1.0, dtype=wp.vec3, requires_grad=True, device=device)
b = wp.full((1, 1), value=1.0, dtype=wp.vec3, requires_grad=True, device=device)
c = wp.zeros((1, 1), dtype=wp.vec3, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel=slice_kernel, dim=1, inputs=[a, b], outputs=[c], device=device)
c.grad = wp.full((1, 1), value=1.0, dtype=wp.vec3, device=device)
tape.backward()
x = a.grad.list()[0]
y = b.grad.list()[0]
expected = wp.vec3(1.0)
test.assertEqual(x, expected)
test.assertEqual(y, expected)
devices = get_test_devices()
class TestLValue(unittest.TestCase):
def test_swizzle_error_invalid_attribute(self):
v = wp.vec3(1, 2, 3)
with self.assertRaisesRegex(AttributeError, r"'vec3f' object has no attribute 'foo'$"):
v.foo # noqa: B018
        # unlike reads, writing an unknown attribute should not raise
        try:
            v.bar = 123
        except AttributeError:
            self.fail()
add_function_test(TestLValue, "test_rmw_array", test_rmw_array, devices=devices)
add_function_test(TestLValue, "test_rmw_array_struct", test_rmw_array_struct, devices=devices)
add_function_test(TestLValue, "test_lookup", test_lookup, devices=devices)
add_function_test(TestLValue, "test_lookup2", test_lookup2, devices=devices)
add_function_test(TestLValue, "test_grad", test_grad, devices=devices)
add_function_test(TestLValue, "test_unary", test_unary, devices=devices)
add_function_test(TestLValue, "test_rvalue", test_rvalue, devices=devices)
add_function_test(TestLValue, "test_intermediate", test_intermediate, devices=devices)
add_function_test(TestLValue, "test_array_assign", test_array_assign, devices=devices)
add_function_test(TestLValue, "test_array_struct_assign", test_array_struct_assign, devices=devices)
add_function_test(TestLValue, "test_array_struct_struct_assign", test_array_struct_struct_assign, devices=devices)
add_function_test(TestLValue, "test_complex", test_complex, devices=devices)
add_function_test(TestLValue, "test_swizzle", test_swizzle, devices=devices)
add_function_test(TestLValue, "test_slice", test_slice, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 10,580 | Python | 24.192857 | 114 | 0.649811 |
NVIDIA/warp/warp/tests/test_mat_scalar_ops.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
np_signed_int_types = [
np.int8,
np.int16,
np.int32,
np.int64,
np.byte,
]
np_unsigned_int_types = [
np.uint8,
np.uint16,
np.uint32,
np.uint64,
np.ubyte,
]
np_int_types = np_signed_int_types + np_unsigned_int_types
np_float_types = [np.float16, np.float32, np.float64]
np_scalar_types = np_int_types + np_float_types
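# randvals draws standard normals for float dtypes and small positive integers
# for integer dtypes (a tighter range for 8-bit types) so that products
# computed in the tests stay within range.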
def randvals(rng, shape, dtype):
if dtype in np_float_types:
return rng.standard_normal(size=shape).astype(dtype)
elif dtype in [np.int8, np.uint8, np.byte, np.ubyte]:
return rng.integers(1, high=3, size=shape, dtype=dtype)
return rng.integers(1, high=5, size=shape, dtype=dtype)
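# Kernels here are generated per scalar dtype, so cache them by function name
# plus a dtype suffix to avoid re-registering the same kernel across tests.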
kernel_cache = {}
def getkernel(func, suffix=""):
key = func.__name__ + "_" + suffix
if key not in kernel_cache:
kernel_cache[key] = wp.Kernel(func=func, key=key)
return kernel_cache[key]
def get_select_kernel(dtype):
def output_select_kernel_fn(
input: wp.array(dtype=dtype),
index: int,
out: wp.array(dtype=dtype),
):
out[0] = input[index]
return getkernel(output_select_kernel_fn, suffix=dtype.__name__)
def test_arrays(test, device, dtype):
rng = np.random.default_rng(123)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
mat55 = wp.types.matrix(shape=(5, 5), dtype=wptype)
mat32 = wp.types.matrix(shape=(3, 2), dtype=wptype)
v2_np = randvals(rng, [10, 2, 2], dtype)
v3_np = randvals(rng, [10, 3, 3], dtype)
v4_np = randvals(rng, [10, 4, 4], dtype)
v5_np = randvals(rng, [10, 5, 5], dtype)
v32_np = randvals(rng, [10, 3, 2], dtype)
v2 = wp.array(v2_np, dtype=mat22, requires_grad=True, device=device)
v3 = wp.array(v3_np, dtype=mat33, requires_grad=True, device=device)
v4 = wp.array(v4_np, dtype=mat44, requires_grad=True, device=device)
v5 = wp.array(v5_np, dtype=mat55, requires_grad=True, device=device)
v32 = wp.array(v32_np, dtype=mat32, requires_grad=True, device=device)
assert_np_equal(v2.numpy(), v2_np, tol=1.0e-6)
assert_np_equal(v3.numpy(), v3_np, tol=1.0e-6)
assert_np_equal(v4.numpy(), v4_np, tol=1.0e-6)
assert_np_equal(v5.numpy(), v5_np, tol=1.0e-6)
assert_np_equal(v32.numpy(), v32_np, tol=1.0e-6)
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
v2 = wp.array(v2_np, dtype=mat22, requires_grad=True, device=device)
v3 = wp.array(v3_np, dtype=mat33, requires_grad=True, device=device)
v4 = wp.array(v4_np, dtype=mat44, requires_grad=True, device=device)
assert_np_equal(v2.numpy(), v2_np, tol=1.0e-6)
assert_np_equal(v3.numpy(), v3_np, tol=1.0e-6)
assert_np_equal(v4.numpy(), v4_np, tol=1.0e-6)
def test_components(test, device, dtype):
# test accessing matrix components from Python - this is especially important
# for float16, which requires special handling internally
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat23 = wp.types.matrix(shape=(2, 3), dtype=wptype)
m = mat23(1, 2, 3, 4, 5, 6)
# test __getitem__ for row vectors
r0 = m[0]
r1 = m[1]
test.assertEqual(r0[0], 1)
test.assertEqual(r0[1], 2)
test.assertEqual(r0[2], 3)
test.assertEqual(r1[0], 4)
test.assertEqual(r1[1], 5)
test.assertEqual(r1[2], 6)
# test __getitem__ for individual components
test.assertEqual(m[0, 0], 1)
test.assertEqual(m[0, 1], 2)
test.assertEqual(m[0, 2], 3)
test.assertEqual(m[1, 0], 4)
test.assertEqual(m[1, 1], 5)
test.assertEqual(m[1, 2], 6)
# test __setitem__ for row vectors
m[0] = [7, 8, 9]
m[1] = [10, 11, 12]
test.assertEqual(m[0, 0], 7)
test.assertEqual(m[0, 1], 8)
test.assertEqual(m[0, 2], 9)
test.assertEqual(m[1, 0], 10)
test.assertEqual(m[1, 1], 11)
test.assertEqual(m[1, 2], 12)
# test __setitem__ for individual components
m[0, 0] = 13
m[0, 1] = 14
m[0, 2] = 15
m[1, 0] = 16
m[1, 1] = 17
m[1, 2] = 18
test.assertEqual(m[0, 0], 13)
test.assertEqual(m[0, 1], 14)
test.assertEqual(m[0, 2], 15)
test.assertEqual(m[1, 0], 16)
test.assertEqual(m[1, 1], 17)
test.assertEqual(m[1, 2], 18)
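# The tests below take a register_kernels flag: when True they only declare
# their dynamically generated kernels (so the module can be compiled up front)
# and return before launching anything.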
def test_constants(test, device, dtype, register_kernels=False):
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
mat55 = wp.types.matrix(shape=(5, 5), dtype=wptype)
mat32 = wp.types.matrix(shape=(3, 2), dtype=wptype)
cm22 = wp.constant(mat22(22))
cm33 = wp.constant(mat33(33))
cm44 = wp.constant(mat44(44))
cm55 = wp.constant(mat55(55))
cm32 = wp.constant(mat32(32))
def check_matrix_constants():
wp.expect_eq(cm22, mat22(wptype(22)))
wp.expect_eq(cm33, mat33(wptype(33)))
wp.expect_eq(cm44, mat44(wptype(44)))
wp.expect_eq(cm55, mat55(wptype(55)))
wp.expect_eq(cm32, mat32(wptype(32)))
    kernel = getkernel(check_matrix_constants, suffix=dtype.__name__)

    if register_kernels:
        return

    wp.launch(kernel, dim=1, inputs=[], device=device)
def test_constructors(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
mat55 = wp.types.matrix(shape=(5, 5), dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_scalar_mat_constructor(
input: wp.array(dtype=wptype),
outcomponents: wp.array(dtype=wptype),
):
# multiply outputs by 2 so we've got something to backpropagate:
m2result = wptype(2) * mat22(input[0])
m3result = wptype(2) * mat33(input[0])
m4result = wptype(2) * mat44(input[0])
m5result = wptype(2) * mat55(input[0])
idx = 0
for i in range(2):
for j in range(2):
outcomponents[idx] = m2result[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = m3result[i, j]
idx = idx + 1
for i in range(4):
for j in range(4):
outcomponents[idx] = m4result[i, j]
idx = idx + 1
for i in range(5):
for j in range(5):
outcomponents[idx] = m5result[i, j]
idx = idx + 1
def check_component_mat_constructor(
input: wp.array(dtype=wptype),
outcomponents: wp.array(dtype=wptype),
):
# multiply outputs by 2 so we've got something to backpropagate:
m2result = wptype(2) * mat22(input[0], input[1], input[2], input[3])
m3result = wptype(2) * mat33(
input[4],
input[5],
input[6],
input[7],
input[8],
input[9],
input[10],
input[11],
input[12],
)
m4result = wptype(2) * mat44(
input[13],
input[14],
input[15],
input[16],
input[17],
input[18],
input[19],
input[20],
input[21],
input[22],
input[23],
input[24],
input[25],
input[26],
input[27],
input[28],
)
m5result = wptype(2) * mat55(
input[29],
input[30],
input[31],
input[32],
input[33],
input[34],
input[35],
input[36],
input[37],
input[38],
input[39],
input[40],
input[41],
input[42],
input[43],
input[44],
input[45],
input[46],
input[47],
input[48],
input[49],
input[50],
input[51],
input[52],
input[53],
)
idx = 0
for i in range(2):
for j in range(2):
outcomponents[idx] = m2result[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = m3result[i, j]
idx = idx + 1
for i in range(4):
for j in range(4):
outcomponents[idx] = m4result[i, j]
idx = idx + 1
for i in range(5):
for j in range(5):
outcomponents[idx] = m5result[i, j]
idx = idx + 1
def check_vector_mat_constructor(
input: wp.array(dtype=wptype),
outcomponents: wp.array(dtype=wptype),
):
# multiply outputs by 2 so we've got something to backpropagate:
m2result = wptype(2) * mat22(vec2(input[0], input[2]), vec2(input[1], input[3]))
m3result = wptype(2) * mat33(
vec3(input[4], input[7], input[10]),
vec3(input[5], input[8], input[11]),
vec3(input[6], input[9], input[12]),
)
m4result = wptype(2) * mat44(
vec4(input[13], input[17], input[21], input[25]),
vec4(input[14], input[18], input[22], input[26]),
vec4(input[15], input[19], input[23], input[27]),
vec4(input[16], input[20], input[24], input[28]),
)
m5result = wptype(2) * mat55(
vec5(input[29], input[34], input[39], input[44], input[49]),
vec5(input[30], input[35], input[40], input[45], input[50]),
vec5(input[31], input[36], input[41], input[46], input[51]),
vec5(input[32], input[37], input[42], input[47], input[52]),
vec5(input[33], input[38], input[43], input[48], input[53]),
)
idx = 0
for i in range(2):
for j in range(2):
outcomponents[idx] = m2result[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = m3result[i, j]
idx = idx + 1
for i in range(4):
for j in range(4):
outcomponents[idx] = m4result[i, j]
idx = idx + 1
for i in range(5):
for j in range(5):
outcomponents[idx] = m5result[i, j]
idx = idx + 1
kernel = getkernel(check_scalar_mat_constructor, suffix=dtype.__name__)
compkernel = getkernel(check_component_mat_constructor, suffix=dtype.__name__)
veckernel = getkernel(check_vector_mat_constructor, suffix=dtype.__name__)
if register_kernels:
return
input = wp.array(randvals(rng, [1], dtype), requires_grad=True, device=device)
val = input.numpy()[0]
outcomponents = wp.zeros(2 * 2 + 3 * 3 + 4 * 4 + 5 * 5, dtype=wptype, requires_grad=True, device=device)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[input], outputs=[outcomponents], device=device)
assert_np_equal(outcomponents.numpy()[:4], 2 * val * np.ones(2 * 2), tol=tol)
assert_np_equal(outcomponents.numpy()[4:13], 2 * val * np.ones(3 * 3), tol=tol)
assert_np_equal(outcomponents.numpy()[13:29], 2 * val * np.ones(4 * 4), tol=tol)
assert_np_equal(outcomponents.numpy()[29:54], 2 * val * np.ones(5 * 5), tol=tol)
if dtype in np_float_types:
for idx in range(len(outcomponents)):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[input], outputs=[outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
tape.backward(loss=out)
test.assertEqual(tape.gradients[input].numpy()[0], 2)
tape.zero()
input = wp.array(randvals(rng, [2 * 2 + 3 * 3 + 4 * 4 + 5 * 5], dtype), requires_grad=True, device=device)
wp.launch(compkernel, dim=1, inputs=[input], outputs=[outcomponents], device=device)
assert_np_equal(2 * input.numpy(), outcomponents.numpy(), tol=10 * tol)
if dtype in np_float_types:
for idx in range(len(outcomponents)):
tape = wp.Tape()
with tape:
wp.launch(compkernel, dim=1, inputs=[input], outputs=[outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
tape.backward(loss=out)
expectedgrads = np.zeros(len(input))
expectedgrads[idx] = 2
assert_np_equal(tape.gradients[input].numpy(), expectedgrads)
tape.zero()
wp.launch(veckernel, dim=1, inputs=[input], outputs=[outcomponents], device=device)
assert_np_equal(2 * input.numpy(), outcomponents.numpy(), tol=10 * tol)
if dtype in np_float_types:
for idx in range(len(outcomponents)):
tape = wp.Tape()
with tape:
wp.launch(veckernel, dim=1, inputs=[input], outputs=[outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
tape.backward(loss=out)
expectedgrads = np.zeros(len(input))
expectedgrads[idx] = 2
assert_np_equal(tape.gradients[input].numpy(), expectedgrads)
tape.zero()
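# wp.matrix(...) with an explicit shape constructs an anonymous matrix type
# inline, without declaring a named type first; both the scalar-fill and
# per-component forms are covered.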
def test_anon_type_instance(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
def check_scalar_init(
input: wp.array(dtype=wptype),
output: wp.array(dtype=wptype),
):
m2result = wp.matrix(input[0], shape=(2, 2))
m3result = wp.matrix(input[1], shape=(3, 3))
m4result = wp.matrix(input[2], shape=(4, 4))
m5result = wp.matrix(input[3], shape=(5, 5))
m32result = wp.matrix(input[4], shape=(3, 2))
idx = 0
for i in range(2):
for j in range(2):
output[idx] = wptype(2) * m2result[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
output[idx] = wptype(2) * m3result[i, j]
idx = idx + 1
for i in range(4):
for j in range(4):
output[idx] = wptype(2) * m4result[i, j]
idx = idx + 1
for i in range(5):
for j in range(5):
output[idx] = wptype(2) * m5result[i, j]
idx = idx + 1
for i in range(3):
for j in range(2):
output[idx] = wptype(2) * m32result[i, j]
idx = idx + 1
def check_component_init(
input: wp.array(dtype=wptype),
output: wp.array(dtype=wptype),
):
m2result = wp.matrix(input[0], input[1], input[2], input[3], shape=(2, 2))
m3result = wp.matrix(
input[4], input[5], input[6], input[7], input[8], input[9], input[10], input[11], input[12], shape=(3, 3)
)
m4result = wp.matrix(
input[13],
input[14],
input[15],
input[16],
input[17],
input[18],
input[19],
input[20],
input[21],
input[22],
input[23],
input[24],
input[25],
input[26],
input[27],
input[28],
shape=(4, 4),
)
m5result = wp.matrix(
input[29],
input[30],
input[31],
input[32],
input[33],
input[34],
input[35],
input[36],
input[37],
input[38],
input[39],
input[40],
input[41],
input[42],
input[43],
input[44],
input[45],
input[46],
input[47],
input[48],
input[49],
input[50],
input[51],
input[52],
input[53],
shape=(5, 5),
)
m32result = wp.matrix(input[54], input[55], input[56], input[57], input[58], input[59], shape=(3, 2))
idx = 0
for i in range(2):
for j in range(2):
output[idx] = wptype(2) * m2result[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
output[idx] = wptype(2) * m3result[i, j]
idx = idx + 1
for i in range(4):
for j in range(4):
output[idx] = wptype(2) * m4result[i, j]
idx = idx + 1
for i in range(5):
for j in range(5):
output[idx] = wptype(2) * m5result[i, j]
idx = idx + 1
for i in range(3):
for j in range(2):
output[idx] = wptype(2) * m32result[i, j]
idx = idx + 1
scalar_kernel = getkernel(check_scalar_init, suffix=dtype.__name__)
component_kernel = getkernel(check_component_init, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
input = wp.array(randvals(rng, [5], dtype), requires_grad=True, device=device)
output = wp.zeros(2 * 2 + 3 * 3 + 4 * 4 + 5 * 5 + 3 * 2, dtype=wptype, requires_grad=True, device=device)
wp.launch(scalar_kernel, dim=1, inputs=[input], outputs=[output], device=device)
assert_np_equal(output.numpy()[:4], 2 * np.array([input.numpy()[0]] * 2 * 2), tol=1.0e-6)
assert_np_equal(output.numpy()[4:13], 2 * np.array([input.numpy()[1]] * 3 * 3), tol=1.0e-6)
assert_np_equal(output.numpy()[13:29], 2 * np.array([input.numpy()[2]] * 4 * 4), tol=1.0e-6)
assert_np_equal(output.numpy()[29:54], 2 * np.array([input.numpy()[3]] * 5 * 5), tol=1.0e-6)
assert_np_equal(output.numpy()[54:], 2 * np.array([input.numpy()[4]] * 3 * 2), tol=1.0e-6)
if dtype in np_float_types:
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for i in range(len(output)):
tape = wp.Tape()
with tape:
wp.launch(scalar_kernel, dim=1, inputs=[input], outputs=[output], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[output, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(input.numpy())
if i < 4:
expected[0] = 2
elif i < 13:
expected[1] = 2
elif i < 29:
expected[2] = 2
elif i < 54:
expected[3] = 2
else:
expected[4] = 2
assert_np_equal(tape.gradients[input].numpy(), expected, tol=tol)
tape.reset()
tape.zero()
input = wp.array(randvals(rng, [2 * 2 + 3 * 3 + 4 * 4 + 5 * 5 + 3 * 2], dtype), requires_grad=True, device=device)
output = wp.zeros(2 * 2 + 3 * 3 + 4 * 4 + 5 * 5 + 3 * 2, dtype=wptype, requires_grad=True, device=device)
wp.launch(component_kernel, dim=1, inputs=[input], outputs=[output], device=device)
assert_np_equal(output.numpy(), 2 * input.numpy(), tol=1.0e-6)
if dtype in np_float_types:
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for i in range(len(output)):
tape = wp.Tape()
with tape:
wp.launch(component_kernel, dim=1, inputs=[input], outputs=[output], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[output, i], outputs=[out], device=device)
tape.backward(loss=out)
expected = np.zeros_like(input.numpy())
expected[i] = 2
assert_np_equal(tape.gradients[input].numpy(), expected, tol=tol)
tape.reset()
tape.zero()
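# wp.identity() constructs an n x n identity matrix of the requested dtype
# inside the kernel; the flattened output is compared against scaled np.eye.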
def test_identity(test, device, dtype, register_kernels=False):
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
def check_identity_mat(
output: wp.array(dtype=wptype),
):
m2result = wp.identity(dtype=wptype, n=2)
m3result = wp.identity(dtype=wptype, n=3)
m4result = wp.identity(dtype=wptype, n=4)
m5result = wp.identity(dtype=wptype, n=5)
idx = 0
for i in range(2):
for j in range(2):
output[idx] = wptype(2) * m2result[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
output[idx] = wptype(2) * m3result[i, j]
idx = idx + 1
for i in range(4):
for j in range(4):
output[idx] = wptype(2) * m4result[i, j]
idx = idx + 1
for i in range(5):
for j in range(5):
output[idx] = wptype(2) * m5result[i, j]
idx = idx + 1
id_kernel = getkernel(check_identity_mat, suffix=dtype.__name__)
if register_kernels:
return
output = wp.zeros(2 * 2 + 3 * 3 + 4 * 4 + 5 * 5, dtype=wptype, requires_grad=True, device=device)
wp.launch(id_kernel, dim=1, inputs=[], outputs=[output], device=device)
assert_np_equal(output.numpy()[:4], 2 * np.eye(2), tol=1.0e-6)
assert_np_equal(output.numpy()[4:13], 2 * np.eye(3), tol=1.0e-6)
assert_np_equal(output.numpy()[13:29], 2 * np.eye(4), tol=1.0e-6)
assert_np_equal(output.numpy()[29:], 2 * np.eye(5), tol=1.0e-6)
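# Reading m[0][i, j] indexes a matrix element stored in an array; the gradient
# check confirms the adjoint lands on exactly that component.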
def test_indexing(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
mat55 = wp.types.matrix(shape=(5, 5), dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_indexing(
m2: wp.array(dtype=mat22),
m3: wp.array(dtype=mat33),
m4: wp.array(dtype=mat44),
m5: wp.array(dtype=mat55),
outcomponents: wp.array(dtype=wptype),
):
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(2):
for j in range(2):
outcomponents[idx] = wptype(2) * m2[0][i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * m3[0][i, j]
idx = idx + 1
for i in range(4):
for j in range(4):
outcomponents[idx] = wptype(2) * m4[0][i, j]
idx = idx + 1
for i in range(5):
for j in range(5):
outcomponents[idx] = wptype(2) * m5[0][i, j]
idx = idx + 1
kernel = getkernel(check_mat_indexing, suffix=dtype.__name__)
if register_kernels:
return
m2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
m3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
m4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
m5 = wp.array(randvals(rng, [1, 5, 5], dtype), dtype=mat55, requires_grad=True, device=device)
outcomponents = wp.zeros(2 * 2 + 3 * 3 + 4 * 4 + 5 * 5, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[m2, m3, m4, m5], outputs=[outcomponents], device=device)
assert_np_equal(outcomponents.numpy()[:4], 2 * m2.numpy().reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[4:13], 2 * m3.numpy().reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[13:29], 2 * m4.numpy().reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[29:54], 2 * m5.numpy().reshape(-1), tol=tol)
if dtype in np_float_types:
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for dim, input in [(2, m2), (3, m3), (4, m4), (5, m5)]:
for i in range(dim):
for j in range(dim):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[m2, m3, m4, m5], outputs=[outcomponents], device=device)
wp.launch(
output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device
)
tape.backward(loss=out)
expectedresult = np.zeros((dim, dim), dtype=dtype)
expectedresult[i, j] = 2
assert_np_equal(tape.gradients[input].numpy()[0], expectedresult)
tape.zero()
idx = idx + 1
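# expect_eq/expect_neq compare whole matrices; each inequality case differs
# from its counterpart by a single sign-flipped component.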
def test_equality(test, device, dtype, register_kernels=False):
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
mat55 = wp.types.matrix(shape=(5, 5), dtype=wptype)
def check_mat_equality():
wp.expect_eq(
mat22(wptype(1.0), wptype(2.0), wptype(3.0), wptype(4.0)),
mat22(wptype(1.0), wptype(2.0), wptype(3.0), wptype(4.0)),
)
wp.expect_neq(
mat22(wptype(1.0), wptype(2.0), wptype(3.0), -wptype(4.0)),
mat22(wptype(1.0), wptype(2.0), wptype(3.0), wptype(4.0)),
)
wp.expect_eq(
mat33(
wptype(1.0),
wptype(2.0),
wptype(3.0),
wptype(4.0),
wptype(5.0),
wptype(6.0),
wptype(7.0),
wptype(8.0),
wptype(9.0),
),
mat33(
wptype(1.0),
wptype(2.0),
wptype(3.0),
wptype(4.0),
wptype(5.0),
wptype(6.0),
wptype(7.0),
wptype(8.0),
wptype(9.0),
),
)
wp.expect_neq(
mat33(
wptype(1.0),
wptype(2.0),
wptype(3.0),
wptype(4.0),
wptype(5.0),
wptype(6.0),
wptype(7.0),
wptype(8.0),
wptype(9.0),
),
mat33(
wptype(1.0),
wptype(2.0),
wptype(3.0),
-wptype(4.0),
wptype(5.0),
wptype(6.0),
wptype(7.0),
wptype(8.0),
wptype(9.0),
),
)
wp.expect_eq(
mat44(
wptype(1.0),
wptype(2.0),
wptype(3.0),
wptype(4.0),
wptype(5.0),
wptype(6.0),
wptype(7.0),
wptype(8.0),
wptype(9.0),
wptype(10.0),
wptype(11.0),
wptype(12.0),
wptype(13.0),
wptype(14.0),
wptype(15.0),
wptype(16.0),
),
mat44(
wptype(1.0),
wptype(2.0),
wptype(3.0),
wptype(4.0),
wptype(5.0),
wptype(6.0),
wptype(7.0),
wptype(8.0),
wptype(9.0),
wptype(10.0),
wptype(11.0),
wptype(12.0),
wptype(13.0),
wptype(14.0),
wptype(15.0),
wptype(16.0),
),
)
wp.expect_neq(
mat44(
wptype(1.0),
wptype(2.0),
wptype(3.0),
wptype(4.0),
wptype(5.0),
wptype(6.0),
wptype(7.0),
wptype(8.0),
wptype(9.0),
wptype(10.0),
wptype(11.0),
wptype(12.0),
wptype(13.0),
wptype(14.0),
wptype(15.0),
wptype(16.0),
),
mat44(
-wptype(1.0),
wptype(2.0),
wptype(3.0),
wptype(4.0),
wptype(5.0),
wptype(6.0),
wptype(7.0),
wptype(8.0),
wptype(9.0),
wptype(10.0),
wptype(11.0),
wptype(12.0),
wptype(13.0),
wptype(14.0),
wptype(15.0),
wptype(16.0),
),
)
wp.expect_eq(
mat55(
wptype(1.0),
wptype(2.0),
wptype(3.0),
wptype(4.0),
wptype(5.0),
wptype(6.0),
wptype(7.0),
wptype(8.0),
wptype(9.0),
wptype(10.0),
wptype(11.0),
wptype(12.0),
wptype(13.0),
wptype(14.0),
wptype(15.0),
wptype(16.0),
wptype(17.0),
wptype(18.0),
wptype(19.0),
wptype(20.0),
wptype(21.0),
wptype(22.0),
wptype(23.0),
wptype(24.0),
wptype(25.0),
),
mat55(
wptype(1.0),
wptype(2.0),
wptype(3.0),
wptype(4.0),
wptype(5.0),
wptype(6.0),
wptype(7.0),
wptype(8.0),
wptype(9.0),
wptype(10.0),
wptype(11.0),
wptype(12.0),
wptype(13.0),
wptype(14.0),
wptype(15.0),
wptype(16.0),
wptype(17.0),
wptype(18.0),
wptype(19.0),
wptype(20.0),
wptype(21.0),
wptype(22.0),
wptype(23.0),
wptype(24.0),
wptype(25.0),
),
)
wp.expect_neq(
mat55(
wptype(1.0),
wptype(2.0),
wptype(3.0),
wptype(4.0),
wptype(5.0),
wptype(6.0),
wptype(7.0),
wptype(8.0),
wptype(9.0),
wptype(10.0),
wptype(11.0),
wptype(12.0),
wptype(13.0),
wptype(14.0),
wptype(15.0),
wptype(16.0),
wptype(17.0),
wptype(18.0),
wptype(19.0),
wptype(20.0),
wptype(21.0),
wptype(22.0),
wptype(23.0),
wptype(24.0),
wptype(25.0),
),
mat55(
wptype(1.0),
wptype(2.0),
wptype(3.0),
wptype(4.0),
wptype(5.0),
wptype(6.0),
wptype(7.0),
wptype(8.0),
wptype(9.0),
wptype(10.0),
wptype(11.0),
wptype(12.0),
wptype(13.0),
wptype(14.0),
wptype(15.0),
wptype(16.0),
-wptype(17.0),
wptype(18.0),
wptype(19.0),
wptype(20.0),
wptype(21.0),
wptype(22.0),
wptype(23.0),
wptype(24.0),
wptype(25.0),
),
)
kernel = getkernel(check_mat_equality, suffix=dtype.__name__)
if register_kernels:
return
wp.launch(kernel, dim=1, inputs=[], outputs=[], device=device)
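# Scalar products are checked from both sides (s * M and M * s), each computed
# twice, with results and per-component gradients validated against numpy.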
def test_scalar_multiplication(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
mat55 = wp.types.matrix(shape=(5, 5), dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_scalar_mul(
s: wp.array(dtype=wptype),
m2: wp.array(dtype=mat22),
m3: wp.array(dtype=mat33),
m4: wp.array(dtype=mat44),
m5: wp.array(dtype=mat55),
outcomponents: wp.array(dtype=wptype),
outcomponents_rightmul: wp.array(dtype=wptype),
):
m2result = s[0] * m2[0]
m3result = s[0] * m3[0]
m4result = s[0] * m4[0]
m5result = s[0] * m5[0]
m2resultright = m2[0] * s[0]
m3resultright = m3[0] * s[0]
m4resultright = m4[0] * s[0]
m5resultright = m5[0] * s[0]
m2result_2 = s[0] * m2[0]
m3result_2 = s[0] * m3[0]
m4result_2 = s[0] * m4[0]
m5result_2 = s[0] * m5[0]
m2resultright_2 = m2[0] * s[0]
m3resultright_2 = m3[0] * s[0]
m4resultright_2 = m4[0] * s[0]
m5resultright_2 = m5[0] * s[0]
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(2):
for j in range(2):
outcomponents[idx] = wptype(2) * m2result[i, j]
outcomponents_rightmul[idx] = wptype(2) * m2resultright[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * m3result[i, j]
outcomponents_rightmul[idx] = wptype(2) * m3resultright[i, j]
idx = idx + 1
for i in range(4):
for j in range(4):
outcomponents[idx] = wptype(2) * m4result[i, j]
outcomponents_rightmul[idx] = wptype(2) * m4resultright[i, j]
idx = idx + 1
for i in range(5):
for j in range(5):
outcomponents[idx] = wptype(2) * m5result[i, j]
outcomponents_rightmul[idx] = wptype(2) * m5resultright[i, j]
idx = idx + 1
for i in range(2):
for j in range(2):
outcomponents[idx] = wptype(2) * m2result_2[i, j]
outcomponents_rightmul[idx] = wptype(2) * m2resultright_2[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * m3result_2[i, j]
outcomponents_rightmul[idx] = wptype(2) * m3resultright_2[i, j]
idx = idx + 1
for i in range(4):
for j in range(4):
outcomponents[idx] = wptype(2) * m4result_2[i, j]
outcomponents_rightmul[idx] = wptype(2) * m4resultright_2[i, j]
idx = idx + 1
for i in range(5):
for j in range(5):
outcomponents[idx] = wptype(2) * m5result_2[i, j]
outcomponents_rightmul[idx] = wptype(2) * m5resultright_2[i, j]
idx = idx + 1
kernel = getkernel(check_mat_scalar_mul, suffix=dtype.__name__)
if register_kernels:
return
s = wp.array(randvals(rng, [1], dtype), requires_grad=True, device=device)
m2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
m3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
m4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
m5 = wp.array(randvals(rng, [1, 5, 5], dtype), dtype=mat55, requires_grad=True, device=device)
outcomponents = wp.zeros(2 * (2 * 2 + 3 * 3 + 4 * 4 + 5 * 5), dtype=wptype, requires_grad=True, device=device)
outcomponents_rightmul = wp.zeros(
2 * (2 * 2 + 3 * 3 + 4 * 4 + 5 * 5), dtype=wptype, requires_grad=True, device=device
)
wp.launch(kernel, dim=1, inputs=[s, m2, m3, m4, m5], outputs=[outcomponents, outcomponents_rightmul], device=device)
sval = s.numpy()[0]
assert_np_equal(outcomponents.numpy()[:4], 2 * sval * m2.numpy().reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[4:13], 2 * sval * m3.numpy().reshape(-1), tol=10 * tol)
assert_np_equal(outcomponents.numpy()[13:29], 2 * sval * m4.numpy().reshape(-1), tol=10 * tol)
assert_np_equal(outcomponents.numpy()[29:54], 2 * sval * m5.numpy().reshape(-1), tol=10 * tol)
assert_np_equal(outcomponents_rightmul.numpy()[:4], 2 * sval * m2.numpy().reshape(-1), tol=tol)
assert_np_equal(outcomponents_rightmul.numpy()[4:13], 2 * sval * m3.numpy().reshape(-1), tol=10 * tol)
assert_np_equal(outcomponents_rightmul.numpy()[13:29], 2 * sval * m4.numpy().reshape(-1), tol=10 * tol)
assert_np_equal(outcomponents_rightmul.numpy()[29:54], 2 * sval * m5.numpy().reshape(-1), tol=10 * tol)
assert_np_equal(outcomponents.numpy()[54:58], 2 * sval * m2.numpy().reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[58:67], 2 * sval * m3.numpy().reshape(-1), tol=10 * tol)
assert_np_equal(outcomponents.numpy()[67:83], 2 * sval * m4.numpy().reshape(-1), tol=10 * tol)
assert_np_equal(outcomponents.numpy()[83:108], 2 * sval * m5.numpy().reshape(-1), tol=10 * tol)
assert_np_equal(outcomponents_rightmul.numpy()[54:58], 2 * sval * m2.numpy().reshape(-1), tol=tol)
assert_np_equal(outcomponents_rightmul.numpy()[58:67], 2 * sval * m3.numpy().reshape(-1), tol=10 * tol)
assert_np_equal(outcomponents_rightmul.numpy()[67:83], 2 * sval * m4.numpy().reshape(-1), tol=10 * tol)
assert_np_equal(outcomponents_rightmul.numpy()[83:108], 2 * sval * m5.numpy().reshape(-1), tol=10 * tol)
if dtype in np_float_types:
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for dim, input in [(2, m2), (3, m3), (4, m4), (5, m5)]:
for i in range(dim):
for j in range(dim):
# test left mul gradient:
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[s, m2, m3, m4, m5],
outputs=[outcomponents, outcomponents_rightmul],
device=device,
)
wp.launch(
output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device
)
tape.backward(loss=out)
expectedresult = np.zeros((dim, dim), dtype=dtype)
expectedresult[i, j] = 2 * sval
assert_np_equal(tape.gradients[input].numpy()[0], expectedresult, tol=10 * tol)
assert_np_equal(tape.gradients[s].numpy()[0], 2 * input.numpy()[0, i, j], tol=10 * tol)
tape.zero()
# test right mul gradient:
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[s, m2, m3, m4, m5],
outputs=[outcomponents, outcomponents_rightmul],
device=device,
)
wp.launch(
output_select_kernel,
dim=1,
inputs=[outcomponents_rightmul, idx],
outputs=[out],
device=device,
)
tape.backward(loss=out)
expectedresult = np.zeros((dim, dim), dtype=dtype)
expectedresult[i, j] = 2 * sval
assert_np_equal(tape.gradients[input].numpy()[0], expectedresult, tol=10 * tol)
assert_np_equal(tape.gradients[s].numpy()[0], 2 * input.numpy()[0, i, j], tol=10 * tol)
tape.zero()
idx = idx + 1
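# Matrix-vector products are exercised through both the * and @ operators,
# which should yield identical values and adjoints.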
def test_matvec_multiplication(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 2.0e-2,
np.float32: 5.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat32 = wp.types.matrix(shape=(3, 2), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
mat55 = wp.types.matrix(shape=(5, 5), dtype=wptype)
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_vec_mul(
v2: wp.array(dtype=vec2),
v3: wp.array(dtype=vec3),
v4: wp.array(dtype=vec4),
v5: wp.array(dtype=vec5),
v32: wp.array(dtype=vec2),
m2: wp.array(dtype=mat22),
m3: wp.array(dtype=mat33),
m4: wp.array(dtype=mat44),
m5: wp.array(dtype=mat55),
m32: wp.array(dtype=mat32),
outcomponents: wp.array(dtype=wptype),
):
v2result = m2[0] * v2[0]
v3result = m3[0] * v3[0]
v4result = m4[0] * v4[0]
v5result = m5[0] * v5[0]
v32result = m32[0] * v32[0]
v2result_2 = m2[0] @ v2[0]
v3result_2 = m3[0] @ v3[0]
v4result_2 = m4[0] @ v4[0]
v5result_2 = m5[0] @ v5[0]
v32result_2 = m32[0] @ v32[0]
idx = 0
# multiply outputs by 2 so we've got something to backpropagate:
for i in range(2):
outcomponents[idx] = wptype(2) * v2result[i]
idx = idx + 1
for i in range(3):
outcomponents[idx] = wptype(2) * v3result[i]
idx = idx + 1
for i in range(4):
outcomponents[idx] = wptype(2) * v4result[i]
idx = idx + 1
for i in range(5):
outcomponents[idx] = wptype(2) * v5result[i]
idx = idx + 1
for i in range(3):
outcomponents[idx] = wptype(2) * v32result[i]
idx = idx + 1
for i in range(2):
outcomponents[idx] = wptype(2) * v2result_2[i]
idx = idx + 1
for i in range(3):
outcomponents[idx] = wptype(2) * v3result_2[i]
idx = idx + 1
for i in range(4):
outcomponents[idx] = wptype(2) * v4result_2[i]
idx = idx + 1
for i in range(5):
outcomponents[idx] = wptype(2) * v5result_2[i]
idx = idx + 1
for i in range(3):
outcomponents[idx] = wptype(2) * v32result_2[i]
idx = idx + 1
kernel = getkernel(check_mat_vec_mul, suffix=dtype.__name__)
if register_kernels:
return
v2 = wp.array(randvals(rng, [1, 2], dtype), dtype=vec2, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, [1, 3], dtype), dtype=vec3, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, [1, 4], dtype), dtype=vec4, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, [1, 5], dtype), dtype=vec5, requires_grad=True, device=device)
v32 = wp.array(randvals(rng, [1, 2], dtype), dtype=vec2, requires_grad=True, device=device)
m2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
m3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
m4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
m5 = wp.array(randvals(rng, [1, 5, 5], dtype), dtype=mat55, requires_grad=True, device=device)
m32 = wp.array(randvals(rng, [1, 3, 2], dtype), dtype=mat32, requires_grad=True, device=device)
outcomponents = wp.zeros(2 * (2 + 3 + 4 + 5 + 3), dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[v2, v3, v4, v5, v32, m2, m3, m4, m5, m32], outputs=[outcomponents], device=device)
assert_np_equal(outcomponents.numpy()[:2], 2 * np.matmul(m2.numpy()[0], v2.numpy()[0]), tol=tol)
assert_np_equal(outcomponents.numpy()[2:5], 2 * np.matmul(m3.numpy()[0], v3.numpy()[0]), tol=tol)
assert_np_equal(outcomponents.numpy()[5:9], 2 * np.matmul(m4.numpy()[0], v4.numpy()[0]), tol=5 * tol)
assert_np_equal(outcomponents.numpy()[9:14], 2 * np.matmul(m5.numpy()[0], v5.numpy()[0]), tol=5 * tol)
assert_np_equal(outcomponents.numpy()[14:17], 2 * np.matmul(m32.numpy()[0], v32.numpy()[0]), tol=5 * tol)
assert_np_equal(outcomponents.numpy()[17:19], 2 * np.matmul(m2.numpy()[0], v2.numpy()[0]), tol=tol)
assert_np_equal(outcomponents.numpy()[19:22], 2 * np.matmul(m3.numpy()[0], v3.numpy()[0]), tol=tol)
assert_np_equal(outcomponents.numpy()[22:26], 2 * np.matmul(m4.numpy()[0], v4.numpy()[0]), tol=5 * tol)
assert_np_equal(outcomponents.numpy()[26:31], 2 * np.matmul(m5.numpy()[0], v5.numpy()[0]), tol=5 * tol)
assert_np_equal(outcomponents.numpy()[31:34], 2 * np.matmul(m32.numpy()[0], v32.numpy()[0]), tol=5 * tol)
if dtype in np_float_types:
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for dim, invec, inmat in [(2, v2, m2), (3, v3, m3), (4, v4, m4), (5, v5, m5), (3, v32, m32)]:
for i in range(dim):
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[v2, v3, v4, v5, v32, m2, m3, m4, m5, m32],
outputs=[outcomponents],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
tape.backward(loss=out)
assert_np_equal(tape.gradients[invec].numpy()[0], 2 * inmat.numpy()[0, i, :], tol=2 * tol)
expectedresult = np.zeros(inmat.dtype._shape_, dtype=dtype)
expectedresult[i, :] = 2 * invec.numpy()[0]
assert_np_equal(tape.gradients[inmat].numpy()[0], expectedresult, tol=2 * tol)
tape.zero()
idx = idx + 1
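# Row-vector-times-matrix products (v * M and v @ M) are checked against
# numpy's vector-matrix results; the matrix gradient for output i lands in
# column i.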
def test_vecmat_multiplication(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 2.0e-2,
np.float32: 5.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat23 = wp.types.matrix(shape=(2, 3), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
mat55 = wp.types.matrix(shape=(5, 5), dtype=wptype)
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_vec_mat_mul(
v2: wp.array(dtype=vec2),
v3: wp.array(dtype=vec3),
v4: wp.array(dtype=vec4),
v5: wp.array(dtype=vec5),
v32: wp.array(dtype=vec2),
m2: wp.array(dtype=mat22),
m3: wp.array(dtype=mat33),
m4: wp.array(dtype=mat44),
m5: wp.array(dtype=mat55),
m23: wp.array(dtype=mat23),
outcomponents: wp.array(dtype=wptype),
):
v2result = v2[0] * m2[0]
v3result = v3[0] * m3[0]
v4result = v4[0] * m4[0]
v5result = v5[0] * m5[0]
v32result = v32[0] * m23[0]
v2result_2 = v2[0] @ m2[0]
v3result_2 = v3[0] @ m3[0]
v4result_2 = v4[0] @ m4[0]
v5result_2 = v5[0] @ m5[0]
v32result_2 = v32[0] @ m23[0]
idx = 0
# multiply outputs by 2 so we've got something to backpropagate:
for i in range(2):
outcomponents[idx] = wptype(2) * v2result[i]
idx = idx + 1
for i in range(3):
outcomponents[idx] = wptype(2) * v3result[i]
idx = idx + 1
for i in range(4):
outcomponents[idx] = wptype(2) * v4result[i]
idx = idx + 1
for i in range(5):
outcomponents[idx] = wptype(2) * v5result[i]
idx = idx + 1
for i in range(3):
outcomponents[idx] = wptype(2) * v32result[i]
idx = idx + 1
for i in range(2):
outcomponents[idx] = wptype(2) * v2result_2[i]
idx = idx + 1
for i in range(3):
outcomponents[idx] = wptype(2) * v3result_2[i]
idx = idx + 1
for i in range(4):
outcomponents[idx] = wptype(2) * v4result_2[i]
idx = idx + 1
for i in range(5):
outcomponents[idx] = wptype(2) * v5result_2[i]
idx = idx + 1
for i in range(3):
outcomponents[idx] = wptype(2) * v32result_2[i]
idx = idx + 1
kernel = getkernel(check_vec_mat_mul, suffix=dtype.__name__)
if register_kernels:
return
v2 = wp.array(randvals(rng, [1, 2], dtype), dtype=vec2, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, [1, 3], dtype), dtype=vec3, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, [1, 4], dtype), dtype=vec4, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, [1, 5], dtype), dtype=vec5, requires_grad=True, device=device)
v32 = wp.array(randvals(rng, [1, 2], dtype), dtype=vec2, requires_grad=True, device=device)
m2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
m3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
m4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
m5 = wp.array(randvals(rng, [1, 5, 5], dtype), dtype=mat55, requires_grad=True, device=device)
m23 = wp.array(randvals(rng, [1, 2, 3], dtype), dtype=mat23, requires_grad=True, device=device)
outcomponents = wp.zeros(2 * (2 + 3 + 4 + 5 + 3), dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[v2, v3, v4, v5, v32, m2, m3, m4, m5, m23], outputs=[outcomponents], device=device)
outcomponents_np = outcomponents.numpy()
assert_np_equal(outcomponents_np[:2], 2 * np.matmul(v2.numpy()[0], m2.numpy()[0]), tol=tol)
assert_np_equal(outcomponents_np[2:5], 2 * np.matmul(v3.numpy()[0], m3.numpy()[0]), tol=tol)
assert_np_equal(outcomponents_np[5:9], 2 * np.matmul(v4.numpy()[0], m4.numpy()[0]), tol=5 * tol)
assert_np_equal(outcomponents_np[9:14], 2 * np.matmul(v5.numpy()[0], m5.numpy()[0]), tol=5 * tol)
assert_np_equal(outcomponents_np[14:17], 2 * np.matmul(v32.numpy()[0], m23.numpy()[0]), tol=5 * tol)
assert_np_equal(outcomponents_np[17:19], 2 * np.matmul(v2.numpy()[0], m2.numpy()[0]), tol=tol)
assert_np_equal(outcomponents_np[19:22], 2 * np.matmul(v3.numpy()[0], m3.numpy()[0]), tol=tol)
assert_np_equal(outcomponents_np[22:26], 2 * np.matmul(v4.numpy()[0], m4.numpy()[0]), tol=5 * tol)
assert_np_equal(outcomponents_np[26:31], 2 * np.matmul(v5.numpy()[0], m5.numpy()[0]), tol=5 * tol)
assert_np_equal(outcomponents_np[31:34], 2 * np.matmul(v32.numpy()[0], m23.numpy()[0]), tol=5 * tol)
if dtype in np_float_types:
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for dim, inmat, invec in [(2, m2, v2), (3, m3, v3), (4, m4, v4), (5, m5, v5), (3, m23, v32)]:
for i in range(dim):
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[v2, v3, v4, v5, v32, m2, m3, m4, m5, m23],
outputs=[outcomponents],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
tape.backward(loss=out)
assert_np_equal(tape.gradients[invec].numpy()[0], 2 * inmat.numpy()[0, :, i], tol=2 * tol)
expectedresult = np.zeros(inmat.dtype._shape_, dtype=dtype)
expectedresult[:, i] = 2 * invec.numpy()[0]
assert_np_equal(tape.gradients[inmat].numpy()[0], expectedresult, tol=2 * tol)
tape.zero()
idx = idx + 1
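# mat*mat products via both the * and @ operators, including non-square mat32 operands,
# checked against np.matmul.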
def test_matmat_multiplication(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 2.0e-2,
np.float32: 5.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat32 = wp.types.matrix(shape=(3, 2), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
mat55 = wp.types.matrix(shape=(5, 5), dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_mat_mul(
a2: wp.array(dtype=mat22),
a3: wp.array(dtype=mat33),
a4: wp.array(dtype=mat44),
a5: wp.array(dtype=mat55),
a32: wp.array(dtype=mat32),
b2: wp.array(dtype=mat22),
b3: wp.array(dtype=mat33),
b4: wp.array(dtype=mat44),
b5: wp.array(dtype=mat55),
b32: wp.array(dtype=mat32),
outcomponents: wp.array(dtype=wptype),
):
c2result = b2[0] * a2[0]
c3result = b3[0] * a3[0]
c4result = b4[0] * a4[0]
c5result = b5[0] * a5[0]
c32result = b32[0] * a2[0]
c32result2 = b3[0] * a32[0]
c2result_2 = b2[0] @ a2[0]
c3result_2 = b3[0] @ a3[0]
c4result_2 = b4[0] @ a4[0]
c5result_2 = b5[0] @ a5[0]
c32result_2 = b32[0] @ a2[0]
c32result2_2 = b3[0] @ a32[0]
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(2):
for j in range(2):
outcomponents[idx] = wptype(2) * c2result[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * c3result[i, j]
idx = idx + 1
for i in range(4):
for j in range(4):
outcomponents[idx] = wptype(2) * c4result[i, j]
idx = idx + 1
for i in range(5):
for j in range(5):
outcomponents[idx] = wptype(2) * c5result[i, j]
idx = idx + 1
for i in range(3):
for j in range(2):
outcomponents[idx] = wptype(2) * c32result[i, j]
idx = idx + 1
for i in range(3):
for j in range(2):
outcomponents[idx] = wptype(2) * c32result2[i, j]
idx = idx + 1
for i in range(2):
for j in range(2):
outcomponents[idx] = wptype(2) * c2result_2[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * c3result_2[i, j]
idx = idx + 1
for i in range(4):
for j in range(4):
outcomponents[idx] = wptype(2) * c4result_2[i, j]
idx = idx + 1
for i in range(5):
for j in range(5):
outcomponents[idx] = wptype(2) * c5result_2[i, j]
idx = idx + 1
for i in range(3):
for j in range(2):
outcomponents[idx] = wptype(2) * c32result_2[i, j]
idx = idx + 1
for i in range(3):
for j in range(2):
outcomponents[idx] = wptype(2) * c32result2_2[i, j]
idx = idx + 1
kernel = getkernel(check_mat_mat_mul, suffix=dtype.__name__)
if register_kernels:
return
v2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, [1, 5, 5], dtype), dtype=mat55, requires_grad=True, device=device)
v32 = wp.array(randvals(rng, [1, 3, 2], dtype), dtype=mat32, requires_grad=True, device=device)
m2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
m3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
m4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
m5 = wp.array(randvals(rng, [1, 5, 5], dtype), dtype=mat55, requires_grad=True, device=device)
m32 = wp.array(randvals(rng, [1, 3, 2], dtype), dtype=mat32, requires_grad=True, device=device)
outcomponents = wp.zeros(
2 * (2 * 2 + 3 * 3 + 4 * 4 + 5 * 5 + 3 * 2 + 3 * 2), dtype=wptype, requires_grad=True, device=device
)
wp.launch(kernel, dim=1, inputs=[v2, v3, v4, v5, v32, m2, m3, m4, m5, m32], outputs=[outcomponents], device=device)
outcomponents_np = outcomponents.numpy()
assert_np_equal(outcomponents_np[:4].reshape((2, 2)), 2 * np.matmul(m2.numpy()[0], v2.numpy()[0]), tol=tol)
assert_np_equal(outcomponents_np[4:13].reshape((3, 3)), 2 * np.matmul(m3.numpy()[0], v3.numpy()[0]), tol=tol)
assert_np_equal(outcomponents_np[13:29].reshape((4, 4)), 2 * np.matmul(m4.numpy()[0], v4.numpy()[0]), tol=2 * tol)
assert_np_equal(outcomponents_np[29:54].reshape((5, 5)), 2 * np.matmul(m5.numpy()[0], v5.numpy()[0]), tol=10 * tol)
assert_np_equal(outcomponents_np[54:60].reshape((3, 2)), 2 * np.matmul(m32.numpy()[0], v2.numpy()[0]), tol=5 * tol)
assert_np_equal(outcomponents_np[60:66].reshape((3, 2)), 2 * np.matmul(m3.numpy()[0], v32.numpy()[0]), tol=5 * tol)
assert_np_equal(outcomponents_np[66:70].reshape((2, 2)), 2 * np.matmul(m2.numpy()[0], v2.numpy()[0]), tol=tol)
assert_np_equal(outcomponents_np[70:79].reshape((3, 3)), 2 * np.matmul(m3.numpy()[0], v3.numpy()[0]), tol=tol)
assert_np_equal(outcomponents_np[79:95].reshape((4, 4)), 2 * np.matmul(m4.numpy()[0], v4.numpy()[0]), tol=2 * tol)
assert_np_equal(outcomponents_np[95:120].reshape((5, 5)), 2 * np.matmul(m5.numpy()[0], v5.numpy()[0]), tol=10 * tol)
assert_np_equal(
outcomponents_np[120:126].reshape((3, 2)), 2 * np.matmul(m32.numpy()[0], v2.numpy()[0]), tol=5 * tol
)
assert_np_equal(
outcomponents_np[126:132].reshape((3, 2)), 2 * np.matmul(m3.numpy()[0], v32.numpy()[0]), tol=5 * tol
)
if dtype in np_float_types:
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for v, m in [(v2, m2), (v3, m3), (v4, m4), (v5, m5), (v2, m32), (v32, m3)]:
rows, cols = m.dtype._shape_[0], v.dtype._shape_[1]
for i in range(rows):
for j in range(cols):
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[v2, v3, v4, v5, v32, m2, m3, m4, m5, m32],
outputs=[outcomponents],
device=device,
)
wp.launch(
output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device
)
tape.backward(loss=out)
expected = np.zeros(v.dtype._shape_, dtype=dtype)
expected[:, j] = 2 * m.numpy()[0, i, :]
assert_np_equal(tape.gradients[v].numpy()[0], expected, tol=10 * tol)
expected = np.zeros(m.dtype._shape_, dtype=dtype)
expected[i, :] = 2 * v.numpy()[0, :, j]
assert_np_equal(tape.gradients[m].numpy()[0], expected, tol=10 * tol)
tape.zero()
idx = idx + 1
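# component-wise (Hadamard) multiplication via wp.cw_mul, checked against NumPy's elementwise product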
def test_cw_multiplication(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
mat55 = wp.types.matrix(shape=(5, 5), dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_cw_mul(
s2: wp.array(dtype=mat22),
s3: wp.array(dtype=mat33),
s4: wp.array(dtype=mat44),
s5: wp.array(dtype=mat55),
v2: wp.array(dtype=mat22),
v3: wp.array(dtype=mat33),
v4: wp.array(dtype=mat44),
v5: wp.array(dtype=mat55),
outcomponents: wp.array(dtype=wptype),
):
v2result = wptype(2) * wp.cw_mul(v2[0], s2[0])
v3result = wptype(2) * wp.cw_mul(v3[0], s3[0])
v4result = wptype(2) * wp.cw_mul(v4[0], s4[0])
v5result = wptype(2) * wp.cw_mul(v5[0], s5[0])
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(2):
for j in range(2):
outcomponents[idx] = v2result[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = v3result[i, j]
idx = idx + 1
for i in range(4):
for j in range(4):
outcomponents[idx] = v4result[i, j]
idx = idx + 1
for i in range(5):
for j in range(5):
outcomponents[idx] = v5result[i, j]
idx = idx + 1
kernel = getkernel(check_mat_cw_mul, suffix=dtype.__name__)
if register_kernels:
return
s2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
s3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
s4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
s5 = wp.array(randvals(rng, [1, 5, 5], dtype), dtype=mat55, requires_grad=True, device=device)
v2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, [1, 5, 5], dtype), dtype=mat55, requires_grad=True, device=device)
outcomponents = wp.zeros(2 * 2 + 3 * 3 + 4 * 4 + 5 * 5, dtype=wptype, requires_grad=True, device=device)
wp.launch(
kernel,
dim=1,
inputs=[
s2,
s3,
s4,
s5,
v2,
v3,
v4,
v5,
],
outputs=[outcomponents],
device=device,
)
outcomponents_np = outcomponents.numpy()
assert_np_equal(outcomponents_np[:4], 2 * (v2.numpy() * s2.numpy()).reshape(-1), tol=50 * tol)
assert_np_equal(outcomponents_np[4:13], 2 * (v3.numpy() * s3.numpy()).reshape(-1), tol=50 * tol)
assert_np_equal(outcomponents_np[13:29], 2 * (v4.numpy() * s4.numpy()).reshape(-1), tol=50 * tol)
assert_np_equal(outcomponents_np[29:54], 2 * (v5.numpy() * s5.numpy()).reshape(-1), tol=50 * tol)
if dtype in np_float_types:
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for dim, in1, in2 in [(2, s2, v2), (3, s3, v3), (4, s4, v4), (5, s5, v5)]:
for i in range(dim):
for j in range(dim):
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
s2,
s3,
s4,
s5,
v2,
v3,
v4,
v5,
],
outputs=[outcomponents],
device=device,
)
wp.launch(
output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device
)
tape.backward(loss=out)
expectedresult = np.zeros((dim, dim), dtype=dtype)
expectedresult[i, j] = 2 * in1.numpy()[0][i, j]
assert_np_equal(tape.gradients[in2].numpy()[0], expectedresult, tol=5 * tol)
expectedresult[i, j] = 2 * in2.numpy()[0][i, j]
assert_np_equal(tape.gradients[in1].numpy()[0], expectedresult, tol=5 * tol)
tape.zero()
idx = idx + 1
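# component-wise division via wp.cw_div; small denominators are nudged to 1 below to
# avoid divide-by-zero and float16 overflow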
def test_cw_division(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
mat55 = wp.types.matrix(shape=(5, 5), dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_cw_div(
s2: wp.array(dtype=mat22),
s3: wp.array(dtype=mat33),
s4: wp.array(dtype=mat44),
s5: wp.array(dtype=mat55),
v2: wp.array(dtype=mat22),
v3: wp.array(dtype=mat33),
v4: wp.array(dtype=mat44),
v5: wp.array(dtype=mat55),
outcomponents: wp.array(dtype=wptype),
):
v2result = wptype(2) * wp.cw_div(v2[0], s2[0])
v3result = wptype(2) * wp.cw_div(v3[0], s3[0])
v4result = wptype(2) * wp.cw_div(v4[0], s4[0])
v5result = wptype(2) * wp.cw_div(v5[0], s5[0])
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(2):
for j in range(2):
outcomponents[idx] = v2result[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = v3result[i, j]
idx = idx + 1
for i in range(4):
for j in range(4):
outcomponents[idx] = v4result[i, j]
idx = idx + 1
for i in range(5):
for j in range(5):
outcomponents[idx] = v5result[i, j]
idx = idx + 1
kernel = getkernel(check_mat_cw_div, suffix=dtype.__name__)
if register_kernels:
return
s2 = randvals(rng, [1, 2, 2], dtype)
s3 = randvals(rng, [1, 3, 3], dtype)
s4 = randvals(rng, [1, 4, 4], dtype)
s5 = randvals(rng, [1, 5, 5], dtype)
# set denominators to 1 if their magnitudes are small
# to prevent divide by zero, or overflows if we're testing
# float16:
s2[np.abs(s2) < 1.0e-2] = 1
s3[np.abs(s3) < 1.0e-2] = 1
s4[np.abs(s4) < 1.0e-2] = 1
s5[np.abs(s5) < 1.0e-2] = 1
s2 = wp.array(s2, dtype=mat22, requires_grad=True, device=device)
s3 = wp.array(s3, dtype=mat33, requires_grad=True, device=device)
s4 = wp.array(s4, dtype=mat44, requires_grad=True, device=device)
s5 = wp.array(s5, dtype=mat55, requires_grad=True, device=device)
v2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, [1, 5, 5], dtype), dtype=mat55, requires_grad=True, device=device)
outcomponents = wp.zeros(2 * 2 + 3 * 3 + 4 * 4 + 5 * 5, dtype=wptype, requires_grad=True, device=device)
wp.launch(
kernel,
dim=1,
inputs=[
s2,
s3,
s4,
s5,
v2,
v3,
v4,
v5,
],
outputs=[outcomponents],
device=device,
)
if dtype in np_float_types:
assert_np_equal(outcomponents.numpy()[:4], 2 * (v2.numpy() / s2.numpy()).reshape(-1), tol=50 * tol)
assert_np_equal(outcomponents.numpy()[4:13], 2 * (v3.numpy() / s3.numpy()).reshape(-1), tol=50 * tol)
assert_np_equal(outcomponents.numpy()[13:29], 2 * (v4.numpy() / s4.numpy()).reshape(-1), tol=50 * tol)
assert_np_equal(outcomponents.numpy()[29:54], 2 * (v5.numpy() / s5.numpy()).reshape(-1), tol=50 * tol)
else:
assert_np_equal(outcomponents.numpy()[:4], 2 * (v2.numpy() // s2.numpy()).reshape(-1), tol=50 * tol)
assert_np_equal(outcomponents.numpy()[4:13], 2 * (v3.numpy() // s3.numpy()).reshape(-1), tol=50 * tol)
assert_np_equal(outcomponents.numpy()[13:29], 2 * (v4.numpy() // s4.numpy()).reshape(-1), tol=50 * tol)
assert_np_equal(outcomponents.numpy()[29:54], 2 * (v5.numpy() // s5.numpy()).reshape(-1), tol=50 * tol)
if dtype in np_float_types:
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for dim, s, v in [(2, s2, v2), (3, s3, v3), (4, s4, v4), (5, s5, v5)]:
for i in range(dim):
for j in range(dim):
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
s2,
s3,
s4,
s5,
v2,
v3,
v4,
v5,
],
outputs=[outcomponents],
device=device,
)
wp.launch(
output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device
)
tape.backward(loss=out)
# y = v/s
# dy/dv = 1.0/s
# dy/ds = -v/s^2
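                    # with the extra factor of 2 from the wptype(2) scaling in the kernel: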
expectedresult = np.zeros((dim, dim), dtype=dtype)
expectedresult[i, j] = 2.0 / (s.numpy()[0, i, j])
assert_np_equal(tape.gradients[v].numpy()[0], expectedresult, tol=50 * tol)
expectedresult[i, j] = -2.0 * v.numpy()[0, i, j] / (s.numpy()[0, i, j] ** 2)
assert_np_equal(
tape.gradients[s].numpy()[0], expectedresult, tol=abs(outcomponents.numpy()[idx]) * 50 * tol
)
tape.zero()
idx = idx + 1
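# wp.outer(s, v) builds the rank-one matrix with entries s_i * v_j, including the
# rectangular 2x5 case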
def test_outer_product(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_outer_product(
s2: wp.array(dtype=vec2),
s3: wp.array(dtype=vec3),
s4: wp.array(dtype=vec4),
s5: wp.array(dtype=vec5),
v2: wp.array(dtype=vec2),
v3: wp.array(dtype=vec3),
v4: wp.array(dtype=vec4),
v5: wp.array(dtype=vec5),
outcomponents: wp.array(dtype=wptype),
):
m22result = wptype(2) * wp.outer(s2[0], v2[0])
m33result = wptype(2) * wp.outer(s3[0], v3[0])
m44result = wptype(2) * wp.outer(s4[0], v4[0])
m55result = wptype(2) * wp.outer(s5[0], v5[0])
m25result = wptype(2) * wp.outer(s2[0], v5[0])
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(2):
for j in range(2):
outcomponents[idx] = m22result[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = m33result[i, j]
idx = idx + 1
for i in range(4):
for j in range(4):
outcomponents[idx] = m44result[i, j]
idx = idx + 1
for i in range(5):
for j in range(5):
outcomponents[idx] = m55result[i, j]
idx = idx + 1
for i in range(2):
for j in range(5):
outcomponents[idx] = m25result[i, j]
idx = idx + 1
kernel = getkernel(check_mat_outer_product, suffix=dtype.__name__)
if register_kernels:
return
s2 = wp.array(randvals(rng, [1, 2], dtype), dtype=vec2, requires_grad=True, device=device)
s3 = wp.array(randvals(rng, [1, 3], dtype), dtype=vec3, requires_grad=True, device=device)
s4 = wp.array(randvals(rng, [1, 4], dtype), dtype=vec4, requires_grad=True, device=device)
s5 = wp.array(randvals(rng, [1, 5], dtype), dtype=vec5, requires_grad=True, device=device)
v2 = wp.array(randvals(rng, [1, 2], dtype), dtype=vec2, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, [1, 3], dtype), dtype=vec3, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, [1, 4], dtype), dtype=vec4, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, [1, 5], dtype), dtype=vec5, requires_grad=True, device=device)
outcomponents = wp.zeros(2 * 2 + 3 * 3 + 4 * 4 + 5 * 5 + 2 * 5, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[s2, s3, s4, s5, v2, v3, v4, v5], outputs=[outcomponents], device=device)
outcomponents_np = outcomponents.numpy()
assert_np_equal(outcomponents_np[:4].reshape((2, 2)), 2 * s2.numpy()[0, :, None] * v2.numpy()[0, None, :], tol=tol)
assert_np_equal(
outcomponents_np[4:13].reshape((3, 3)), 2 * s3.numpy()[0, :, None] * v3.numpy()[0, None, :], tol=10 * tol
)
assert_np_equal(
outcomponents_np[13:29].reshape((4, 4)), 2 * s4.numpy()[0, :, None] * v4.numpy()[0, None, :], tol=10 * tol
)
assert_np_equal(
outcomponents_np[29:54].reshape((5, 5)), 2 * s5.numpy()[0, :, None] * v5.numpy()[0, None, :], tol=10 * tol
)
assert_np_equal(
outcomponents_np[54:].reshape(2, 5), 2 * s2.numpy()[0, :, None] * v5.numpy()[0, None, :], tol=10 * tol
)
if dtype in np_float_types:
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for s, v in [(s2, v2), (s3, v3), (s4, v4), (s5, v5), (s2, v5)]:
rows = s.dtype._length_
cols = v.dtype._length_
for i in range(rows):
for j in range(cols):
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
s2,
s3,
s4,
s5,
v2,
v3,
v4,
v5,
],
outputs=[outcomponents],
device=device,
)
wp.launch(
output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device
)
tape.backward(loss=out)
                    # this component is s_i * v_j, so its s gradient is nonzero
                    # at the ith component and its v gradient is nonzero at the jth component:
expectedresult = np.zeros((rows), dtype=dtype)
expectedresult[i] = 2 * v.numpy()[0, j]
assert_np_equal(tape.gradients[s].numpy()[0], expectedresult, tol=10 * tol)
expectedresult = np.zeros((cols), dtype=dtype)
expectedresult[j] = 2 * s.numpy()[0, i]
assert_np_equal(tape.gradients[v].numpy()[0], expectedresult, tol=10 * tol)
tape.zero()
idx = idx + 1
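# wp.transpose, including the non-square mat32 case; selecting component (i, j) of the
# transpose back-propagates a unit gradient to component (j, i) of the input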
def test_transpose(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat32 = wp.types.matrix(shape=(3, 2), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
mat55 = wp.types.matrix(shape=(5, 5), dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_transpose(
m2: wp.array(dtype=mat22),
m3: wp.array(dtype=mat33),
m4: wp.array(dtype=mat44),
m5: wp.array(dtype=mat55),
m32: wp.array(dtype=mat32),
outcomponents: wp.array(dtype=wptype),
):
# multiply outputs by 2 so we've got something to backpropagate:
mat2 = wptype(2) * wp.transpose(m2[0])
mat3 = wptype(2) * wp.transpose(m3[0])
mat4 = wptype(2) * wp.transpose(m4[0])
mat5 = wptype(2) * wp.transpose(m5[0])
mat32 = wptype(2) * wp.transpose(m32[0])
idx = 0
for i in range(2):
for j in range(2):
outcomponents[idx] = mat2[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = mat3[i, j]
idx = idx + 1
for i in range(4):
for j in range(4):
outcomponents[idx] = mat4[i, j]
idx = idx + 1
for i in range(5):
for j in range(5):
outcomponents[idx] = mat5[i, j]
idx = idx + 1
for i in range(2):
for j in range(3):
outcomponents[idx] = mat32[i, j]
idx = idx + 1
kernel = getkernel(check_mat_transpose, suffix=dtype.__name__)
if register_kernels:
return
m2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
m3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
m4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
m5 = wp.array(randvals(rng, [1, 5, 5], dtype), dtype=mat55, requires_grad=True, device=device)
m32 = wp.array(randvals(rng, [1, 3, 2], dtype), dtype=mat32, requires_grad=True, device=device)
outcomponents = wp.zeros(2 * 2 + 3 * 3 + 4 * 4 + 5 * 5 + 2 * 3, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[m2, m3, m4, m5, m32], outputs=[outcomponents], device=device)
assert_np_equal(outcomponents.numpy()[:4], 2 * m2.numpy()[0].T.reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[4:13], 2 * m3.numpy()[0].T.reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[13:29], 2 * m4.numpy()[0].T.reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[29:54], 2 * m5.numpy()[0].T.reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[54:], 2 * m32.numpy()[0].T.reshape(-1), tol=tol)
if dtype in np_float_types:
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for input in [m2, m3, m4, m5]:
for i in range(input.dtype._shape_[0]):
for j in range(input.dtype._shape_[1]):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[m2, m3, m4, m5, m32], outputs=[outcomponents], device=device)
wp.launch(
output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device
)
tape.backward(loss=out)
expectedresult = np.zeros((input.dtype._shape_[1], input.dtype._shape_[0]), dtype=dtype)
expectedresult[j, i] = 2
assert_np_equal(tape.gradients[input].numpy()[0], expectedresult)
tape.zero()
idx = idx + 1
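# matrix / scalar division; for floats, d(m/s)/dm = 1/s and d(m/s)/ds = -m/s^2,
# while integer types use floor division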
def test_scalar_division(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
mat55 = wp.types.matrix(shape=(5, 5), dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_scalar_div(
s: wp.array(dtype=wptype),
m2: wp.array(dtype=mat22),
m3: wp.array(dtype=mat33),
m4: wp.array(dtype=mat44),
m5: wp.array(dtype=mat55),
outcomponents: wp.array(dtype=wptype),
):
m2result = m2[0] / s[0]
m3result = m3[0] / s[0]
m4result = m4[0] / s[0]
m5result = m5[0] / s[0]
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(2):
for j in range(2):
outcomponents[idx] = wptype(2) * m2result[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * m3result[i, j]
idx = idx + 1
for i in range(4):
for j in range(4):
outcomponents[idx] = wptype(2) * m4result[i, j]
idx = idx + 1
for i in range(5):
for j in range(5):
outcomponents[idx] = wptype(2) * m5result[i, j]
idx = idx + 1
kernel = getkernel(check_mat_scalar_div, suffix=dtype.__name__)
if register_kernels:
return
s = wp.array(randvals(rng, [1], dtype), requires_grad=True, device=device)
m2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
m3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
m4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
m5 = wp.array(randvals(rng, [1, 5, 5], dtype), dtype=mat55, requires_grad=True, device=device)
outcomponents = wp.zeros(2 * 2 + 3 * 3 + 4 * 4 + 5 * 5, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[s, m2, m3, m4, m5], outputs=[outcomponents], device=device)
sval = s.numpy()[0]
if dtype in np_float_types:
assert_np_equal(outcomponents.numpy()[:4], 2 * m2.numpy().reshape(-1) / sval, tol=tol)
assert_np_equal(outcomponents.numpy()[4:13], 2 * m3.numpy().reshape(-1) / sval, tol=10 * tol)
assert_np_equal(outcomponents.numpy()[13:29], 2 * m4.numpy().reshape(-1) / sval, tol=10 * tol)
assert_np_equal(outcomponents.numpy()[29:54], 2 * m5.numpy().reshape(-1) / sval, tol=10 * tol)
else:
assert_np_equal(outcomponents.numpy()[:4], 2 * (m2.numpy().reshape(-1) // sval), tol=tol)
assert_np_equal(outcomponents.numpy()[4:13], 2 * (m3.numpy().reshape(-1) // sval), tol=10 * tol)
assert_np_equal(outcomponents.numpy()[13:29], 2 * (m4.numpy().reshape(-1) // sval), tol=10 * tol)
assert_np_equal(outcomponents.numpy()[29:54], 2 * (m5.numpy().reshape(-1) // sval), tol=10 * tol)
if dtype in np_float_types:
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for dim, input in [(2, m2), (3, m3), (4, m4), (5, m5)]:
for i in range(dim):
for j in range(dim):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[s, m2, m3, m4, m5], outputs=[outcomponents], device=device)
wp.launch(
output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device
)
tape.backward(loss=out)
expectedresult = np.zeros((dim, dim), dtype=dtype)
expectedresult[i, j] = 2.0 / sval
assert_np_equal(tape.gradients[input].numpy()[0], expectedresult, tol=10 * tol)
assert_np_equal(
tape.gradients[s].numpy()[0], -2 * input.numpy()[0, i, j] / (sval * sval), tol=10 * tol
)
tape.zero()
idx = idx + 1
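# matrix addition; each output component has gradient 1 w.r.t. both operands
# (doubled here by the wptype(2) scaling)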
def test_addition(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 2.0e-2,
np.float32: 5.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
mat55 = wp.types.matrix(shape=(5, 5), dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_add(
s2: wp.array(dtype=mat22),
s3: wp.array(dtype=mat33),
s4: wp.array(dtype=mat44),
s5: wp.array(dtype=mat55),
v2: wp.array(dtype=mat22),
v3: wp.array(dtype=mat33),
v4: wp.array(dtype=mat44),
v5: wp.array(dtype=mat55),
outcomponents: wp.array(dtype=wptype),
):
v2result = v2[0] + s2[0]
v3result = v3[0] + s3[0]
v4result = v4[0] + s4[0]
v5result = v5[0] + s5[0]
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(2):
for j in range(2):
outcomponents[idx] = wptype(2) * v2result[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * v3result[i, j]
idx = idx + 1
for i in range(4):
for j in range(4):
outcomponents[idx] = wptype(2) * v4result[i, j]
idx = idx + 1
for i in range(5):
for j in range(5):
outcomponents[idx] = wptype(2) * v5result[i, j]
idx = idx + 1
kernel = getkernel(check_mat_add, suffix=dtype.__name__)
if register_kernels:
return
s2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
s3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
s4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
s5 = wp.array(randvals(rng, [1, 5, 5], dtype), dtype=mat55, requires_grad=True, device=device)
v2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, [1, 5, 5], dtype), dtype=mat55, requires_grad=True, device=device)
outcomponents = wp.zeros(2 * 2 + 3 * 3 + 4 * 4 + 5 * 5, dtype=wptype, requires_grad=True, device=device)
wp.launch(
kernel,
dim=1,
inputs=[
s2,
s3,
s4,
s5,
v2,
v3,
v4,
v5,
],
outputs=[outcomponents],
device=device,
)
assert_np_equal(outcomponents.numpy()[:4], 2 * (v2.numpy() + s2.numpy()).reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[4:13], 2 * (v3.numpy() + s3.numpy()).reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[13:29], 2 * (v4.numpy() + s4.numpy()).reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[29:54], 2 * (v5.numpy() + s5.numpy()).reshape(-1), tol=tol)
if dtype in np_float_types:
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for dim, in1, in2 in [(2, s2, v2), (3, s3, v3), (4, s4, v4), (5, s5, v5)]:
for i in range(dim):
for j in range(dim):
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
s2,
s3,
s4,
s5,
v2,
v3,
v4,
v5,
],
outputs=[outcomponents],
device=device,
)
wp.launch(
output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device
)
tape.backward(loss=out)
expectedresult = np.zeros((dim, dim), dtype=dtype)
expectedresult[i, j] = 2
assert_np_equal(tape.gradients[in2].numpy()[0], expectedresult, tol=10 * tol)
expectedresult[i, j] = 2
assert_np_equal(tape.gradients[in1].numpy()[0], expectedresult, tol=10 * tol)
tape.zero()
idx = idx + 1
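# wp.ddot is the Frobenius inner product: the sum over i, j of a[i, j] * b[i, j]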
def test_ddot(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
mat55 = wp.types.matrix(shape=(5, 5), dtype=wptype)
def check_mat_dot(
s2: wp.array(dtype=mat22),
s3: wp.array(dtype=mat33),
s4: wp.array(dtype=mat44),
s5: wp.array(dtype=mat55),
v2: wp.array(dtype=mat22),
v3: wp.array(dtype=mat33),
v4: wp.array(dtype=mat44),
v5: wp.array(dtype=mat55),
dot2: wp.array(dtype=wptype),
dot3: wp.array(dtype=wptype),
dot4: wp.array(dtype=wptype),
dot5: wp.array(dtype=wptype),
):
# multiply outputs by 2 so we've got something to backpropagate:
dot2[0] = wptype(2) * wp.ddot(v2[0], s2[0])
dot3[0] = wptype(2) * wp.ddot(v3[0], s3[0])
dot4[0] = wptype(2) * wp.ddot(v4[0], s4[0])
dot5[0] = wptype(2) * wp.ddot(v5[0], s5[0])
kernel = getkernel(check_mat_dot, suffix=dtype.__name__)
if register_kernels:
return
s2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
s3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
s4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
s5 = wp.array(randvals(rng, [1, 5, 5], dtype), dtype=mat55, requires_grad=True, device=device)
v2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, [1, 5, 5], dtype), dtype=mat55, requires_grad=True, device=device)
dot2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
dot3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
dot4 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
dot5 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
s2,
s3,
s4,
s5,
v2,
v3,
v4,
v5,
],
outputs=[dot2, dot3, dot4, dot5],
device=device,
)
assert_np_equal(dot2.numpy()[0], 2 * (v2.numpy() * s2.numpy()).sum(), tol=10 * tol)
assert_np_equal(dot3.numpy()[0], 2 * (v3.numpy() * s3.numpy()).sum(), tol=10 * tol)
assert_np_equal(dot4.numpy()[0], 2 * (v4.numpy() * s4.numpy()).sum(), tol=50 * tol)
assert_np_equal(dot5.numpy()[0], 2 * (v5.numpy() * s5.numpy()).sum(), tol=200 * tol)
if dtype in np_float_types:
tape.backward(loss=dot2)
sgrads = tape.gradients[s2].numpy()[0]
expected_grads = 2.0 * v2.numpy()[0]
assert_np_equal(sgrads, expected_grads, tol=10 * tol)
vgrads = tape.gradients[v2].numpy()[0]
expected_grads = 2.0 * s2.numpy()[0]
assert_np_equal(vgrads, expected_grads, tol=10 * tol)
tape.zero()
tape.backward(loss=dot3)
sgrads = tape.gradients[s3].numpy()[0]
expected_grads = 2.0 * v3.numpy()[0]
assert_np_equal(sgrads, expected_grads, tol=10 * tol)
vgrads = tape.gradients[v3].numpy()[0]
expected_grads = 2.0 * s3.numpy()[0]
assert_np_equal(vgrads, expected_grads, tol=10 * tol)
tape.zero()
tape.backward(loss=dot4)
sgrads = tape.gradients[s4].numpy()[0]
expected_grads = 2.0 * v4.numpy()[0]
assert_np_equal(sgrads, expected_grads, tol=10 * tol)
vgrads = tape.gradients[v4].numpy()[0]
expected_grads = 2.0 * s4.numpy()[0]
assert_np_equal(vgrads, expected_grads, tol=10 * tol)
tape.zero()
tape.backward(loss=dot5)
sgrads = tape.gradients[s5].numpy()[0]
expected_grads = 2.0 * v5.numpy()[0]
assert_np_equal(sgrads, expected_grads, tol=10 * tol)
vgrads = tape.gradients[v5].numpy()[0]
expected_grads = 2.0 * s5.numpy()[0]
assert_np_equal(vgrads, expected_grads, tol=10 * tol)
tape.zero()
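# wp.trace sums the diagonal, so its gradient w.r.t. the matrix is the identity
# (scaled by 2 here)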
def test_trace(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
mat55 = wp.types.matrix(shape=(5, 5), dtype=wptype)
def check_mat_trace(
v2: wp.array(dtype=mat22),
v3: wp.array(dtype=mat33),
v4: wp.array(dtype=mat44),
v5: wp.array(dtype=mat55),
tr2: wp.array(dtype=wptype),
tr3: wp.array(dtype=wptype),
tr4: wp.array(dtype=wptype),
tr5: wp.array(dtype=wptype),
):
# multiply outputs by 2 so we've got something to backpropagate:
tr2[0] = wptype(2) * wp.trace(v2[0])
tr3[0] = wptype(2) * wp.trace(v3[0])
tr4[0] = wptype(2) * wp.trace(v4[0])
tr5[0] = wptype(2) * wp.trace(v5[0])
kernel = getkernel(check_mat_trace, suffix=dtype.__name__)
if register_kernels:
return
v2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, [1, 5, 5], dtype), dtype=mat55, requires_grad=True, device=device)
tr2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tr3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tr4 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tr5 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
v2,
v3,
v4,
v5,
],
outputs=[
tr2,
tr3,
tr4,
tr5,
],
device=device,
)
assert_np_equal(tr2.numpy()[0], 2 * np.trace(v2.numpy()[0]), tol=10 * tol)
assert_np_equal(tr3.numpy()[0], 2 * np.trace(v3.numpy()[0]), tol=10 * tol)
assert_np_equal(tr4.numpy()[0], 2 * np.trace(v4.numpy()[0]), tol=200 * tol)
    assert_np_equal(tr5.numpy()[0], 2 * np.trace(v5.numpy()[0]), tol=200 * tol)
if dtype in np_float_types:
tape.backward(loss=tr2)
vgrads = tape.gradients[v2].numpy()[0]
assert_np_equal(vgrads, 2.0 * np.eye(2), tol=10 * tol)
tape.zero()
tape.backward(loss=tr3)
vgrads = tape.gradients[v3].numpy()[0]
assert_np_equal(vgrads, 2.0 * np.eye(3), tol=10 * tol)
tape.zero()
tape.backward(loss=tr4)
vgrads = tape.gradients[v4].numpy()[0]
assert_np_equal(vgrads, 2.0 * np.eye(4), tol=10 * tol)
tape.zero()
tape.backward(loss=tr5)
vgrads = tape.gradients[v5].numpy()[0]
assert_np_equal(vgrads, 2.0 * np.eye(5), tol=10 * tol)
tape.zero()
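# wp.diag builds a square matrix with the given vector along its diagonal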
def test_diag(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec5 = wp.types.vector(length=5, dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_diag(
s5: wp.array(dtype=vec5),
outcomponents: wp.array(dtype=wptype),
):
# multiply outputs by 2 so we've got something to backpropagate:
m55result = wptype(2) * wp.diag(s5[0])
idx = 0
for i in range(5):
for j in range(5):
outcomponents[idx] = m55result[i, j]
idx = idx + 1
kernel = getkernel(check_mat_diag, suffix=dtype.__name__)
if register_kernels:
return
s5 = wp.array(randvals(rng, [1, 5], dtype), dtype=vec5, requires_grad=True, device=device)
outcomponents = wp.zeros(5 * 5, dtype=wptype, requires_grad=True, device=device)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[s5], outputs=[outcomponents], device=device)
assert_np_equal(outcomponents.reshape((5, 5)).numpy(), 2 * np.diag(s5.numpy()[0]), tol=tol)
if dtype in np_float_types:
idx = 0
for i in range(5):
for j in range(5):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[s5], outputs=[outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
tape.backward(loss=out)
expectedresult = np.zeros(5, dtype=dtype)
if i == j:
expectedresult[i] = 2
assert_np_equal(tape.gradients[s5].numpy()[0], expectedresult, tol=10 * tol)
tape.zero()
idx = idx + 1
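# matrix types with identical shape and dtype should be interchangeable at the kernel boundary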
def test_equivalent_types(test, device, dtype, register_kernels=False):
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
# matrix types
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
mat55 = wp.types.matrix(shape=(5, 5), dtype=wptype)
# matrix types equivalent to the above
mat22_equiv = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33_equiv = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat44_equiv = wp.types.matrix(shape=(4, 4), dtype=wptype)
mat55_equiv = wp.types.matrix(shape=(5, 5), dtype=wptype)
# declare kernel with original types
def check_equivalence(
m2: mat22,
m3: mat33,
m4: mat44,
m5: mat55,
):
wp.expect_eq(m2, mat22(wptype(42)))
wp.expect_eq(m3, mat33(wptype(43)))
wp.expect_eq(m4, mat44(wptype(44)))
wp.expect_eq(m5, mat55(wptype(45)))
wp.expect_eq(m2, mat22_equiv(wptype(42)))
wp.expect_eq(m3, mat33_equiv(wptype(43)))
wp.expect_eq(m4, mat44_equiv(wptype(44)))
wp.expect_eq(m5, mat55_equiv(wptype(45)))
kernel = getkernel(check_equivalence, suffix=dtype.__name__)
if register_kernels:
return
# call kernel with equivalent types
m2 = mat22_equiv(42)
m3 = mat33_equiv(43)
m4 = mat44_equiv(44)
m5 = mat55_equiv(45)
wp.launch(kernel, dim=1, inputs=[m2, m3, m4, m5], device=device)
def test_conversions(test, device, dtype, register_kernels=False):
def check_matrices_equal(
m0: wp.mat22,
m1: wp.mat22,
m2: wp.mat22,
m3: wp.mat22,
m4: wp.mat22,
m5: wp.mat22,
m6: wp.mat22,
):
wp.expect_eq(m1, m0)
wp.expect_eq(m2, m0)
wp.expect_eq(m3, m0)
wp.expect_eq(m4, m0)
wp.expect_eq(m5, m0)
wp.expect_eq(m6, m0)
kernel = getkernel(check_matrices_equal, suffix=dtype.__name__)
if register_kernels:
return
m0 = wp.mat22(1, 2, 3, 4)
# test explicit conversions - constructing matrices from different containers
m1 = wp.mat22(((1, 2), (3, 4))) # nested tuples
m2 = wp.mat22([[1, 2], [3, 4]]) # nested lists
m3 = wp.mat22(np.array([[1, 2], [3, 4]], dtype=dtype)) # 2d array
m4 = wp.mat22((1, 2, 3, 4)) # flat tuple
m5 = wp.mat22([1, 2, 3, 4]) # flat list
m6 = wp.mat22(np.array([1, 2, 3, 4], dtype=dtype)) # 1d array
wp.launch(kernel, dim=1, inputs=[m0, m1, m2, m3, m4, m5, m6], device=device)
# test implicit conversions - passing different containers as matrices to wp.launch()
m1 = ((1, 2), (3, 4)) # nested tuples
m2 = [[1, 2], [3, 4]] # nested lists
m3 = np.array([[1, 2], [3, 4]], dtype=dtype) # 2d array
m4 = (1, 2, 3, 4) # flat tuple
m5 = [1, 2, 3, 4] # flat list
m6 = np.array([1, 2, 3, 4], dtype=dtype) # 1d array
wp.launch(kernel, dim=1, inputs=[m0, m1, m2, m3, m4, m5, m6], device=device)
devices = get_test_devices()
class TestMatScalarOps(unittest.TestCase):
pass
for dtype in np_scalar_types:
add_function_test(TestMatScalarOps, f"test_arrays_{dtype.__name__}", test_arrays, devices=devices, dtype=dtype)
add_function_test(TestMatScalarOps, f"test_components_{dtype.__name__}", test_components, devices=None, dtype=dtype)
add_function_test_register_kernel(
TestMatScalarOps, f"test_constructors_{dtype.__name__}", test_constructors, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestMatScalarOps,
f"test_anon_type_instance_{dtype.__name__}",
test_anon_type_instance,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestMatScalarOps, f"test_identity_{dtype.__name__}", test_identity, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestMatScalarOps, f"test_indexing_{dtype.__name__}", test_indexing, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestMatScalarOps, f"test_equality_{dtype.__name__}", test_equality, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestMatScalarOps,
f"test_scalar_multiplication_{dtype.__name__}",
test_scalar_multiplication,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestMatScalarOps,
f"test_matvec_multiplication_{dtype.__name__}",
test_matvec_multiplication,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestMatScalarOps,
f"test_vecmat_multiplication_{dtype.__name__}",
test_vecmat_multiplication,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestMatScalarOps,
f"test_matmat_multiplication_{dtype.__name__}",
test_matmat_multiplication,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestMatScalarOps,
f"test_cw_multiplication_{dtype.__name__}",
test_cw_multiplication,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestMatScalarOps, f"test_cw_division_{dtype.__name__}", test_cw_division, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestMatScalarOps, f"test_outer_product_{dtype.__name__}", test_outer_product, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestMatScalarOps, f"test_transpose_{dtype.__name__}", test_transpose, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestMatScalarOps, f"test_scalar_division_{dtype.__name__}", test_scalar_division, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestMatScalarOps, f"test_addition_{dtype.__name__}", test_addition, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestMatScalarOps, f"test_ddot_{dtype.__name__}", test_ddot, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestMatScalarOps, f"test_trace_{dtype.__name__}", test_trace, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestMatScalarOps, f"test_diag_{dtype.__name__}", test_diag, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestMatScalarOps, f"test_get_diag_{dtype.__name__}", test_diag, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestMatScalarOps, f"test_equivalent_types_{dtype.__name__}", test_equivalent_types, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestMatScalarOps, f"test_conversions_{dtype.__name__}", test_conversions, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestMatScalarOps, f"test_constants_{dtype.__name__}", test_constants, devices=None, dtype=dtype
)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=True)
NVIDIA/warp/warp/tests/test_mlp.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
@wp.func
def mlp_activation(z: float):
return wp.tanh(z)
@wp.kernel
def mlp_kernel(
weights: wp.array2d(dtype=float),
bias: wp.array(dtype=float),
x: wp.array2d(dtype=float),
y: wp.array2d(dtype=float),
):
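    # evaluates one batch column: y[:, tid] = activation(weights @ x[:, tid] + bias)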
wp.mlp(weights, bias, mlp_activation, wp.tid(), x, y)
@wp.kernel
def loss_kernel(x: wp.array2d(dtype=float), loss: wp.array(dtype=float)):
i, j = wp.tid()
wp.atomic_add(loss, 0, x[i, j] * x[i, j])
def test_mlp(test, device):
rng = np.random.default_rng(123)
m = 10
n = 200
batches = 20000
weights = wp.array(rng.random(size=(m, n)) * 0.5 - 0.5, dtype=float, device=device)
bias = wp.array(rng.random(size=m) * 0.5 - 0.5, dtype=float, device=device)
x = wp.array(rng.random(size=(n, batches)), dtype=float, device=device)
y = wp.zeros(shape=(m, batches), device=device)
with wp.ScopedTimer("warp", active=False):
wp.launch(mlp_kernel, dim=batches, inputs=[weights, bias, x, y], device=device)
wp.synchronize()
    # reference result: y = tanh(A @ x + b), computed with NumPy
with wp.ScopedTimer("numpy", active=False):
expect = np.tanh(weights.numpy().reshape(m, n) @ x.numpy().reshape(-1, batches) + bias.numpy().reshape(m, 1))
result = y.numpy().reshape(-1, batches)
assert_np_equal(result, expect, tol=1.0e-6)
def create_mlp(m, n):
import torch
torch.manual_seed(0)
class FeedForward(torch.nn.Module):
def __init__(self, input_size, hidden_size):
super(FeedForward, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.fc1 = torch.nn.Linear(self.input_size, self.hidden_size)
self.act = torch.nn.Tanh()
def forward(self, x):
out = self.fc1(x)
out = self.act(out)
return out
return FeedForward(m, n)
def create_golden():
import torch
rng = np.random.default_rng(123)
input_size = 32
hidden_size = 16
batch_size = 64
network = create_mlp(input_size, hidden_size)
x = torch.Tensor(rng.random(size=(batch_size, input_size)))
x.requires_grad = True
y = network.forward(x)
y.retain_grad()
loss = torch.inner(y.flatten(), y.flatten())
loss.backward(retain_graph=True)
results = {}
results["weights"] = network.fc1.weight.cpu().detach().numpy()
results["weights_grad"] = network.fc1.weight.grad.cpu().detach().numpy()
results["bias"] = network.fc1.bias.cpu().detach().numpy()
results["bias_grad"] = network.fc1.bias.grad.cpu().detach().numpy()
results["x"] = x.cpu().detach().numpy()
results["x_grad"] = x.grad.cpu().detach().numpy()
results["y"] = y.cpu().detach().numpy()
results["y_grad"] = y.grad.cpu().detach().numpy()
results["loss"] = loss.cpu().detach().numpy()
np.save(os.path.join(os.path.dirname(__file__), "assets/mlp_golden.npy"), results, allow_pickle=True)
def load_golden():
return np.load(os.path.join(os.path.dirname(__file__), "assets/mlp_golden.npy"), allow_pickle=True).item()
def test_mlp_grad(test, device):
# uncomment to re-build golden files
# create_golden()
results = load_golden()
torch_weights = results["weights"]
torch_weights_grad = results["weights_grad"]
torch_bias = results["bias"]
torch_bias_grad = results["bias_grad"]
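    # PyTorch stores activations as (batch, features); the Warp kernel expects
    # (features, batch), hence the transposes below.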
torch_x = results["x"].T
torch_x_grad = results["x_grad"].T
torch_y = results["y"].T
torch_y_grad = results["y_grad"].T
torch_loss = results["loss"].T
weights = wp.array(torch_weights, dtype=float, device=device, requires_grad=True)
bias = wp.array(torch_bias, dtype=float, device=device, requires_grad=True)
x = wp.array(torch_x, dtype=float, device=device, requires_grad=True)
y = wp.array(torch_y, dtype=float, device=device, requires_grad=True)
y.zero_()
loss = wp.zeros(1, dtype=float, device=device, requires_grad=True)
m = torch_weights.shape[0]
n = torch_weights.shape[1]
b = torch_x.shape[1]
tape = wp.Tape()
with tape:
wp.launch(mlp_kernel, dim=b, inputs=[weights, bias, x, y], device=device)
wp.launch(loss_kernel, dim=y.shape, inputs=[y, loss], device=device)
tape.backward(loss=loss)
# check forward result
assert_np_equal(y.numpy().reshape(-1, b), torch_y, tol=1.0e-1)
assert_np_equal(loss.numpy(), torch_loss, tol=1.0e-1)
# check backward result
assert_np_equal(tape.gradients[weights].numpy().reshape(m, n), torch_weights_grad, tol=1.0e-1)
assert_np_equal(tape.gradients[bias].numpy(), torch_bias_grad, tol=1.0e-1)
assert_np_equal(tape.gradients[x].numpy().reshape(n, b), torch_x_grad, tol=1.0e-1)
assert_np_equal(tape.gradients[y].numpy().reshape(m, b), torch_y_grad, tol=1.0e-1)
def profile_mlp_torch():
import torch
rng = np.random.default_rng(123)
m = 128
n = 64
steps = 20
for i in range(steps):
b = 2**i
network = create_mlp(m, n)
x = torch.Tensor(rng.random(size=(b, m)))
with wp.ScopedTimer("torch_forward" + str(b)):
y = network.forward(x)
torch.cuda.synchronize()
for i in range(steps):
b = 2**i
network = create_mlp(m, n)
x = torch.Tensor(rng.random(size=(b, m)))
y = network.forward(x)
loss = torch.norm(y)
# run once to alloc all gradients
loss.backward(retain_graph=True)
with wp.ScopedTimer("torch-backward" + str(b)):
loss.backward()
torch.cuda.synchronize()
def profile_mlp_warp(device):
rng = np.random.default_rng(123)
m = 128
n = 64
steps = 20
for i in range(steps):
b = 2**i
weights = wp.array(rng.random(size=(m, n)) * 0.5 - 0.5, dtype=float, device=device)
bias = wp.array(rng.random(size=m) * 0.5 - 0.5, dtype=float, device=device)
x = wp.array(rng.random(size=(n, b)), dtype=float, device=device)
y = wp.zeros(shape=(m, b), device=device)
with wp.ScopedTimer("warp-forward" + str(b)):
wp.launch(mlp_kernel, dim=b, inputs=[weights, bias, x, y], device=device)
wp.synchronize()
for i in range(steps):
b = 2**i
weights = wp.array(rng.random(size=(m, n)) * 0.5 - 0.5, dtype=float, device=device, requires_grad=True)
bias = wp.array(rng.random(size=m) * 0.5 - 0.5, dtype=float, device=device, requires_grad=True)
x = wp.array(rng.random(size=(n, b)), dtype=float, device=device, requires_grad=True)
y = wp.zeros(shape=(m, b), device=device, requires_grad=True)
loss = wp.zeros(1, dtype=float, device=device)
tape = wp.Tape()
with tape:
wp.launch(mlp_kernel, dim=b, inputs=[weights, bias, x, y], device=device)
wp.launch(loss_kernel, dim=y.size, inputs=[y.flatten(), loss], device=device)
# run backward once to ensure all adjoints are allocated
tape.backward(loss)
wp.synchronize()
with wp.ScopedTimer("warp-backward" + str(b)):
tape.backward(loss)
wp.synchronize()
# profile_mlp_warp("cuda")
# profile_mlp_torch()
devices = get_test_devices()
class TestMLP(unittest.TestCase):
pass
add_function_test(TestMLP, "test_mlp", test_mlp, devices=devices)
add_function_test(TestMLP, "test_mlp_grad", test_mlp_grad, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=False)
NVIDIA/warp/warp/tests/test_array.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def kernel_1d(a: wp.array(dtype=int, ndim=1)):
i = wp.tid()
wp.expect_eq(a[i], wp.tid())
a[i] = a[i] * 2
wp.atomic_add(a, i, 1)
wp.expect_eq(a[i], wp.tid() * 2 + 1)
def test_1d(test, device):
dim_x = 4
a = np.arange(0, dim_x, dtype=np.int32)
arr = wp.array(a, device=device)
test.assertEqual(arr.shape, a.shape)
test.assertEqual(arr.size, a.size)
test.assertEqual(arr.ndim, a.ndim)
with CheckOutput(test):
wp.launch(kernel_1d, dim=arr.size, inputs=[arr], device=device)
@wp.kernel
def kernel_2d(a: wp.array(dtype=int, ndim=2), m: int, n: int):
i = wp.tid() // n
j = wp.tid() % n
wp.expect_eq(a[i, j], wp.tid())
wp.expect_eq(a[i][j], wp.tid())
a[i, j] = a[i, j] * 2
wp.atomic_add(a, i, j, 1)
wp.expect_eq(a[i, j], wp.tid() * 2 + 1)
def test_2d(test, device):
dim_x = 4
dim_y = 2
a = np.arange(0, dim_x * dim_y, dtype=np.int32)
a = a.reshape(dim_x, dim_y)
arr = wp.array(a, device=device)
test.assertEqual(arr.shape, a.shape)
test.assertEqual(arr.size, a.size)
test.assertEqual(arr.ndim, a.ndim)
with CheckOutput(test):
wp.launch(kernel_2d, dim=arr.size, inputs=[arr, dim_x, dim_y], device=device)
@wp.kernel
def kernel_3d(a: wp.array(dtype=int, ndim=3), m: int, n: int, o: int):
i = wp.tid() // (n * o)
j = wp.tid() % (n * o) // o
k = wp.tid() % o
wp.expect_eq(a[i, j, k], wp.tid())
wp.expect_eq(a[i][j][k], wp.tid())
a[i, j, k] = a[i, j, k] * 2
a[i][j][k] = a[i][j][k] * 2
wp.atomic_add(a, i, j, k, 1)
wp.expect_eq(a[i, j, k], wp.tid() * 4 + 1)
def test_3d(test, device):
dim_x = 8
dim_y = 4
dim_z = 2
a = np.arange(0, dim_x * dim_y * dim_z, dtype=np.int32)
a = a.reshape(dim_x, dim_y, dim_z)
arr = wp.array(a, device=device)
test.assertEqual(arr.shape, a.shape)
test.assertEqual(arr.size, a.size)
test.assertEqual(arr.ndim, a.ndim)
with CheckOutput(test):
wp.launch(kernel_3d, dim=arr.size, inputs=[arr, dim_x, dim_y, dim_z], device=device)
@wp.kernel
def kernel_4d(a: wp.array(dtype=int, ndim=4), m: int, n: int, o: int, p: int):
i = wp.tid() // (n * o * p)
j = wp.tid() % (n * o * p) // (o * p)
    k = wp.tid() % (o * p) // p
l = wp.tid() % p
wp.expect_eq(a[i, j, k, l], wp.tid())
wp.expect_eq(a[i][j][k][l], wp.tid())
def test_4d(test, device):
dim_x = 16
dim_y = 8
dim_z = 4
dim_w = 2
a = np.arange(0, dim_x * dim_y * dim_z * dim_w, dtype=np.int32)
a = a.reshape(dim_x, dim_y, dim_z, dim_w)
arr = wp.array(a, device=device)
test.assertEqual(arr.shape, a.shape)
test.assertEqual(arr.size, a.size)
test.assertEqual(arr.ndim, a.ndim)
with CheckOutput(test):
wp.launch(kernel_4d, dim=arr.size, inputs=[arr, dim_x, dim_y, dim_z, dim_w], device=device)
@wp.kernel
def kernel_4d_transposed(a: wp.array(dtype=int, ndim=4), m: int, n: int, o: int, p: int):
i = wp.tid() // (n * o * p)
j = wp.tid() % (n * o * p) // (o * p)
    k = wp.tid() % (o * p) // p
l = wp.tid() % p
wp.expect_eq(a[l, k, j, i], wp.tid())
wp.expect_eq(a[l][k][j][i], wp.tid())
def test_4d_transposed(test, device):
dim_x = 16
dim_y = 8
dim_z = 4
dim_w = 2
a = np.arange(0, dim_x * dim_y * dim_z * dim_w, dtype=np.int32)
a = a.reshape(dim_x, dim_y, dim_z, dim_w)
arr = wp.array(a, device=device)
# Transpose the array manually, as using the wp.array() constructor with arr.T would make it contiguous first
a_T = a.T
arr_T = wp.array(
dtype=arr.dtype,
shape=a_T.shape,
strides=a_T.__array_interface__["strides"],
capacity=arr.capacity,
ptr=arr.ptr,
requires_grad=arr.requires_grad,
device=device,
)
test.assertFalse(arr_T.is_contiguous)
test.assertEqual(arr_T.shape, a_T.shape)
test.assertEqual(arr_T.strides, a_T.__array_interface__["strides"])
test.assertEqual(arr_T.size, a_T.size)
test.assertEqual(arr_T.ndim, a_T.ndim)
with CheckOutput(test):
wp.launch(kernel_4d_transposed, dim=arr_T.size, inputs=[arr_T, dim_x, dim_y, dim_z, dim_w], device=device)
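# Note (sketch, not part of the original test suite): the manual stride-based view
# constructed above can also be obtained with the transpose() helper, which returns
# a non-contiguous view over the same storage (see test_transpose further below).
def example_transpose_view(arr):
    return arr.transpose()  # axes reversed, no data copied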
@wp.kernel
def lower_bound_kernel(values: wp.array(dtype=float), arr: wp.array(dtype=float), indices: wp.array(dtype=int)):
tid = wp.tid()
indices[tid] = wp.lower_bound(arr, values[tid])
def test_lower_bound(test, device):
arr = wp.array(np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0], dtype=float), dtype=float, device=device)
values = wp.array(np.array([-0.1, 0.0, 2.5, 4.0, 5.0, 5.5], dtype=float), dtype=float, device=device)
indices = wp.zeros(6, dtype=int, device=device)
wp.launch(kernel=lower_bound_kernel, dim=6, inputs=[values, arr, indices], device=device)
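    # lower_bound returns the index of the first element >= the query value;
    # as exercised here, out-of-range queries clamp to the valid index range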
test.assertTrue((np.array([0, 0, 3, 4, 5, 5]) == indices.numpy()).all())
@wp.kernel
def f1(arr: wp.array(dtype=float)):
wp.expect_eq(arr.shape[0], 10)
@wp.kernel
def f2(arr: wp.array2d(dtype=float)):
wp.expect_eq(arr.shape[0], 10)
wp.expect_eq(arr.shape[1], 20)
slice = arr[0]
wp.expect_eq(slice.shape[0], 20)
@wp.kernel
def f3(arr: wp.array3d(dtype=float)):
wp.expect_eq(arr.shape[0], 10)
wp.expect_eq(arr.shape[1], 20)
wp.expect_eq(arr.shape[2], 30)
slice = arr[0, 0]
wp.expect_eq(slice.shape[0], 30)
@wp.kernel
def f4(arr: wp.array4d(dtype=float)):
wp.expect_eq(arr.shape[0], 10)
wp.expect_eq(arr.shape[1], 20)
wp.expect_eq(arr.shape[2], 30)
wp.expect_eq(arr.shape[3], 40)
slice = arr[0, 0, 0]
wp.expect_eq(slice.shape[0], 40)
def test_shape(test, device):
with CheckOutput(test):
a1 = wp.zeros(dtype=float, shape=10, device=device)
wp.launch(f1, dim=1, inputs=[a1], device=device)
a2 = wp.zeros(dtype=float, shape=(10, 20), device=device)
wp.launch(f2, dim=1, inputs=[a2], device=device)
a3 = wp.zeros(dtype=float, shape=(10, 20, 30), device=device)
wp.launch(f3, dim=1, inputs=[a3], device=device)
a4 = wp.zeros(dtype=float, shape=(10, 20, 30, 40), device=device)
wp.launch(f4, dim=1, inputs=[a4], device=device)
def test_negative_shape(test, device):
with test.assertRaisesRegex(ValueError, "Array shapes must be non-negative"):
_ = wp.zeros(shape=-1, dtype=int, device=device)
with test.assertRaisesRegex(ValueError, "Array shapes must be non-negative"):
_ = wp.zeros(shape=-(2**32), dtype=int, device=device)
with test.assertRaisesRegex(ValueError, "Array shapes must be non-negative"):
_ = wp.zeros(shape=(10, -1), dtype=int, device=device)
@wp.kernel
def sum_array(arr: wp.array(dtype=float), loss: wp.array(dtype=float)):
tid = wp.tid()
wp.atomic_add(loss, 0, arr[tid])
def test_flatten(test, device):
np_arr = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=float)
arr = wp.array(np_arr, dtype=float, shape=np_arr.shape, device=device, requires_grad=True)
arr_flat = arr.flatten()
arr_comp = wp.array(np_arr.flatten(), dtype=float, device=device)
assert_array_equal(arr_flat, arr_comp)
loss = wp.zeros(1, dtype=float, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(kernel=sum_array, dim=len(arr_flat), inputs=[arr_flat, loss], device=device)
tape.backward(loss=loss)
grad = tape.gradients[arr_flat]
ones = wp.array(
np.ones(
(8,),
dtype=float,
),
dtype=float,
device=device,
)
assert_array_equal(grad, ones)
test.assertEqual(loss.numpy()[0], 36)
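# Note (assumption, not asserted by the original test): flatten() on a contiguous
# array returns a view over the same storage rather than a copy, which is why the
# gradient registered for arr_flat maps element-wise onto the source array.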
def test_reshape(test, device):
np_arr = np.arange(6, dtype=float)
arr = wp.array(np_arr, dtype=float, device=device, requires_grad=True)
arr_reshaped = arr.reshape((3, 2))
arr_comp = wp.array(np_arr.reshape((3, 2)), dtype=float, device=device)
assert_array_equal(arr_reshaped, arr_comp)
arr_reshaped = arr_reshaped.reshape(6)
assert_array_equal(arr_reshaped, arr)
loss = wp.zeros(1, dtype=float, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(kernel=sum_array, dim=len(arr_reshaped), inputs=[arr_reshaped, loss], device=device)
tape.backward(loss=loss)
grad = tape.gradients[arr_reshaped]
ones = wp.array(
np.ones(
(6,),
dtype=float,
),
dtype=float,
device=device,
)
assert_array_equal(grad, ones)
test.assertEqual(loss.numpy()[0], 15)
np_arr = np.arange(6, dtype=float)
arr = wp.array(np_arr, dtype=float, device=device)
arr_infer = arr.reshape((-1, 3))
arr_comp = wp.array(np_arr.reshape((-1, 3)), dtype=float, device=device)
assert_array_equal(arr_infer, arr_comp)
@wp.kernel
def compare_stepped_window_a(x: wp.array2d(dtype=float)):
wp.expect_eq(x[0, 0], 1.0)
wp.expect_eq(x[0, 1], 2.0)
wp.expect_eq(x[1, 0], 9.0)
wp.expect_eq(x[1, 1], 10.0)
@wp.kernel
def compare_stepped_window_b(x: wp.array2d(dtype=float)):
wp.expect_eq(x[0, 0], 3.0)
wp.expect_eq(x[0, 1], 4.0)
wp.expect_eq(x[1, 0], 7.0)
wp.expect_eq(x[1, 1], 8.0)
wp.expect_eq(x[2, 0], 11.0)
wp.expect_eq(x[2, 1], 12.0)
def test_slicing(test, device):
np_arr = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=float)
arr = wp.array(np_arr, dtype=float, shape=np_arr.shape, device=device, requires_grad=True)
slice_a = arr[1, :, :] # test indexing
slice_b = arr[1:2, :, :] # test slicing
slice_c = arr[-1, :, :] # test negative indexing
slice_d = arr[-2:-1, :, :] # test negative slicing
slice_e = arr[-1:3, :, :] # test mixed slicing
slice_e2 = slice_e[0, 0, :] # test 2x slicing
slice_f = arr[0:3:2, 0, :] # test step
assert_array_equal(slice_a, wp.array(np_arr[1, :, :], dtype=float, device=device))
assert_array_equal(slice_b, wp.array(np_arr[1:2, :, :], dtype=float, device=device))
assert_array_equal(slice_c, wp.array(np_arr[-1, :, :], dtype=float, device=device))
assert_array_equal(slice_d, wp.array(np_arr[-2:-1, :, :], dtype=float, device=device))
assert_array_equal(slice_e, wp.array(np_arr[-1:3, :, :], dtype=float, device=device))
assert_array_equal(slice_e2, wp.array(np_arr[2, 0, :], dtype=float, device=device))
    # Warp does not support copying to/from non-contiguous arrays, so stepped
    # windows must be read in a kernel on the device where the original array was created
wp.launch(kernel=compare_stepped_window_a, dim=1, inputs=[slice_f], device=device)
slice_flat = slice_b.flatten()
loss = wp.zeros(1, dtype=float, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(kernel=sum_array, dim=len(slice_flat), inputs=[slice_flat, loss], device=device)
tape.backward(loss=loss)
grad = tape.gradients[slice_flat]
ones = wp.array(
np.ones(
(4,),
dtype=float,
),
dtype=float,
device=device,
)
assert_array_equal(grad, ones)
test.assertEqual(loss.numpy()[0], 26)
index_a = arr[1]
index_b = arr[2, 1]
index_c = arr[1, :]
index_d = arr[:, 1]
assert_array_equal(index_a, wp.array(np_arr[1], dtype=float, device=device))
assert_array_equal(index_b, wp.array(np_arr[2, 1], dtype=float, device=device))
assert_array_equal(index_c, wp.array(np_arr[1, :], dtype=float, device=device))
wp.launch(kernel=compare_stepped_window_b, dim=1, inputs=[index_d], device=device)
np_arr = np.zeros(10, dtype=int)
wp_arr = wp.array(np_arr, dtype=int, device=device)
assert_array_equal(wp_arr[:5], wp.array(np_arr[:5], dtype=int, device=device))
assert_array_equal(wp_arr[1:5], wp.array(np_arr[1:5], dtype=int, device=device))
assert_array_equal(wp_arr[-9:-5:1], wp.array(np_arr[-9:-5:1], dtype=int, device=device))
assert_array_equal(wp_arr[:5,], wp.array(np_arr[:5], dtype=int, device=device)) # noqa: E231
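# Example (sketch, not part of the original test suite): stepped slices such as
# arr[0:3:2] above are non-contiguous views and cannot be copied to the host
# directly; calling contiguous() first produces a compact copy that numpy() can read.
def example_read_stepped_slice(arr):
    stepped = arr[::2]
    return stepped.contiguous().numpy()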
def test_view(test, device):
np_arr_a = np.arange(1, 10, 1, dtype=np.uint32)
np_arr_b = np.arange(1, 10, 1, dtype=np.float32)
np_arr_c = np.arange(1, 10, 1, dtype=np.uint16)
np_arr_d = np.arange(1, 10, 1, dtype=np.float16)
np_arr_e = np.ones((4, 4), dtype=np.float32)
wp_arr_a = wp.array(np_arr_a, dtype=wp.uint32, device=device)
wp_arr_b = wp.array(np_arr_b, dtype=wp.float32, device=device)
wp_arr_c = wp.array(np_arr_a, dtype=wp.uint16, device=device)
wp_arr_d = wp.array(np_arr_b, dtype=wp.float16, device=device)
wp_arr_e = wp.array(np_arr_e, dtype=wp.vec4, device=device)
wp_arr_f = wp.array(np_arr_e, dtype=wp.quat, device=device)
assert_np_equal(wp_arr_a.view(dtype=wp.float32).numpy(), np_arr_a.view(dtype=np.float32))
assert_np_equal(wp_arr_b.view(dtype=wp.uint32).numpy(), np_arr_b.view(dtype=np.uint32))
assert_np_equal(wp_arr_c.view(dtype=wp.float16).numpy(), np_arr_c.view(dtype=np.float16))
assert_np_equal(wp_arr_d.view(dtype=wp.uint16).numpy(), np_arr_d.view(dtype=np.uint16))
assert_array_equal(wp_arr_e.view(dtype=wp.quat), wp_arr_f)
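# Example (sketch, not part of the original test suite): view() reinterprets the
# underlying bytes without copying, so source and target dtypes must have the same
# byte size (e.g. float32 <-> uint32), mirroring numpy's ndarray.view semantics.
def example_bitcast_f32_to_u32(arr_f32):
    return arr_f32.view(dtype=wp.uint32)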
def test_clone_adjoint(test, device):
state_in = wp.from_numpy(
np.array([1.0, 2.0, 3.0]).astype(np.float32), dtype=wp.float32, requires_grad=True, device=device
)
tape = wp.Tape()
with tape:
state_out = wp.clone(state_in)
grads = {state_out: wp.from_numpy(np.array([1.0, 1.0, 1.0]).astype(np.float32), dtype=wp.float32, device=device)}
tape.backward(grads=grads)
assert_np_equal(state_in.grad.numpy(), np.array([1.0, 1.0, 1.0]).astype(np.float32))
def test_assign_adjoint(test, device):
state_in = wp.from_numpy(
np.array([1.0, 2.0, 3.0]).astype(np.float32), dtype=wp.float32, requires_grad=True, device=device
)
state_out = wp.zeros(state_in.shape, dtype=wp.float32, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
state_out.assign(state_in)
grads = {state_out: wp.from_numpy(np.array([1.0, 1.0, 1.0]).astype(np.float32), dtype=wp.float32, device=device)}
tape.backward(grads=grads)
assert_np_equal(state_in.grad.numpy(), np.array([1.0, 1.0, 1.0]).astype(np.float32))
@wp.kernel
def compare_2darrays(x: wp.array2d(dtype=float), y: wp.array2d(dtype=float), z: wp.array2d(dtype=int)):
i, j = wp.tid()
if x[i, j] == y[i, j]:
z[i, j] = 1
@wp.kernel
def compare_3darrays(x: wp.array3d(dtype=float), y: wp.array3d(dtype=float), z: wp.array3d(dtype=int)):
i, j, k = wp.tid()
if x[i, j, k] == y[i, j, k]:
z[i, j, k] = 1
def test_transpose(test, device):
# test default transpose in non-square 2d case
# wp does not support copying from/to non-contiguous arrays so check in kernel
np_arr = np.array([[1, 2], [3, 4], [5, 6]], dtype=float)
arr = wp.array(np_arr, dtype=float, device=device)
arr_transpose = arr.transpose()
arr_compare = wp.array(np_arr.transpose(), dtype=float, device=device)
check = wp.zeros(shape=(2, 3), dtype=int, device=device)
wp.launch(compare_2darrays, dim=(2, 3), inputs=[arr_transpose, arr_compare, check], device=device)
assert_np_equal(check.numpy(), np.ones((2, 3), dtype=int))
# test transpose in square 3d case
# wp does not support copying from/to non-contiguous arrays so check in kernel
np_arr = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=float)
arr = wp.array3d(np_arr, dtype=float, shape=np_arr.shape, device=device, requires_grad=True)
arr_transpose = arr.transpose((0, 2, 1))
arr_compare = wp.array3d(np_arr.transpose((0, 2, 1)), dtype=float, device=device)
check = wp.zeros(shape=(3, 2, 2), dtype=int, device=device)
wp.launch(compare_3darrays, dim=(3, 2, 2), inputs=[arr_transpose, arr_compare, check], device=device)
assert_np_equal(check.numpy(), np.ones((3, 2, 2), dtype=int))
# test transpose in square 3d case without axes supplied
arr_transpose = arr.transpose()
arr_compare = wp.array3d(np_arr.transpose(), dtype=float, device=device)
check = wp.zeros(shape=(2, 2, 3), dtype=int, device=device)
wp.launch(compare_3darrays, dim=(2, 2, 3), inputs=[arr_transpose, arr_compare, check], device=device)
assert_np_equal(check.numpy(), np.ones((2, 2, 3), dtype=int))
# test transpose in 1d case (should be noop)
np_arr = np.array([1, 2, 3], dtype=float)
arr = wp.array(np_arr, dtype=float, device=device)
assert_np_equal(arr.transpose().numpy(), np_arr.transpose())
def test_fill_scalar(test, device):
dim_x = 4
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
a1 = wp.zeros(dim_x, dtype=wptype, device=device)
a2 = wp.zeros((dim_x, dim_x), dtype=wptype, device=device)
a3 = wp.zeros((dim_x, dim_x, dim_x), dtype=wptype, device=device)
a4 = wp.zeros((dim_x, dim_x, dim_x, dim_x), dtype=wptype, device=device)
assert_np_equal(a1.numpy(), np.zeros(a1.shape, dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros(a2.shape, dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros(a3.shape, dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros(a4.shape, dtype=nptype))
# fill with int value
fill_value = 42
a1.fill_(fill_value)
a2.fill_(fill_value)
a3.fill_(fill_value)
a4.fill_(fill_value)
assert_np_equal(a1.numpy(), np.full(a1.shape, fill_value, dtype=nptype))
assert_np_equal(a2.numpy(), np.full(a2.shape, fill_value, dtype=nptype))
assert_np_equal(a3.numpy(), np.full(a3.shape, fill_value, dtype=nptype))
assert_np_equal(a4.numpy(), np.full(a4.shape, fill_value, dtype=nptype))
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
assert_np_equal(a1.numpy(), np.zeros(a1.shape, dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros(a2.shape, dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros(a3.shape, dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros(a4.shape, dtype=nptype))
if wptype in wp.types.float_types:
# fill with float value
fill_value = 13.37
a1.fill_(fill_value)
a2.fill_(fill_value)
a3.fill_(fill_value)
a4.fill_(fill_value)
assert_np_equal(a1.numpy(), np.full(a1.shape, fill_value, dtype=nptype))
assert_np_equal(a2.numpy(), np.full(a2.shape, fill_value, dtype=nptype))
assert_np_equal(a3.numpy(), np.full(a3.shape, fill_value, dtype=nptype))
assert_np_equal(a4.numpy(), np.full(a4.shape, fill_value, dtype=nptype))
# fill with Warp scalar value
fill_value = wptype(17)
a1.fill_(fill_value)
a2.fill_(fill_value)
a3.fill_(fill_value)
a4.fill_(fill_value)
assert_np_equal(a1.numpy(), np.full(a1.shape, fill_value.value, dtype=nptype))
assert_np_equal(a2.numpy(), np.full(a2.shape, fill_value.value, dtype=nptype))
assert_np_equal(a3.numpy(), np.full(a3.shape, fill_value.value, dtype=nptype))
assert_np_equal(a4.numpy(), np.full(a4.shape, fill_value.value, dtype=nptype))
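# Example (sketch, not part of the original test suite): fill_() writes into an
# existing allocation in place, whereas wp.full() allocates and fills in a single
# step (wp.full is exercised by test_full_scalar further below).
def example_fill_vs_full(device=None):
    a = wp.zeros(4, dtype=wp.float32, device=device)
    a.fill_(13.37)  # in-place fill of an existing array
    b = wp.full(4, 13.37, dtype=wp.float32, device=device)  # allocate and fill
    return a, b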
def test_fill_vector(test, device):
# test filling a vector array with scalar or vector values (vec_type, list, or numpy array)
dim_x = 4
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
# vector types
vector_types = [
wp.types.vector(2, wptype),
wp.types.vector(3, wptype),
wp.types.vector(4, wptype),
wp.types.vector(5, wptype),
]
for vec_type in vector_types:
vec_len = vec_type._length_
a1 = wp.zeros(dim_x, dtype=vec_type, device=device)
a2 = wp.zeros((dim_x, dim_x), dtype=vec_type, device=device)
a3 = wp.zeros((dim_x, dim_x, dim_x), dtype=vec_type, device=device)
a4 = wp.zeros((dim_x, dim_x, dim_x, dim_x), dtype=vec_type, device=device)
assert_np_equal(a1.numpy(), np.zeros((*a1.shape, vec_len), dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros((*a2.shape, vec_len), dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros((*a3.shape, vec_len), dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros((*a4.shape, vec_len), dtype=nptype))
# fill with int scalar
fill_value = 42
a1.fill_(fill_value)
a2.fill_(fill_value)
a3.fill_(fill_value)
a4.fill_(fill_value)
assert_np_equal(a1.numpy(), np.full((*a1.shape, vec_len), fill_value, dtype=nptype))
assert_np_equal(a2.numpy(), np.full((*a2.shape, vec_len), fill_value, dtype=nptype))
assert_np_equal(a3.numpy(), np.full((*a3.shape, vec_len), fill_value, dtype=nptype))
assert_np_equal(a4.numpy(), np.full((*a4.shape, vec_len), fill_value, dtype=nptype))
# test zeroing
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
assert_np_equal(a1.numpy(), np.zeros((*a1.shape, vec_len), dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros((*a2.shape, vec_len), dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros((*a3.shape, vec_len), dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros((*a4.shape, vec_len), dtype=nptype))
# vector values can be passed as a list, numpy array, or Warp vector instance
fill_list = [17, 42, 99, 101, 127][:vec_len]
fill_arr = np.array(fill_list, dtype=nptype)
fill_vec = vec_type(fill_list)
expected1 = np.tile(fill_arr, a1.size).reshape((*a1.shape, vec_len))
expected2 = np.tile(fill_arr, a2.size).reshape((*a2.shape, vec_len))
expected3 = np.tile(fill_arr, a3.size).reshape((*a3.shape, vec_len))
expected4 = np.tile(fill_arr, a4.size).reshape((*a4.shape, vec_len))
# fill with list of vector length
a1.fill_(fill_list)
a2.fill_(fill_list)
a3.fill_(fill_list)
a4.fill_(fill_list)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
# clear
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
# fill with numpy array of vector length
a1.fill_(fill_arr)
a2.fill_(fill_arr)
a3.fill_(fill_arr)
a4.fill_(fill_arr)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
# clear
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
# fill with vec instance
a1.fill_(fill_vec)
a2.fill_(fill_vec)
a3.fill_(fill_vec)
a4.fill_(fill_vec)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
if wptype in wp.types.float_types:
# fill with float scalar
fill_value = 13.37
a1.fill_(fill_value)
a2.fill_(fill_value)
a3.fill_(fill_value)
a4.fill_(fill_value)
assert_np_equal(a1.numpy(), np.full((*a1.shape, vec_len), fill_value, dtype=nptype))
assert_np_equal(a2.numpy(), np.full((*a2.shape, vec_len), fill_value, dtype=nptype))
assert_np_equal(a3.numpy(), np.full((*a3.shape, vec_len), fill_value, dtype=nptype))
assert_np_equal(a4.numpy(), np.full((*a4.shape, vec_len), fill_value, dtype=nptype))
# fill with float list of vector length
fill_list = [-2.5, -1.25, 1.25, 2.5, 5.0][:vec_len]
a1.fill_(fill_list)
a2.fill_(fill_list)
a3.fill_(fill_list)
a4.fill_(fill_list)
expected1 = np.tile(np.array(fill_list, dtype=nptype), a1.size).reshape((*a1.shape, vec_len))
expected2 = np.tile(np.array(fill_list, dtype=nptype), a2.size).reshape((*a2.shape, vec_len))
expected3 = np.tile(np.array(fill_list, dtype=nptype), a3.size).reshape((*a3.shape, vec_len))
expected4 = np.tile(np.array(fill_list, dtype=nptype), a4.size).reshape((*a4.shape, vec_len))
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
def test_fill_matrix(test, device):
# test filling a matrix array with scalar or matrix values (mat_type, nested list, or 2d numpy array)
dim_x = 4
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
# matrix types
matrix_types = [
# square matrices
wp.types.matrix((2, 2), wptype),
wp.types.matrix((3, 3), wptype),
wp.types.matrix((4, 4), wptype),
wp.types.matrix((5, 5), wptype),
# non-square matrices
wp.types.matrix((2, 3), wptype),
wp.types.matrix((3, 2), wptype),
wp.types.matrix((3, 4), wptype),
wp.types.matrix((4, 3), wptype),
]
for mat_type in matrix_types:
mat_len = mat_type._length_
mat_shape = mat_type._shape_
a1 = wp.zeros(dim_x, dtype=mat_type, device=device)
a2 = wp.zeros((dim_x, dim_x), dtype=mat_type, device=device)
a3 = wp.zeros((dim_x, dim_x, dim_x), dtype=mat_type, device=device)
a4 = wp.zeros((dim_x, dim_x, dim_x, dim_x), dtype=mat_type, device=device)
assert_np_equal(a1.numpy(), np.zeros((*a1.shape, *mat_shape), dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros((*a2.shape, *mat_shape), dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros((*a3.shape, *mat_shape), dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros((*a4.shape, *mat_shape), dtype=nptype))
# fill with scalar
fill_value = 42
a1.fill_(fill_value)
a2.fill_(fill_value)
a3.fill_(fill_value)
a4.fill_(fill_value)
assert_np_equal(a1.numpy(), np.full((*a1.shape, *mat_shape), fill_value, dtype=nptype))
assert_np_equal(a2.numpy(), np.full((*a2.shape, *mat_shape), fill_value, dtype=nptype))
assert_np_equal(a3.numpy(), np.full((*a3.shape, *mat_shape), fill_value, dtype=nptype))
assert_np_equal(a4.numpy(), np.full((*a4.shape, *mat_shape), fill_value, dtype=nptype))
# test zeroing
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
assert_np_equal(a1.numpy(), np.zeros((*a1.shape, *mat_shape), dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros((*a2.shape, *mat_shape), dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros((*a3.shape, *mat_shape), dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros((*a4.shape, *mat_shape), dtype=nptype))
# matrix values can be passed as a 1d numpy array, 2d numpy array, flat list, nested list, or Warp matrix instance
if wptype != wp.bool:
fill_arr1 = np.arange(mat_len, dtype=nptype)
else:
fill_arr1 = np.ones(mat_len, dtype=nptype)
fill_arr2 = fill_arr1.reshape(mat_shape)
fill_list1 = list(fill_arr1)
fill_list2 = [list(row) for row in fill_arr2]
fill_mat = mat_type(fill_arr1)
expected1 = np.tile(fill_arr1, a1.size).reshape((*a1.shape, *mat_shape))
expected2 = np.tile(fill_arr1, a2.size).reshape((*a2.shape, *mat_shape))
expected3 = np.tile(fill_arr1, a3.size).reshape((*a3.shape, *mat_shape))
expected4 = np.tile(fill_arr1, a4.size).reshape((*a4.shape, *mat_shape))
# fill with 1d numpy array
a1.fill_(fill_arr1)
a2.fill_(fill_arr1)
a3.fill_(fill_arr1)
a4.fill_(fill_arr1)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
# clear
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
# fill with 2d numpy array
a1.fill_(fill_arr2)
a2.fill_(fill_arr2)
a3.fill_(fill_arr2)
a4.fill_(fill_arr2)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
# clear
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
# fill with flat list
a1.fill_(fill_list1)
a2.fill_(fill_list1)
a3.fill_(fill_list1)
a4.fill_(fill_list1)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
# clear
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
# fill with nested list
a1.fill_(fill_list2)
a2.fill_(fill_list2)
a3.fill_(fill_list2)
a4.fill_(fill_list2)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
# clear
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
# fill with mat instance
a1.fill_(fill_mat)
a2.fill_(fill_mat)
a3.fill_(fill_mat)
a4.fill_(fill_mat)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
@wp.struct
class FillStruct:
# scalar members (make sure to test float16)
i1: wp.int8
i2: wp.int16
i4: wp.int32
i8: wp.int64
f2: wp.float16
f4: wp.float32
    f8: wp.float64
# vector members (make sure to test vectors of float16)
v2: wp.types.vector(2, wp.int64)
v3: wp.types.vector(3, wp.float32)
v4: wp.types.vector(4, wp.float16)
v5: wp.types.vector(5, wp.uint8)
# matrix members (make sure to test matrices of float16)
m2: wp.types.matrix((2, 2), wp.float64)
m3: wp.types.matrix((3, 3), wp.int32)
m4: wp.types.matrix((4, 4), wp.float16)
m5: wp.types.matrix((5, 5), wp.int8)
# arrays
a1: wp.array(dtype=float)
a2: wp.array2d(dtype=float)
a3: wp.array3d(dtype=float)
a4: wp.array4d(dtype=float)
def test_fill_struct(test, device):
dim_x = 4
nptype = FillStruct.numpy_dtype()
a1 = wp.zeros(dim_x, dtype=FillStruct, device=device)
a2 = wp.zeros((dim_x, dim_x), dtype=FillStruct, device=device)
a3 = wp.zeros((dim_x, dim_x, dim_x), dtype=FillStruct, device=device)
a4 = wp.zeros((dim_x, dim_x, dim_x, dim_x), dtype=FillStruct, device=device)
assert_np_equal(a1.numpy(), np.zeros(a1.shape, dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros(a2.shape, dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros(a3.shape, dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros(a4.shape, dtype=nptype))
s = FillStruct()
# fill with default struct value (should be all zeros)
a1.fill_(s)
a2.fill_(s)
a3.fill_(s)
a4.fill_(s)
assert_np_equal(a1.numpy(), np.zeros(a1.shape, dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros(a2.shape, dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros(a3.shape, dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros(a4.shape, dtype=nptype))
# scalars
s.i1 = -17
s.i2 = 42
s.i4 = 99
s.i8 = 101
s.f2 = -1.25
s.f4 = 13.37
s.f8 = 0.125
# vectors
s.v2 = [21, 22]
s.v3 = [31, 32, 33]
s.v4 = [41, 42, 43, 44]
s.v5 = [51, 52, 53, 54, 55]
# matrices
s.m2 = [[61, 62]] * 2
s.m3 = [[71, 72, 73]] * 3
s.m4 = [[81, 82, 83, 84]] * 4
s.m5 = [[91, 92, 93, 94, 95]] * 5
# arrays
s.a1 = wp.zeros((2,) * 1, dtype=float, device=device)
s.a2 = wp.zeros((2,) * 2, dtype=float, device=device)
s.a3 = wp.zeros((2,) * 3, dtype=float, device=device)
s.a4 = wp.zeros((2,) * 4, dtype=float, device=device)
# fill with custom struct value
a1.fill_(s)
a2.fill_(s)
a3.fill_(s)
a4.fill_(s)
ns = s.numpy_value()
expected1 = np.empty(a1.shape, dtype=nptype)
expected2 = np.empty(a2.shape, dtype=nptype)
expected3 = np.empty(a3.shape, dtype=nptype)
expected4 = np.empty(a4.shape, dtype=nptype)
expected1.fill(ns)
expected2.fill(ns)
expected3.fill(ns)
expected4.fill(ns)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
# test clearing
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
assert_np_equal(a1.numpy(), np.zeros(a1.shape, dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros(a2.shape, dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros(a3.shape, dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros(a4.shape, dtype=nptype))
def test_fill_slices(test, device):
# test fill_ and zero_ for non-contiguous arrays
# Note: we don't need to test the whole range of dtypes (vectors, matrices, structs) here
dim_x = 8
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
a1 = wp.zeros(dim_x, dtype=wptype, device=device)
a2 = wp.zeros((dim_x, dim_x), dtype=wptype, device=device)
a3 = wp.zeros((dim_x, dim_x, dim_x), dtype=wptype, device=device)
a4 = wp.zeros((dim_x, dim_x, dim_x, dim_x), dtype=wptype, device=device)
assert_np_equal(a1.numpy(), np.zeros(a1.shape, dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros(a2.shape, dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros(a3.shape, dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros(a4.shape, dtype=nptype))
        # partition each array into even and odd slices
a1a = a1[::2]
a1b = a1[1::2]
a2a = a2[::2]
a2b = a2[1::2]
a3a = a3[::2]
a3b = a3[1::2]
a4a = a4[::2]
a4b = a4[1::2]
# fill even slices
fill_a = 17
a1a.fill_(fill_a)
a2a.fill_(fill_a)
a3a.fill_(fill_a)
a4a.fill_(fill_a)
# ensure filled slices are correct
assert_np_equal(a1a.numpy(), np.full(a1a.shape, fill_a, dtype=nptype))
assert_np_equal(a2a.numpy(), np.full(a2a.shape, fill_a, dtype=nptype))
assert_np_equal(a3a.numpy(), np.full(a3a.shape, fill_a, dtype=nptype))
assert_np_equal(a4a.numpy(), np.full(a4a.shape, fill_a, dtype=nptype))
# ensure unfilled slices are unaffected
assert_np_equal(a1b.numpy(), np.zeros(a1b.shape, dtype=nptype))
assert_np_equal(a2b.numpy(), np.zeros(a2b.shape, dtype=nptype))
assert_np_equal(a3b.numpy(), np.zeros(a3b.shape, dtype=nptype))
assert_np_equal(a4b.numpy(), np.zeros(a4b.shape, dtype=nptype))
# fill odd slices
fill_b = 42
a1b.fill_(fill_b)
a2b.fill_(fill_b)
a3b.fill_(fill_b)
a4b.fill_(fill_b)
# ensure filled slices are correct
assert_np_equal(a1b.numpy(), np.full(a1b.shape, fill_b, dtype=nptype))
assert_np_equal(a2b.numpy(), np.full(a2b.shape, fill_b, dtype=nptype))
assert_np_equal(a3b.numpy(), np.full(a3b.shape, fill_b, dtype=nptype))
assert_np_equal(a4b.numpy(), np.full(a4b.shape, fill_b, dtype=nptype))
# ensure unfilled slices are unaffected
assert_np_equal(a1a.numpy(), np.full(a1a.shape, fill_a, dtype=nptype))
assert_np_equal(a2a.numpy(), np.full(a2a.shape, fill_a, dtype=nptype))
assert_np_equal(a3a.numpy(), np.full(a3a.shape, fill_a, dtype=nptype))
assert_np_equal(a4a.numpy(), np.full(a4a.shape, fill_a, dtype=nptype))
# clear even slices
a1a.zero_()
a2a.zero_()
a3a.zero_()
a4a.zero_()
# ensure cleared slices are correct
assert_np_equal(a1a.numpy(), np.zeros(a1a.shape, dtype=nptype))
assert_np_equal(a2a.numpy(), np.zeros(a2a.shape, dtype=nptype))
assert_np_equal(a3a.numpy(), np.zeros(a3a.shape, dtype=nptype))
assert_np_equal(a4a.numpy(), np.zeros(a4a.shape, dtype=nptype))
# ensure uncleared slices are unaffected
assert_np_equal(a1b.numpy(), np.full(a1b.shape, fill_b, dtype=nptype))
assert_np_equal(a2b.numpy(), np.full(a2b.shape, fill_b, dtype=nptype))
assert_np_equal(a3b.numpy(), np.full(a3b.shape, fill_b, dtype=nptype))
assert_np_equal(a4b.numpy(), np.full(a4b.shape, fill_b, dtype=nptype))
# re-fill even slices
a1a.fill_(fill_a)
a2a.fill_(fill_a)
a3a.fill_(fill_a)
a4a.fill_(fill_a)
# clear odd slices
a1b.zero_()
a2b.zero_()
a3b.zero_()
a4b.zero_()
# ensure cleared slices are correct
assert_np_equal(a1b.numpy(), np.zeros(a1b.shape, dtype=nptype))
assert_np_equal(a2b.numpy(), np.zeros(a2b.shape, dtype=nptype))
assert_np_equal(a3b.numpy(), np.zeros(a3b.shape, dtype=nptype))
assert_np_equal(a4b.numpy(), np.zeros(a4b.shape, dtype=nptype))
# ensure uncleared slices are unaffected
assert_np_equal(a1a.numpy(), np.full(a1a.shape, fill_a, dtype=nptype))
assert_np_equal(a2a.numpy(), np.full(a2a.shape, fill_a, dtype=nptype))
assert_np_equal(a3a.numpy(), np.full(a3a.shape, fill_a, dtype=nptype))
assert_np_equal(a4a.numpy(), np.full(a4a.shape, fill_a, dtype=nptype))
def test_full_scalar(test, device):
dim = 4
for ndim in range(1, 5):
shape = (dim,) * ndim
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
# fill with int value and specific dtype
fill_value = 42
a = wp.full(shape, fill_value, dtype=wptype, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, wptype)
test.assertEqual(na.shape, shape)
test.assertEqual(na.dtype, nptype)
assert_np_equal(na, np.full(shape, fill_value, dtype=nptype))
if wptype in wp.types.float_types:
# fill with float value and specific dtype
fill_value = 13.37
a = wp.full(shape, fill_value, dtype=wptype, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, wptype)
test.assertEqual(na.shape, shape)
test.assertEqual(na.dtype, nptype)
assert_np_equal(na, np.full(shape, fill_value, dtype=nptype))
# fill with int value and automatically inferred dtype
fill_value = 42
a = wp.full(shape, fill_value, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, wp.int32)
test.assertEqual(na.shape, shape)
test.assertEqual(na.dtype, np.int32)
assert_np_equal(na, np.full(shape, fill_value, dtype=np.int32))
# fill with float value and automatically inferred dtype
fill_value = 13.37
a = wp.full(shape, fill_value, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, wp.float32)
test.assertEqual(na.shape, shape)
test.assertEqual(na.dtype, np.float32)
assert_np_equal(na, np.full(shape, fill_value, dtype=np.float32))
def test_full_vector(test, device):
dim = 4
for ndim in range(1, 5):
shape = (dim,) * ndim
# full from scalar
for veclen in [2, 3, 4, 5]:
npshape = (*shape, veclen)
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
vectype = wp.types.vector(veclen, wptype)
# fill with scalar int value and specific dtype
fill_value = 42
a = wp.full(shape, fill_value, dtype=vectype, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, vectype)
test.assertEqual(na.shape, npshape)
test.assertEqual(na.dtype, nptype)
assert_np_equal(na, np.full(a.size * veclen, fill_value, dtype=nptype).reshape(npshape))
if wptype in wp.types.float_types:
# fill with scalar float value and specific dtype
fill_value = 13.37
a = wp.full(shape, fill_value, dtype=vectype, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, vectype)
test.assertEqual(na.shape, npshape)
test.assertEqual(na.dtype, nptype)
assert_np_equal(na, np.full(a.size * veclen, fill_value, dtype=nptype).reshape(npshape))
# fill with vector value and specific dtype
fill_vec = vectype(42)
a = wp.full(shape, fill_vec, dtype=vectype, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, vectype)
test.assertEqual(na.shape, npshape)
test.assertEqual(na.dtype, nptype)
assert_np_equal(na, np.full(a.size * veclen, 42, dtype=nptype).reshape(npshape))
# fill with vector value and automatically inferred dtype
a = wp.full(shape, fill_vec, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, vectype)
test.assertEqual(na.shape, npshape)
test.assertEqual(na.dtype, nptype)
assert_np_equal(na, np.full(a.size * veclen, 42, dtype=nptype).reshape(npshape))
fill_lists = [
[17, 42],
[17, 42, 99],
[17, 42, 99, 101],
[17, 42, 99, 101, 127],
]
# full from list and numpy array
for fill_list in fill_lists:
veclen = len(fill_list)
npshape = (*shape, veclen)
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
vectype = wp.types.vector(veclen, wptype)
# fill with list and specific dtype
a = wp.full(shape, fill_list, dtype=vectype, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, vectype)
test.assertEqual(na.shape, npshape)
test.assertEqual(na.dtype, nptype)
expected = np.tile(np.array(fill_list, dtype=nptype), a.size).reshape(npshape)
assert_np_equal(na, expected)
fill_arr = np.array(fill_list, dtype=nptype)
# fill with numpy array and specific dtype
a = wp.full(shape, fill_arr, dtype=vectype, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, vectype)
test.assertEqual(na.shape, npshape)
test.assertEqual(na.dtype, nptype)
assert_np_equal(na, expected)
# fill with numpy array and automatically infer dtype
a = wp.full(shape, fill_arr, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertTrue(wp.types.types_equal(a.dtype, vectype))
test.assertEqual(na.shape, npshape)
test.assertEqual(na.dtype, nptype)
assert_np_equal(na, expected)
# fill with list and automatically infer dtype
a = wp.full(shape, fill_list, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
# check that the inferred dtype is a vector
# Note that we cannot guarantee the scalar type, because it depends on numpy and may vary by platform
# (e.g. int64 on Linux and int32 on Windows).
test.assertEqual(a.dtype._wp_generic_type_str_, "vec_t")
test.assertEqual(a.dtype._length_, veclen)
expected = np.tile(np.array(fill_list), a.size).reshape(npshape)
assert_np_equal(na, expected)
def test_full_matrix(test, device):
dim = 4
for ndim in range(1, 5):
shape = (dim,) * ndim
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
matrix_types = [
# square matrices
wp.types.matrix((2, 2), wptype),
wp.types.matrix((3, 3), wptype),
wp.types.matrix((4, 4), wptype),
wp.types.matrix((5, 5), wptype),
# non-square matrices
wp.types.matrix((2, 3), wptype),
wp.types.matrix((3, 2), wptype),
wp.types.matrix((3, 4), wptype),
wp.types.matrix((4, 3), wptype),
]
for mattype in matrix_types:
npshape = (*shape, *mattype._shape_)
# fill with scalar int value and specific dtype
fill_value = 42
a = wp.full(shape, fill_value, dtype=mattype, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, mattype)
test.assertEqual(na.shape, npshape)
test.assertEqual(na.dtype, nptype)
assert_np_equal(na, np.full(a.size * mattype._length_, fill_value, dtype=nptype).reshape(npshape))
if wptype in wp.types.float_types:
# fill with scalar float value and specific dtype
fill_value = 13.37
a = wp.full(shape, fill_value, dtype=mattype, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, mattype)
test.assertEqual(na.shape, npshape)
test.assertEqual(na.dtype, nptype)
assert_np_equal(na, np.full(a.size * mattype._length_, fill_value, dtype=nptype).reshape(npshape))
# fill with matrix value and specific dtype
fill_mat = mattype(42)
a = wp.full(shape, fill_mat, dtype=mattype, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, mattype)
test.assertEqual(na.shape, npshape)
test.assertEqual(na.dtype, nptype)
assert_np_equal(na, np.full(a.size * mattype._length_, 42, dtype=nptype).reshape(npshape))
# fill with matrix value and automatically inferred dtype
fill_mat = mattype(42)
a = wp.full(shape, fill_mat, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, mattype)
test.assertEqual(na.shape, npshape)
test.assertEqual(na.dtype, nptype)
assert_np_equal(na, np.full(a.size * mattype._length_, 42, dtype=nptype).reshape(npshape))
# fill with 1d numpy array and specific dtype
if wptype != wp.bool:
fill_arr1d = np.arange(mattype._length_, dtype=nptype)
else:
fill_arr1d = np.ones(mattype._length_, dtype=nptype)
a = wp.full(shape, fill_arr1d, dtype=mattype, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, mattype)
test.assertEqual(na.shape, npshape)
test.assertEqual(na.dtype, nptype)
expected = np.tile(fill_arr1d, a.size).reshape(npshape)
assert_np_equal(na, expected)
# fill with 2d numpy array and specific dtype
fill_arr2d = fill_arr1d.reshape(mattype._shape_)
a = wp.full(shape, fill_arr2d, dtype=mattype, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, mattype)
test.assertEqual(na.shape, npshape)
test.assertEqual(na.dtype, nptype)
assert_np_equal(na, expected)
# fill with 2d numpy array and automatically infer dtype
a = wp.full(shape, fill_arr2d, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertTrue(wp.types.types_equal(a.dtype, mattype))
test.assertEqual(na.shape, npshape)
test.assertEqual(na.dtype, nptype)
assert_np_equal(na, expected)
# fill with flat list and specific dtype
fill_list1d = list(fill_arr1d)
a = wp.full(shape, fill_list1d, dtype=mattype, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, mattype)
test.assertEqual(na.shape, npshape)
test.assertEqual(na.dtype, nptype)
assert_np_equal(na, expected)
# fill with nested list and specific dtype
fill_list2d = [list(row) for row in fill_arr2d]
a = wp.full(shape, fill_list2d, dtype=mattype, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, mattype)
test.assertEqual(na.shape, npshape)
test.assertEqual(na.dtype, nptype)
assert_np_equal(na, expected)
mat_lists = [
# square matrices
[[1, 2], [3, 4]],
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]],
# non-square matrices
[[1, 2, 3, 4], [5, 6, 7, 8]],
[[1, 2], [3, 4], [5, 6], [7, 8]],
]
# fill with nested lists and automatically infer dtype
for fill_list in mat_lists:
num_rows = len(fill_list)
num_cols = len(fill_list[0])
npshape = (*shape, num_rows, num_cols)
a = wp.full(shape, fill_list, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
# check that the inferred dtype is a correctly shaped matrix
# Note that we cannot guarantee the scalar type, because it depends on numpy and may vary by platform
# (e.g. int64 on Linux and int32 on Windows).
test.assertEqual(a.dtype._wp_generic_type_str_, "mat_t")
test.assertEqual(a.dtype._shape_, (num_rows, num_cols))
expected = np.tile(np.array(fill_list).flatten(), a.size).reshape(npshape)
assert_np_equal(na, expected)
def test_full_struct(test, device):
dim = 4
for ndim in range(1, 5):
shape = (dim,) * ndim
s = FillStruct()
# fill with default struct (should be zeros)
a = wp.full(shape, s, dtype=FillStruct, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, FillStruct)
test.assertEqual(na.shape, shape)
test.assertEqual(na.dtype, FillStruct.numpy_dtype())
assert_np_equal(na, np.zeros(a.shape, dtype=FillStruct.numpy_dtype()))
# scalars
s.i1 = -17
s.i2 = 42
s.i4 = 99
s.i8 = 101
s.f2 = -1.25
s.f4 = 13.37
s.f8 = 0.125
# vectors
s.v2 = [21, 22]
s.v3 = [31, 32, 33]
s.v4 = [41, 42, 43, 44]
s.v5 = [51, 52, 53, 54, 55]
# matrices
s.m2 = [[61, 62]] * 2
s.m3 = [[71, 72, 73]] * 3
s.m4 = [[81, 82, 83, 84]] * 4
s.m5 = [[91, 92, 93, 94, 95]] * 5
# arrays
s.a1 = wp.zeros((2,) * 1, dtype=float, device=device)
s.a2 = wp.zeros((2,) * 2, dtype=float, device=device)
s.a3 = wp.zeros((2,) * 3, dtype=float, device=device)
s.a4 = wp.zeros((2,) * 4, dtype=float, device=device)
# fill with initialized struct and explicit dtype
a = wp.full(shape, s, dtype=FillStruct, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, FillStruct)
test.assertEqual(na.shape, shape)
test.assertEqual(na.dtype, FillStruct.numpy_dtype())
expected = np.empty(shape, dtype=FillStruct.numpy_dtype())
expected.fill(s.numpy_value())
assert_np_equal(na, expected)
# fill with initialized struct and automatically inferred dtype
a = wp.full(shape, s, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, FillStruct)
test.assertEqual(na.shape, shape)
test.assertEqual(na.dtype, FillStruct.numpy_dtype())
assert_np_equal(na, expected)
def test_ones_scalar(test, device):
dim = 4
for ndim in range(1, 5):
shape = (dim,) * ndim
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
a = wp.ones(shape, dtype=wptype, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, wptype)
test.assertEqual(na.shape, shape)
test.assertEqual(na.dtype, nptype)
assert_np_equal(na, np.ones(shape, dtype=nptype))
def test_ones_vector(test, device):
dim = 4
for ndim in range(1, 5):
shape = (dim,) * ndim
for veclen in [2, 3, 4, 5]:
npshape = (*shape, veclen)
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
vectype = wp.types.vector(veclen, wptype)
a = wp.ones(shape, dtype=vectype, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, vectype)
test.assertEqual(na.shape, npshape)
test.assertEqual(na.dtype, nptype)
assert_np_equal(na, np.ones(npshape, dtype=nptype))
def test_ones_matrix(test, device):
dim = 4
for ndim in range(1, 5):
shape = (dim,) * ndim
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
matrix_types = [
# square matrices
wp.types.matrix((2, 2), wptype),
wp.types.matrix((3, 3), wptype),
wp.types.matrix((4, 4), wptype),
wp.types.matrix((5, 5), wptype),
# non-square matrices
wp.types.matrix((2, 3), wptype),
wp.types.matrix((3, 2), wptype),
wp.types.matrix((3, 4), wptype),
wp.types.matrix((4, 3), wptype),
]
for mattype in matrix_types:
npshape = (*shape, *mattype._shape_)
a = wp.ones(shape, dtype=mattype, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, mattype)
test.assertEqual(na.shape, npshape)
test.assertEqual(na.dtype, nptype)
assert_np_equal(na, np.ones(npshape, dtype=nptype))
def test_ones_like_scalar(test, device):
dim = 4
for ndim in range(1, 5):
shape = (dim,) * ndim
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
# source array
a = wp.zeros(shape, dtype=wptype, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, wptype)
test.assertEqual(na.shape, shape)
test.assertEqual(na.dtype, nptype)
assert_np_equal(na, np.zeros(shape, dtype=nptype))
# ones array
b = wp.ones_like(a)
nb = b.numpy()
test.assertEqual(b.shape, shape)
test.assertEqual(b.dtype, wptype)
test.assertEqual(nb.shape, shape)
test.assertEqual(nb.dtype, nptype)
assert_np_equal(nb, np.ones(shape, dtype=nptype))
def test_ones_like_vector(test, device):
dim = 4
for ndim in range(1, 5):
shape = (dim,) * ndim
for veclen in [2, 3, 4, 5]:
npshape = (*shape, veclen)
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
vectype = wp.types.vector(veclen, wptype)
# source array
a = wp.zeros(shape, dtype=vectype, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, vectype)
test.assertEqual(na.shape, npshape)
test.assertEqual(na.dtype, nptype)
assert_np_equal(na, np.zeros(npshape, dtype=nptype))
# ones array
b = wp.ones_like(a)
nb = b.numpy()
test.assertEqual(b.shape, shape)
test.assertEqual(b.dtype, vectype)
test.assertEqual(nb.shape, npshape)
test.assertEqual(nb.dtype, nptype)
assert_np_equal(nb, np.ones(npshape, dtype=nptype))
def test_ones_like_matrix(test, device):
dim = 4
for ndim in range(1, 5):
shape = (dim,) * ndim
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
matrix_types = [
# square matrices
wp.types.matrix((2, 2), wptype),
wp.types.matrix((3, 3), wptype),
wp.types.matrix((4, 4), wptype),
wp.types.matrix((5, 5), wptype),
# non-square matrices
wp.types.matrix((2, 3), wptype),
wp.types.matrix((3, 2), wptype),
wp.types.matrix((3, 4), wptype),
wp.types.matrix((4, 3), wptype),
]
for mattype in matrix_types:
npshape = (*shape, *mattype._shape_)
# source array
a = wp.zeros(shape, dtype=mattype, device=device)
na = a.numpy()
test.assertEqual(a.shape, shape)
test.assertEqual(a.dtype, mattype)
test.assertEqual(na.shape, npshape)
test.assertEqual(na.dtype, nptype)
assert_np_equal(na, np.zeros(npshape, dtype=nptype))
# ones array
b = wp.ones_like(a)
nb = b.numpy()
test.assertEqual(b.shape, shape)
test.assertEqual(b.dtype, mattype)
test.assertEqual(nb.shape, npshape)
test.assertEqual(nb.dtype, nptype)
assert_np_equal(nb, np.ones(npshape, dtype=nptype))
def test_round_trip(test, device):
rng = np.random.default_rng(123)
dim_x = 4
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
a_np = rng.standard_normal(size=dim_x).astype(nptype)
a = wp.array(a_np, device=device)
test.assertEqual(a.dtype, wptype)
assert_np_equal(a.numpy(), a_np)
v_np = rng.standard_normal(size=(dim_x, 3)).astype(nptype)
v = wp.array(v_np, dtype=wp.types.vector(3, wptype), device=device)
assert_np_equal(v.numpy(), v_np)
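# Note (sketch, not part of the original test suite): when no dtype is given,
# wp.array infers the Warp scalar type from the numpy dtype via
# wp.types.np_dtype_to_warp_type, which is the mapping these loops iterate over.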
def test_empty_array(test, device):
# Test whether common operations work with empty (zero-sized) arrays
# without throwing exceptions.
def test_empty_ops(ndim, nrows, ncols, wptype, nptype):
shape = (0,) * ndim
dtype_shape = ()
if wptype in wp.types.scalar_types:
# scalar, vector, or matrix
if ncols > 0:
if nrows > 0:
wptype = wp.types.matrix((nrows, ncols), wptype)
else:
wptype = wp.types.vector(ncols, wptype)
dtype_shape = wptype._shape_
fill_value = wptype(42)
else:
# struct
fill_value = wptype()
# create a zero-sized array
a = wp.empty(shape, dtype=wptype, device=device, requires_grad=True)
test.assertEqual(a.ptr, None)
test.assertEqual(a.size, 0)
test.assertEqual(a.shape, shape)
test.assertEqual(a.grad.ptr, None)
test.assertEqual(a.grad.size, 0)
test.assertEqual(a.grad.shape, shape)
# all of these methods should succeed with zero-sized arrays
a.zero_()
a.fill_(fill_value)
b = a.flatten()
b = a.reshape((0,))
b = a.transpose()
b = a.contiguous()
b = wp.empty_like(a)
b = wp.zeros_like(a)
b = wp.full_like(a, fill_value)
b = wp.clone(a)
wp.copy(a, b)
a.assign(b)
na = a.numpy()
test.assertEqual(na.size, 0)
test.assertEqual(na.shape, (*shape, *dtype_shape))
test.assertEqual(na.dtype, nptype)
test.assertEqual(a.list(), [])
for ndim in range(1, 5):
# test with scalars, vectors, and matrices
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
# scalars
test_empty_ops(ndim, 0, 0, wptype, nptype)
for ncols in [2, 3, 4, 5]:
# vectors
test_empty_ops(ndim, 0, ncols, wptype, nptype)
# square matrices
test_empty_ops(ndim, ncols, ncols, wptype, nptype)
# non-square matrices
test_empty_ops(ndim, 2, 3, wptype, nptype)
test_empty_ops(ndim, 3, 2, wptype, nptype)
test_empty_ops(ndim, 3, 4, wptype, nptype)
test_empty_ops(ndim, 4, 3, wptype, nptype)
# test with structs
test_empty_ops(ndim, 0, 0, FillStruct, FillStruct.numpy_dtype())
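# Note (sketch, not part of the original test suite): a zero-sized array has no
# allocation (ptr is None) but keeps its dtype and shape, so the in-place and
# cloning operations exercised above degenerate to safe no-ops.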
def test_empty_from_numpy(test, device):
# Test whether wrapping an empty (zero-sized) numpy array works correctly
def test_empty_from_data(ndim, nrows, ncols, wptype, nptype):
shape = (0,) * ndim
dtype_shape = ()
if ncols > 0:
if nrows > 0:
wptype = wp.types.matrix((nrows, ncols), wptype)
else:
wptype = wp.types.vector(ncols, wptype)
dtype_shape = wptype._shape_
npshape = (*shape, *dtype_shape)
na = np.empty(npshape, dtype=nptype)
a = wp.array(na, dtype=wptype, device=device)
test.assertEqual(a.size, 0)
test.assertEqual(a.shape, shape)
for ndim in range(1, 5):
# test with scalars, vectors, and matrices
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
# scalars
test_empty_from_data(ndim, 0, 0, wptype, nptype)
for ncols in [2, 3, 4, 5]:
# vectors
test_empty_from_data(ndim, 0, ncols, wptype, nptype)
# square matrices
test_empty_from_data(ndim, ncols, ncols, wptype, nptype)
# non-square matrices
test_empty_from_data(ndim, 2, 3, wptype, nptype)
test_empty_from_data(ndim, 3, 2, wptype, nptype)
test_empty_from_data(ndim, 3, 4, wptype, nptype)
test_empty_from_data(ndim, 4, 3, wptype, nptype)
def test_empty_from_list(test, device):
# Test whether creating an array from an empty Python list works correctly
def test_empty_from_data(nrows, ncols, wptype):
if ncols > 0:
if nrows > 0:
wptype = wp.types.matrix((nrows, ncols), wptype)
else:
wptype = wp.types.vector(ncols, wptype)
a = wp.array([], dtype=wptype, device=device)
test.assertEqual(a.size, 0)
test.assertEqual(a.shape, (0,))
# test with scalars, vectors, and matrices
for wptype in wp.types.scalar_types:
# scalars
test_empty_from_data(0, 0, wptype)
for ncols in [2, 3, 4, 5]:
# vectors
test_empty_from_data(0, ncols, wptype)
# square matrices
test_empty_from_data(ncols, ncols, wptype)
# non-square matrices
test_empty_from_data(2, 3, wptype)
test_empty_from_data(3, 2, wptype)
test_empty_from_data(3, 4, wptype)
test_empty_from_data(4, 3, wptype)
def test_to_list_scalar(test, device):
dim = 3
fill_value = 42
for ndim in range(1, 5):
shape = (dim,) * ndim
for wptype in wp.types.scalar_types:
a = wp.full(shape, fill_value, dtype=wptype, device=device)
l = a.list()
test.assertEqual(len(l), a.size)
test.assertTrue(all(x == fill_value for x in l))
def test_to_list_vector(test, device):
dim = 3
for ndim in range(1, 5):
shape = (dim,) * ndim
for veclen in [2, 3, 4, 5]:
for wptype in wp.types.scalar_types:
vectype = wp.types.vector(veclen, wptype)
fill_value = vectype(42)
a = wp.full(shape, fill_value, dtype=vectype, device=device)
l = a.list()
test.assertEqual(len(l), a.size)
test.assertTrue(all(x == fill_value for x in l))
def test_to_list_matrix(test, device):
dim = 3
for ndim in range(1, 5):
shape = (dim,) * ndim
for wptype in wp.types.scalar_types:
matrix_types = [
# square matrices
wp.types.matrix((2, 2), wptype),
wp.types.matrix((3, 3), wptype),
wp.types.matrix((4, 4), wptype),
wp.types.matrix((5, 5), wptype),
# non-square matrices
wp.types.matrix((2, 3), wptype),
wp.types.matrix((3, 2), wptype),
wp.types.matrix((3, 4), wptype),
wp.types.matrix((4, 3), wptype),
]
for mattype in matrix_types:
fill_value = mattype(42)
a = wp.full(shape, fill_value, dtype=mattype, device=device)
l = a.list()
test.assertEqual(len(l), a.size)
test.assertTrue(all(x == fill_value for x in l))
def test_to_list_struct(test, device):
@wp.struct
class Inner:
h: wp.float16
v: wp.vec3
@wp.struct
class ListStruct:
i: int
f: float
h: wp.float16
vi: wp.vec2i
vf: wp.vec3f
vh: wp.vec4h
mi: wp.types.matrix((2, 2), int)
mf: wp.types.matrix((3, 3), float)
mh: wp.types.matrix((4, 4), wp.float16)
inner: Inner
a1: wp.array(dtype=int)
a2: wp.array2d(dtype=float)
a3: wp.array3d(dtype=wp.float16)
bool: wp.bool
dim = 3
s = ListStruct()
s.i = 42
s.f = 2.5
s.h = -1.25
s.vi = wp.vec2i(1, 2)
s.vf = wp.vec3f(0.1, 0.2, 0.3)
s.vh = wp.vec4h(1.0, 2.0, 3.0, 4.0)
s.mi = [[1, 2], [3, 4]]
s.mf = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
s.mh = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
s.inner = Inner()
s.inner.h = 1.5
s.inner.v = [1, 2, 3]
s.a1 = wp.empty(1, dtype=int, device=device)
s.a2 = wp.empty((1, 1), dtype=float, device=device)
s.a3 = wp.empty((1, 1, 1), dtype=wp.float16, device=device)
s.bool = True
for ndim in range(1, 5):
shape = (dim,) * ndim
a = wp.full(shape, s, dtype=ListStruct, device=device)
l = a.list()
for i in range(a.size):
test.assertEqual(l[i].i, s.i)
test.assertEqual(l[i].f, s.f)
test.assertEqual(l[i].h, s.h)
test.assertEqual(l[i].vi, s.vi)
test.assertEqual(l[i].vf, s.vf)
test.assertEqual(l[i].vh, s.vh)
test.assertEqual(l[i].mi, s.mi)
test.assertEqual(l[i].mf, s.mf)
test.assertEqual(l[i].mh, s.mh)
test.assertEqual(l[i].bool, s.bool)
test.assertEqual(l[i].inner.h, s.inner.h)
test.assertEqual(l[i].inner.v, s.inner.v)
test.assertEqual(l[i].a1.dtype, s.a1.dtype)
test.assertEqual(l[i].a1.ndim, s.a1.ndim)
test.assertEqual(l[i].a2.dtype, s.a2.dtype)
test.assertEqual(l[i].a2.ndim, s.a2.ndim)
test.assertEqual(l[i].a3.dtype, s.a3.dtype)
test.assertEqual(l[i].a3.ndim, s.a3.ndim)
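# In kernel code an array argument evaluates as truthy when it is non-null;
# passing None from Python (as test_array_to_bool does below) yields a null array.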
@wp.kernel
def kernel_array_to_bool(array_null: wp.array(dtype=float), array_valid: wp.array(dtype=float)):
if not array_null:
# always succeed
wp.expect_eq(0, 0)
else:
# force failure
wp.expect_eq(1, 2)
if array_valid:
# always succeed
wp.expect_eq(0, 0)
else:
# force failure
wp.expect_eq(1, 2)
def test_array_to_bool(test, device):
arr = wp.zeros(8, dtype=float, device=device)
wp.launch(kernel_array_to_bool, dim=1, inputs=[None, arr], device=device)
@wp.struct
class InputStruct:
param1: int
param2: float
param3: wp.vec3
param4: wp.array(dtype=float)
@wp.struct
class OutputStruct:
param1: int
param2: float
param3: wp.vec3
@wp.kernel
def struct_array_kernel(inputs: wp.array(dtype=InputStruct), outputs: wp.array(dtype=OutputStruct)):
tid = wp.tid()
wp.expect_eq(inputs[tid].param1, tid)
wp.expect_eq(inputs[tid].param2, float(tid * tid))
wp.expect_eq(inputs[tid].param3[0], 1.0)
wp.expect_eq(inputs[tid].param3[1], 2.0)
wp.expect_eq(inputs[tid].param3[2], 3.0)
wp.expect_eq(inputs[tid].param4[0], 1.0)
wp.expect_eq(inputs[tid].param4[1], 2.0)
wp.expect_eq(inputs[tid].param4[2], 3.0)
o = OutputStruct()
o.param1 = inputs[tid].param1
o.param2 = inputs[tid].param2
o.param3 = inputs[tid].param3
outputs[tid] = o
def test_array_of_structs(test, device):
num_items = 10
l = []
for i in range(num_items):
s = InputStruct()
s.param1 = i
s.param2 = float(i * i)
s.param3 = wp.vec3(1.0, 2.0, 3.0)
s.param4 = wp.array([1.0, 2.0, 3.0], dtype=float, device=device)
l.append(s)
# initialize array from list of structs
inputs = wp.array(l, dtype=InputStruct, device=device)
outputs = wp.zeros(num_items, dtype=OutputStruct, device=device)
# pass to our compute kernel
wp.launch(struct_array_kernel, dim=num_items, inputs=[inputs, outputs], device=device)
out_numpy = outputs.numpy()
out_list = outputs.list()
out_cptr = outputs.to("cpu").cptr()
for i in range(num_items):
test.assertEqual(out_numpy[i][0], l[i].param1)
test.assertEqual(out_numpy[i][1], l[i].param2)
assert_np_equal(out_numpy[i][2], np.array(l[i].param3))
# test named slices of numpy structured array
test.assertEqual(out_numpy["param1"][i], l[i].param1)
test.assertEqual(out_numpy["param2"][i], l[i].param2)
assert_np_equal(out_numpy["param3"][i], np.array(l[i].param3))
test.assertEqual(out_list[i].param1, l[i].param1)
test.assertEqual(out_list[i].param2, l[i].param2)
test.assertEqual(out_list[i].param3, l[i].param3)
test.assertEqual(out_cptr[i].param1, l[i].param1)
test.assertEqual(out_cptr[i].param2, l[i].param2)
test.assertEqual(out_cptr[i].param3, l[i].param3)
@wp.struct
class GradStruct:
param1: int
param2: float
param3: wp.vec3
@wp.kernel
def test_array_of_structs_grad_kernel(inputs: wp.array(dtype=GradStruct), loss: wp.array(dtype=float)):
tid = wp.tid()
wp.atomic_add(loss, 0, inputs[tid].param2 * 2.0)
def test_array_of_structs_grad(test, device):
num_items = 10
l = []
for i in range(num_items):
g = GradStruct()
g.param2 = float(i)
l.append(g)
a = wp.array(l, dtype=GradStruct, device=device, requires_grad=True)
loss = wp.zeros(1, dtype=float, device=device, requires_grad=True)
with wp.Tape() as tape:
wp.launch(test_array_of_structs_grad_kernel, dim=num_items, inputs=[a, loss], device=device)
tape.backward(loss)
grads = a.grad.numpy()
assert_np_equal(grads["param2"], np.full(num_items, 2.0, dtype=np.float32))
@wp.struct
class NumpyStruct:
x: int
v: wp.vec3
def test_array_of_structs_from_numpy(test, device):
num_items = 10
na = np.zeros(num_items, dtype=NumpyStruct.numpy_dtype())
na["x"] = 17
na["v"] = (1, 2, 3)
a = wp.array(data=na, dtype=NumpyStruct, device=device)
assert_np_equal(a.numpy(), na)
def test_array_of_structs_roundtrip(test, device):
num_items = 10
value = NumpyStruct()
value.x = 17
value.v = wp.vec3(1.0, 2.0, 3.0)
# create Warp structured array
a = wp.full(num_items, value, device=device)
# convert to NumPy structured array
na = a.numpy()
expected = np.zeros(num_items, dtype=NumpyStruct.numpy_dtype())
expected["x"] = value.x
expected["v"] = value.v
assert_np_equal(na, expected)
# modify a field
na["x"] = 42
# convert back to Warp array
a = wp.from_numpy(na, NumpyStruct, device=device)
expected["x"] = 42
assert_np_equal(a.numpy(), expected)
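# Usage sketch (not registered as a test): the same struct <-> NumPy
# round-trip pattern with the NumpyStruct defined above. Field names of the
# structured array match the struct annotations. The "cpu" device is an
# assumption for illustration only.
def _numpy_struct_roundtrip_sketch():
    value = NumpyStruct()
    value.x = 7
    value.v = wp.vec3(0.5, 0.5, 0.5)
    a = wp.full(4, value, device="cpu")  # broadcast one struct value
    na = a.numpy()  # NumPy structured array with fields "x" and "v"
    na["x"] += 1  # edit a field on the NumPy side
    return wp.from_numpy(na, NumpyStruct, device="cpu")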
def test_array_from_numpy(test, device):
arr = np.array((1.0, 2.0, 3.0), dtype=float)
result = wp.from_numpy(arr, device=device)
expected = wp.array((1.0, 2.0, 3.0), dtype=wp.float32, shape=(3,))
assert_np_equal(result.numpy(), expected.numpy())
result = wp.from_numpy(arr, dtype=wp.vec3, device=device)
expected = wp.array(((1.0, 2.0, 3.0),), dtype=wp.vec3, shape=(1,))
assert_np_equal(result.numpy(), expected.numpy())
# --------------------------------------------------------------------------
arr = np.array(((1.0, 2.0, 3.0), (4.0, 5.0, 6.0)), dtype=float)
result = wp.from_numpy(arr, device=device)
expected = wp.array(((1.0, 2.0, 3.0), (4.0, 5.0, 6.0)), dtype=wp.vec3, shape=(2,))
assert_np_equal(result.numpy(), expected.numpy())
result = wp.from_numpy(arr, dtype=wp.float32, device=device)
expected = wp.array(((1.0, 2.0, 3.0), (4.0, 5.0, 6.0)), dtype=wp.float32, shape=(2, 3))
assert_np_equal(result.numpy(), expected.numpy())
result = wp.from_numpy(arr, dtype=wp.float32, shape=(6,), device=device)
expected = wp.array((1.0, 2.0, 3.0, 4.0, 5.0, 6.0), dtype=wp.float32, shape=(6,))
assert_np_equal(result.numpy(), expected.numpy())
# --------------------------------------------------------------------------
arr = np.array(
(
(
(1.0, 2.0, 3.0, 4.0),
(2.0, 3.0, 4.0, 5.0),
(3.0, 4.0, 5.0, 6.0),
(4.0, 5.0, 6.0, 7.0),
),
(
(2.0, 3.0, 4.0, 5.0),
(3.0, 4.0, 5.0, 6.0),
(4.0, 5.0, 6.0, 7.0),
(5.0, 6.0, 7.0, 8.0),
),
),
dtype=float,
)
result = wp.from_numpy(arr, device=device)
expected = wp.array(
(
(
(1.0, 2.0, 3.0, 4.0),
(2.0, 3.0, 4.0, 5.0),
(3.0, 4.0, 5.0, 6.0),
(4.0, 5.0, 6.0, 7.0),
),
(
(2.0, 3.0, 4.0, 5.0),
(3.0, 4.0, 5.0, 6.0),
(4.0, 5.0, 6.0, 7.0),
(5.0, 6.0, 7.0, 8.0),
),
),
dtype=wp.mat44,
shape=(2,),
)
assert_np_equal(result.numpy(), expected.numpy())
result = wp.from_numpy(arr, dtype=wp.float32, device=device)
expected = wp.array(
(
(
(1.0, 2.0, 3.0, 4.0),
(2.0, 3.0, 4.0, 5.0),
(3.0, 4.0, 5.0, 6.0),
(4.0, 5.0, 6.0, 7.0),
),
(
(2.0, 3.0, 4.0, 5.0),
(3.0, 4.0, 5.0, 6.0),
(4.0, 5.0, 6.0, 7.0),
(5.0, 6.0, 7.0, 8.0),
),
),
dtype=wp.float32,
shape=(2, 4, 4),
)
assert_np_equal(result.numpy(), expected.numpy())
result = wp.from_numpy(arr, dtype=wp.vec4, device=device).reshape((8,)) # Reshape from (2, 4)
expected = wp.array(
(
(1.0, 2.0, 3.0, 4.0),
(2.0, 3.0, 4.0, 5.0),
(3.0, 4.0, 5.0, 6.0),
(4.0, 5.0, 6.0, 7.0),
(2.0, 3.0, 4.0, 5.0),
(3.0, 4.0, 5.0, 6.0),
(4.0, 5.0, 6.0, 7.0),
(5.0, 6.0, 7.0, 8.0),
),
dtype=wp.vec4,
shape=(8,),
)
assert_np_equal(result.numpy(), expected.numpy())
result = wp.from_numpy(arr, dtype=wp.float32, shape=(32,), device=device)
expected = wp.array(
(
1.0,
2.0,
3.0,
4.0,
2.0,
3.0,
4.0,
5.0,
3.0,
4.0,
5.0,
6.0,
4.0,
5.0,
6.0,
7.0,
2.0,
3.0,
4.0,
5.0,
3.0,
4.0,
5.0,
6.0,
4.0,
5.0,
6.0,
7.0,
5.0,
6.0,
7.0,
8.0,
),
dtype=wp.float32,
shape=(32,),
)
assert_np_equal(result.numpy(), expected.numpy())
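# Inference note for the cases above: when dtype is omitted, wp.from_numpy
# maps trailing dimensions of supported sizes onto vector/matrix types
# (e.g. a trailing 3 becomes wp.vec3, a trailing 4x4 becomes wp.mat44),
# while an explicit dtype and/or shape overrides that mapping, as the
# float32 and reshape cases demonstrate.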
def test_array_from_cai(test, device):
import torch
@wp.kernel
def first_row_plus_one(x: wp.array2d(dtype=float)):
i, j = wp.tid()
if i == 0:
x[i, j] += 1.0
# start with torch tensor
arr = torch.zeros((3, 3))
torch_device = wp.device_to_torch(device)
arr_torch = arr.to(torch_device)
# wrap as warp array via __cuda_array_interface__
arr_warp = wp.array(arr_torch, device=device)
wp.launch(kernel=first_row_plus_one, dim=(3, 3), inputs=[arr_warp], device=device)
# re-wrap as torch array
arr_torch = wp.to_torch(arr_warp)
# transpose
arr_torch = torch.as_strided(arr_torch, size=(3, 3), stride=(arr_torch.stride(1), arr_torch.stride(0)))
# re-wrap as warp array with new strides
arr_warp = wp.array(arr_torch, device=device)
wp.launch(kernel=first_row_plus_one, dim=(3, 3), inputs=[arr_warp], device=device)
assert_np_equal(arr_warp.numpy(), np.array([[2, 1, 1], [1, 0, 0], [1, 0, 0]]))
devices = get_test_devices()
class TestArray(unittest.TestCase):
def test_array_new_del(self):
# test the scenario in which an array instance is created but not initialized before gc
instance = wp.array.__new__(wp.array)
instance.__del__()
add_function_test(TestArray, "test_shape", test_shape, devices=devices)
add_function_test(TestArray, "test_negative_shape", test_negative_shape, devices=devices)
add_function_test(TestArray, "test_flatten", test_flatten, devices=devices)
add_function_test(TestArray, "test_reshape", test_reshape, devices=devices)
add_function_test(TestArray, "test_slicing", test_slicing, devices=devices)
add_function_test(TestArray, "test_transpose", test_transpose, devices=devices)
add_function_test(TestArray, "test_view", test_view, devices=devices)
add_function_test(TestArray, "test_clone_adjoint", test_clone_adjoint, devices=devices)
add_function_test(TestArray, "test_assign_adjoint", test_assign_adjoint, devices=devices)
add_function_test(TestArray, "test_1d_array", test_1d, devices=devices)
add_function_test(TestArray, "test_2d_array", test_2d, devices=devices)
add_function_test(TestArray, "test_3d_array", test_3d, devices=devices)
add_function_test(TestArray, "test_4d_array", test_4d, devices=devices)
add_function_test(TestArray, "test_4d_array_transposed", test_4d_transposed, devices=devices)
add_function_test(TestArray, "test_fill_scalar", test_fill_scalar, devices=devices)
add_function_test(TestArray, "test_fill_vector", test_fill_vector, devices=devices)
add_function_test(TestArray, "test_fill_matrix", test_fill_matrix, devices=devices)
add_function_test(TestArray, "test_fill_struct", test_fill_struct, devices=devices)
add_function_test(TestArray, "test_fill_slices", test_fill_slices, devices=devices)
add_function_test(TestArray, "test_full_scalar", test_full_scalar, devices=devices)
add_function_test(TestArray, "test_full_vector", test_full_vector, devices=devices)
add_function_test(TestArray, "test_full_matrix", test_full_matrix, devices=devices)
add_function_test(TestArray, "test_full_struct", test_full_struct, devices=devices)
add_function_test(TestArray, "test_ones_scalar", test_ones_scalar, devices=devices)
add_function_test(TestArray, "test_ones_vector", test_ones_vector, devices=devices)
add_function_test(TestArray, "test_ones_matrix", test_ones_matrix, devices=devices)
add_function_test(TestArray, "test_ones_like_scalar", test_ones_like_scalar, devices=devices)
add_function_test(TestArray, "test_ones_like_vector", test_ones_like_vector, devices=devices)
add_function_test(TestArray, "test_ones_like_matrix", test_ones_like_matrix, devices=devices)
add_function_test(TestArray, "test_empty_array", test_empty_array, devices=devices)
add_function_test(TestArray, "test_empty_from_numpy", test_empty_from_numpy, devices=devices)
add_function_test(TestArray, "test_empty_from_list", test_empty_from_list, devices=devices)
add_function_test(TestArray, "test_to_list_scalar", test_to_list_scalar, devices=devices)
add_function_test(TestArray, "test_to_list_vector", test_to_list_vector, devices=devices)
add_function_test(TestArray, "test_to_list_matrix", test_to_list_matrix, devices=devices)
add_function_test(TestArray, "test_to_list_struct", test_to_list_struct, devices=devices)
add_function_test(TestArray, "test_lower_bound", test_lower_bound, devices=devices)
add_function_test(TestArray, "test_round_trip", test_round_trip, devices=devices)
add_function_test(TestArray, "test_array_to_bool", test_array_to_bool, devices=devices)
add_function_test(TestArray, "test_array_of_structs", test_array_of_structs, devices=devices)
add_function_test(TestArray, "test_array_of_structs_grad", test_array_of_structs_grad, devices=devices)
add_function_test(TestArray, "test_array_of_structs_from_numpy", test_array_of_structs_from_numpy, devices=devices)
add_function_test(TestArray, "test_array_of_structs_roundtrip", test_array_of_structs_roundtrip, devices=devices)
add_function_test(TestArray, "test_array_from_numpy", test_array_from_numpy, devices=devices)
try:
import torch
# check which Warp devices work with Torch
# CUDA devices may fail if Torch was not compiled with CUDA support
torch_compatible_devices = []
torch_compatible_cuda_devices = []
for d in devices:
try:
t = torch.arange(10, device=wp.device_to_torch(d))
t += 1
torch_compatible_devices.append(d)
if d.is_cuda:
torch_compatible_cuda_devices.append(d)
except Exception as e:
print(f"Skipping Array tests that use Torch on device '{d}' due to exception: {e}")
add_function_test(TestArray, "test_array_from_cai", test_array_from_cai, devices=torch_compatible_cuda_devices)
except Exception as e:
print(f"Skipping Array tests that use Torch due to exception: {e}")
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
NVIDIA/warp/warp/tests/test_streams.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
from warp.utils import check_iommu
@wp.kernel
def inc(a: wp.array(dtype=float)):
tid = wp.tid()
a[tid] = a[tid] + 1.0
@wp.kernel
def inc_new(src: wp.array(dtype=float), dst: wp.array(dtype=float)):
tid = wp.tid()
dst[tid] = src[tid] + 1.0
@wp.kernel
def sum(a: wp.array(dtype=float), b: wp.array(dtype=float), c: wp.array(dtype=float)):
tid = wp.tid()
c[tid] = a[tid] + b[tid]
# number of elements to use for testing
N = 10 * 1024 * 1024
def test_stream_set(test, device):
device = wp.get_device(device)
old_stream = device.stream
new_stream = wp.Stream(device)
try:
wp.set_stream(new_stream, device)
test.assertTrue(device.has_stream)
test.assertEqual(device.stream, new_stream)
finally:
# restore original stream
wp.set_stream(old_stream, device)
def test_stream_arg_explicit_sync(test, device):
a = wp.zeros(N, dtype=float, device=device)
b = wp.full(N, 42, dtype=float, device=device)
c = wp.empty(N, dtype=float, device=device)
old_stream = wp.get_stream(device)
new_stream = wp.Stream(device)
# allocations need to be explicitly synced before launching work using stream arguments
new_stream.wait_stream(old_stream)
# launch work on new stream
wp.launch(inc, dim=a.size, inputs=[a], stream=new_stream)
wp.copy(b, a, stream=new_stream)
wp.launch(inc, dim=a.size, inputs=[a], stream=new_stream)
wp.copy(c, a, stream=new_stream)
wp.launch(inc, dim=a.size, inputs=[a], stream=new_stream)
assert_np_equal(a.numpy(), np.full(N, fill_value=3.0))
assert_np_equal(b.numpy(), np.full(N, fill_value=1.0))
assert_np_equal(c.numpy(), np.full(N, fill_value=2.0))
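# Minimal sketch of the explicit-sync pattern above (assumption: a CUDA
# device is passed in). Allocations happen on the device's current stream,
# so a different stream must wait on it before touching the memory.
def _stream_arg_sync_sketch(device):
    a = wp.zeros(N, dtype=float, device=device)
    s = wp.Stream(device)
    s.wait_stream(wp.get_stream(device))  # order s after the allocation
    wp.launch(inc, dim=a.size, inputs=[a], stream=s)
    wp.synchronize_stream(s)  # make results visible to the host
    return a.numpy()  # expect 1.0 everywhere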
def test_stream_scope_implicit_sync(test, device):
with wp.ScopedDevice(device):
a = wp.zeros(N, dtype=float)
b = wp.full(N, 42, dtype=float)
c = wp.empty(N, dtype=float)
old_stream = wp.get_stream()
new_stream = wp.Stream()
# launch work on new stream
# allocations are implicitly synced when entering wp.ScopedStream
with wp.ScopedStream(new_stream):
assert wp.get_stream() == new_stream
wp.launch(inc, dim=a.size, inputs=[a])
wp.copy(b, a)
wp.launch(inc, dim=a.size, inputs=[a])
wp.copy(c, a)
wp.launch(inc, dim=a.size, inputs=[a])
assert wp.get_stream() == old_stream
assert_np_equal(a.numpy(), np.full(N, fill_value=3.0))
assert_np_equal(b.numpy(), np.full(N, fill_value=1.0))
assert_np_equal(c.numpy(), np.full(N, fill_value=2.0))
def test_stream_arg_synchronize(test, device):
a = wp.zeros(N, dtype=float, device=device)
b = wp.empty(N, dtype=float, device=device)
c = wp.empty(N, dtype=float, device=device)
d = wp.empty(N, dtype=float, device=device)
stream1 = wp.get_stream(device)
stream2 = wp.Stream(device)
stream3 = wp.Stream(device)
wp.launch(inc, dim=N, inputs=[a], device=device)
# b and c depend on a
wp.synchronize_stream(stream1)
wp.launch(inc_new, dim=N, inputs=[a, b], stream=stream2)
wp.launch(inc_new, dim=N, inputs=[a, c], stream=stream3)
# d depends on b and c
wp.synchronize_stream(stream2)
wp.synchronize_stream(stream3)
wp.launch(sum, dim=N, inputs=[b, c, d], device=device)
assert_np_equal(a.numpy(), np.full(N, fill_value=1.0))
assert_np_equal(b.numpy(), np.full(N, fill_value=2.0))
assert_np_equal(c.numpy(), np.full(N, fill_value=2.0))
assert_np_equal(d.numpy(), np.full(N, fill_value=4.0))
def test_stream_arg_wait_event(test, device):
a = wp.zeros(N, dtype=float, device=device)
b = wp.empty(N, dtype=float, device=device)
c = wp.empty(N, dtype=float, device=device)
d = wp.empty(N, dtype=float, device=device)
stream1 = wp.get_stream(device)
stream2 = wp.Stream(device)
stream3 = wp.Stream(device)
event1 = wp.Event(device)
event2 = wp.Event(device)
event3 = wp.Event(device)
wp.launch(inc, dim=N, inputs=[a], stream=stream1)
stream1.record_event(event1)
# b and c depend on a
stream2.wait_event(event1)
stream3.wait_event(event1)
wp.launch(inc_new, dim=N, inputs=[a, b], stream=stream2)
stream2.record_event(event2)
wp.launch(inc_new, dim=N, inputs=[a, c], stream=stream3)
stream3.record_event(event3)
# d depends on b and c
stream1.wait_event(event2)
stream1.wait_event(event3)
wp.launch(sum, dim=N, inputs=[b, c, d], stream=stream1)
assert_np_equal(a.numpy(), np.full(N, fill_value=1.0))
assert_np_equal(b.numpy(), np.full(N, fill_value=2.0))
assert_np_equal(c.numpy(), np.full(N, fill_value=2.0))
assert_np_equal(d.numpy(), np.full(N, fill_value=4.0))
def test_stream_arg_wait_stream(test, device):
a = wp.zeros(N, dtype=float, device=device)
b = wp.empty(N, dtype=float, device=device)
c = wp.empty(N, dtype=float, device=device)
d = wp.empty(N, dtype=float, device=device)
stream1 = wp.get_stream(device)
stream2 = wp.Stream(device)
stream3 = wp.Stream(device)
wp.launch(inc, dim=N, inputs=[a], stream=stream1)
# b and c depend on a
stream2.wait_stream(stream1)
stream3.wait_stream(stream1)
wp.launch(inc_new, dim=N, inputs=[a, b], stream=stream2)
wp.launch(inc_new, dim=N, inputs=[a, c], stream=stream3)
# d depends on b and c
stream1.wait_stream(stream2)
stream1.wait_stream(stream3)
wp.launch(sum, dim=N, inputs=[b, c, d], stream=stream1)
assert_np_equal(a.numpy(), np.full(N, fill_value=1.0))
assert_np_equal(b.numpy(), np.full(N, fill_value=2.0))
assert_np_equal(c.numpy(), np.full(N, fill_value=2.0))
assert_np_equal(d.numpy(), np.full(N, fill_value=4.0))
def test_stream_scope_synchronize(test, device):
with wp.ScopedDevice(device):
a = wp.zeros(N, dtype=float)
b = wp.empty(N, dtype=float)
c = wp.empty(N, dtype=float)
d = wp.empty(N, dtype=float)
stream2 = wp.Stream()
stream3 = wp.Stream()
wp.launch(inc, dim=N, inputs=[a])
# b and c depend on a
wp.synchronize_stream()
with wp.ScopedStream(stream2):
wp.launch(inc_new, dim=N, inputs=[a, b])
with wp.ScopedStream(stream3):
wp.launch(inc_new, dim=N, inputs=[a, c])
# d depends on b and c
wp.synchronize_stream(stream2)
wp.synchronize_stream(stream3)
wp.launch(sum, dim=N, inputs=[b, c, d])
assert_np_equal(a.numpy(), np.full(N, fill_value=1.0))
assert_np_equal(b.numpy(), np.full(N, fill_value=2.0))
assert_np_equal(c.numpy(), np.full(N, fill_value=2.0))
assert_np_equal(d.numpy(), np.full(N, fill_value=4.0))
def test_stream_scope_wait_event(test, device):
with wp.ScopedDevice(device):
a = wp.zeros(N, dtype=float)
b = wp.empty(N, dtype=float)
c = wp.empty(N, dtype=float)
d = wp.empty(N, dtype=float)
stream2 = wp.Stream()
stream3 = wp.Stream()
event1 = wp.Event()
event2 = wp.Event()
event3 = wp.Event()
wp.launch(inc, dim=N, inputs=[a])
wp.record_event(event1)
# b and c depend on a
with wp.ScopedStream(stream2):
wp.wait_event(event1)
wp.launch(inc_new, dim=N, inputs=[a, b])
wp.record_event(event2)
with wp.ScopedStream(stream3):
wp.wait_event(event1)
wp.launch(inc_new, dim=N, inputs=[a, c])
wp.record_event(event3)
# d depends on b and c
wp.wait_event(event2)
wp.wait_event(event3)
wp.launch(sum, dim=N, inputs=[b, c, d])
assert_np_equal(a.numpy(), np.full(N, fill_value=1.0))
assert_np_equal(b.numpy(), np.full(N, fill_value=2.0))
assert_np_equal(c.numpy(), np.full(N, fill_value=2.0))
assert_np_equal(d.numpy(), np.full(N, fill_value=4.0))
def test_stream_scope_wait_stream(test, device):
with wp.ScopedDevice(device):
a = wp.zeros(N, dtype=float)
b = wp.empty(N, dtype=float)
c = wp.empty(N, dtype=float)
d = wp.empty(N, dtype=float)
stream1 = wp.get_stream()
stream2 = wp.Stream()
stream3 = wp.Stream()
wp.launch(inc, dim=N, inputs=[a])
# b and c depend on a
with wp.ScopedStream(stream2):
wp.wait_stream(stream1)
wp.launch(inc_new, dim=N, inputs=[a, b])
with wp.ScopedStream(stream3):
wp.wait_stream(stream1)
wp.launch(inc_new, dim=N, inputs=[a, c])
# d depends on b and c
wp.wait_stream(stream2)
wp.wait_stream(stream3)
wp.launch(sum, dim=N, inputs=[b, c, d])
assert_np_equal(a.numpy(), np.full(N, fill_value=1.0))
assert_np_equal(b.numpy(), np.full(N, fill_value=2.0))
assert_np_equal(c.numpy(), np.full(N, fill_value=2.0))
assert_np_equal(d.numpy(), np.full(N, fill_value=4.0))
def test_event_synchronize(test, device):
stream = wp.get_stream(device)
a_host = wp.empty(N, dtype=float, device="cpu", pinned=True)
b_host = wp.empty(N, dtype=float, device="cpu", pinned=True)
# initialize GPU array and do an asynchronous readback
a = wp.full(N, 17, dtype=float, device=device)
wp.copy(a_host, a)
a_event = stream.record_event()
b = wp.full(N, 42, dtype=float, device=device)
wp.copy(b_host, b)
b_event = stream.record_event()
wp.synchronize_event(a_event)
assert_np_equal(a_host.numpy(), np.full(N, fill_value=17.0))
wp.synchronize_event(b_event)
assert_np_equal(b_host.numpy(), np.full(N, fill_value=42.0))
def test_event_elapsed_time(test, device):
stream = wp.get_stream(device)
e1 = wp.Event(device, enable_timing=True)
e2 = wp.Event(device, enable_timing=True)
a = wp.zeros(N, dtype=float, device=device)
stream.record_event(e1)
wp.launch(inc, dim=N, inputs=[a], device=device)
stream.record_event(e2)
elapsed = wp.get_event_elapsed_time(e1, e2)
test.assertGreater(elapsed, 0)
devices = get_selected_cuda_test_devices()
class TestStreams(unittest.TestCase):
def test_stream_exceptions(self):
cpu_device = wp.get_device("cpu")
# Can't set the stream on a CPU device
with self.assertRaises(RuntimeError):
stream0 = wp.Stream()
cpu_device.stream = stream0
# Can't create a stream on the CPU
with self.assertRaises(RuntimeError):
wp.Stream(device="cpu")
# Can't create an event with CPU device
with self.assertRaises(RuntimeError):
wp.Event(device=cpu_device)
# Can't get the stream on a CPU device
with self.assertRaises(RuntimeError):
cpu_stream = cpu_device.stream # noqa: F841
@unittest.skipUnless(len(wp.get_cuda_devices()) > 1, "Requires at least two CUDA devices")
@unittest.skipUnless(check_iommu(), "IOMMU seems enabled")
def test_stream_arg_graph_mgpu(self):
wp.load_module(device="cuda:0")
wp.load_module(device="cuda:1")
# Peer-to-peer copies are not possible during graph capture if the arrays were
# allocated using pooled allocators and mempool access is not enabled.
# Here, we force default CUDA allocators and pre-allocate the memory.
with wp.ScopedMempool("cuda:0", False), wp.ScopedMempool("cuda:1", False):
# resources on GPU 0
stream0 = wp.get_stream("cuda:0")
a0 = wp.zeros(N, dtype=float, device="cuda:0")
b0 = wp.empty(N, dtype=float, device="cuda:0")
c0 = wp.empty(N, dtype=float, device="cuda:0")
# resources on GPU 1
stream1 = wp.get_stream("cuda:1")
a1 = wp.zeros(N, dtype=float, device="cuda:1")
# start recording on stream0
wp.capture_begin(stream=stream0, force_module_load=False)
try:
# branch into stream1
stream1.wait_stream(stream0)
# launch concurrent kernels on each stream
wp.launch(inc, dim=N, inputs=[a0], stream=stream0)
wp.launch(inc, dim=N, inputs=[a1], stream=stream1)
# wait for stream1 to finish
stream0.wait_stream(stream1)
# copy values from stream1
wp.copy(b0, a1, stream=stream0)
# compute sum
wp.launch(sum, dim=N, inputs=[a0, b0, c0], stream=stream0)
finally:
# finish recording on stream0
g = wp.capture_end(stream=stream0)
# replay
num_iters = 10
for _ in range(num_iters):
wp.capture_launch(g, stream=stream0)
# check results
assert_np_equal(c0.numpy(), np.full(N, fill_value=2 * num_iters))
@unittest.skipUnless(len(wp.get_cuda_devices()) > 1, "Requires at least two CUDA devices")
@unittest.skipUnless(check_iommu(), "IOMMU seems enabled")
def test_stream_scope_graph_mgpu(self):
wp.load_module(device="cuda:0")
wp.load_module(device="cuda:1")
# Peer-to-peer copies are not possible during graph capture if the arrays were
# allocated using pooled allocators and mempool access is not enabled.
# Here, we force default CUDA allocators and pre-allocate the memory.
with wp.ScopedMempool("cuda:0", False), wp.ScopedMempool("cuda:1", False):
# resources on GPU 0
with wp.ScopedDevice("cuda:0"):
stream0 = wp.get_stream()
a0 = wp.zeros(N, dtype=float)
b0 = wp.empty(N, dtype=float)
c0 = wp.empty(N, dtype=float)
# resources on GPU 1
with wp.ScopedDevice("cuda:1"):
stream1 = wp.get_stream()
a1 = wp.zeros(N, dtype=float)
# capture graph
with wp.ScopedDevice("cuda:0"):
# start recording
wp.capture_begin(force_module_load=False)
try:
with wp.ScopedDevice("cuda:1"):
# branch into stream1
wp.wait_stream(stream0)
wp.launch(inc, dim=N, inputs=[a1])
wp.launch(inc, dim=N, inputs=[a0])
# wait for stream1 to finish
wp.wait_stream(stream1)
# copy values from stream1
wp.copy(b0, a1)
# compute sum
wp.launch(sum, dim=N, inputs=[a0, b0, c0])
finally:
# finish recording
g = wp.capture_end()
# replay
with wp.ScopedDevice("cuda:0"):
num_iters = 10
for _ in range(num_iters):
wp.capture_launch(g)
# check results
assert_np_equal(c0.numpy(), np.full(N, fill_value=2 * num_iters))
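# Single-GPU capture/replay sketch distilled from the tests above
# (assumption: "cuda:0" exists and its module is preloaded). Launches
# between wp.capture_begin() and wp.capture_end() are recorded into a CUDA
# graph and replayed with wp.capture_launch().
def _graph_capture_sketch():
    wp.load_module(device="cuda:0")
    with wp.ScopedDevice("cuda:0"):
        a = wp.zeros(N, dtype=float)
        wp.capture_begin(force_module_load=False)
        try:
            wp.launch(inc, dim=N, inputs=[a])
        finally:
            g = wp.capture_end()
        for _ in range(5):
            wp.capture_launch(g)
        return a.numpy()  # expect 5.0 everywhere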
add_function_test(TestStreams, "test_stream_set", test_stream_set, devices=devices)
add_function_test(TestStreams, "test_stream_arg_explicit_sync", test_stream_arg_explicit_sync, devices=devices)
add_function_test(TestStreams, "test_stream_scope_implicit_sync", test_stream_scope_implicit_sync, devices=devices)
add_function_test(TestStreams, "test_stream_arg_synchronize", test_stream_arg_synchronize, devices=devices)
add_function_test(TestStreams, "test_stream_arg_wait_event", test_stream_arg_wait_event, devices=devices)
add_function_test(TestStreams, "test_stream_arg_wait_stream", test_stream_arg_wait_stream, devices=devices)
add_function_test(TestStreams, "test_stream_scope_synchronize", test_stream_scope_synchronize, devices=devices)
add_function_test(TestStreams, "test_stream_scope_wait_event", test_stream_scope_wait_event, devices=devices)
add_function_test(TestStreams, "test_stream_scope_wait_stream", test_stream_scope_wait_stream, devices=devices)
add_function_test(TestStreams, "test_event_synchronize", test_event_synchronize, devices=devices)
add_function_test(TestStreams, "test_event_elapsed_time", test_event_elapsed_time, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
NVIDIA/warp/warp/tests/test_smoothstep.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
from dataclasses import dataclass
from typing import Any
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
@dataclass
class TestData:
a: Any
b: Any
t: float
expected: Any
expected_adj_a: Any = None
expected_adj_b: Any = None
expected_adj_t: float = None
def check_backwards(self):
return self.expected_adj_a is not None and self.expected_adj_b is not None and self.expected_adj_t is not None
TEST_DATA = {
wp.float32: (
TestData(
a=1.0,
b=2.0,
t=1.5,
expected=0.5,
expected_adj_a=-0.75,
expected_adj_b=-0.75,
expected_adj_t=1.5,
),
TestData(
a=-1.0,
b=2.0,
t=-0.25,
expected=0.15625,
expected_adj_a=-0.28125,
expected_adj_b=-0.09375,
expected_adj_t=0.375,
),
TestData(
a=0.0,
b=1.0,
t=9.9,
expected=1.0,
expected_adj_a=0.0,
expected_adj_b=0.0,
expected_adj_t=0.0,
),
TestData(
a=0.0,
b=1.0,
t=-9.9,
expected=0.0,
expected_adj_a=0.0,
expected_adj_b=0.0,
expected_adj_t=0.0,
),
),
}
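# Reference sketch used to sanity-check the expected values above: a NumPy
# mirror of the smoothstep definition (clamp to [0, 1], then the cubic
# Hermite polynomial 3x^2 - 2x^3). For a=1, b=2, t=1.5 this gives 0.5, and
# for a=-1, b=2, t=-0.25 it gives 0.15625, matching TEST_DATA.
def _smoothstep_reference(a, b, t):
    x = np.clip((t - a) / (b - a), 0.0, 1.0)
    return x * x * (3.0 - 2.0 * x)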
def test_smoothstep(test, device):
def make_kernel_fn(data_type):
def fn(
a: wp.array(dtype=data_type),
b: wp.array(dtype=data_type),
t: wp.array(dtype=float),
out: wp.array(dtype=data_type),
):
out[0] = wp.smoothstep(a[0], b[0], t[0])
return fn
for data_type in TEST_DATA:
kernel_fn = make_kernel_fn(data_type)
kernel = wp.Kernel(
func=kernel_fn,
key=f"test_smoothstep{data_type.__name__}_kernel",
)
for test_data in TEST_DATA[data_type]:
a = wp.array(
[test_data.a],
dtype=data_type,
device=device,
requires_grad=True,
)
b = wp.array(
[test_data.b],
dtype=data_type,
device=device,
requires_grad=True,
)
t = wp.array(
[test_data.t],
dtype=float,
device=device,
requires_grad=True,
)
out = wp.array(
[0] * wp.types.type_length(data_type),
dtype=data_type,
device=device,
requires_grad=True,
)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[a, b, t, out],
device=device,
)
assert_np_equal(
out.numpy(),
np.array([test_data.expected]),
tol=1e-6,
)
if test_data.check_backwards():
tape.backward(out)
assert_np_equal(
tape.gradients[a].numpy(),
np.array([test_data.expected_adj_a]),
tol=1e-6,
)
assert_np_equal(
tape.gradients[b].numpy(),
np.array([test_data.expected_adj_b]),
tol=1e-6,
)
assert_np_equal(
tape.gradients[t].numpy(),
np.array([test_data.expected_adj_t]),
tol=1e-6,
)
devices = get_test_devices()
class TestSmoothstep(unittest.TestCase):
pass
add_function_test(TestSmoothstep, "test_smoothstep", test_smoothstep, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
NVIDIA/warp/warp/tests/test_linear_solvers.py
import unittest
import numpy as np
import warp as wp
from warp.optim.linear import bicgstab, cg, cr, gmres, preconditioner
from warp.tests.unittest_utils import *
wp.init() # For runtime.core.is_cutlass_enabled()
def _check_linear_solve(test, A, b, func, *args, **kwargs):
# test from zero
x = wp.zeros_like(b)
with wp.ScopedDevice(A.device):
niter, err, atol = func(A, b, x, *args, use_cuda_graph=True, **kwargs)
test.assertLessEqual(err, atol)
# test with warm start
with wp.ScopedDevice(A.device):
niter_warm, err, atol = func(A, b, x, *args, use_cuda_graph=False, **kwargs)
test.assertLessEqual(err, atol)
if func in [cr, gmres]:
# monotonic convergence
test.assertLess(niter_warm, niter)
    # In CG and BiCGSTAB the residual norm is evaluated from the running residual
    # rather than being recomputed from scratch as Ax - b.
    # This can lead to accumulated inaccuracies over iterations, especially in float32.
residual = A.numpy() @ x.numpy() - b.numpy()
err_np = np.linalg.norm(residual)
if A.dtype == wp.float64:
test.assertLessEqual(err_np, 2.0 * atol)
else:
test.assertLessEqual(err_np, 32.0 * atol)
def _least_square_system(rng, n: int):
C = rng.uniform(low=-100, high=100, size=(n, n))
f = rng.uniform(low=-100, high=100, size=(n,))
A = C @ C.T
b = C @ f
return A, b
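# Why this construction works: A = C @ C.T is symmetric positive
# (semi-)definite by construction, and b = C @ f lies in the range of A, so
# every system derived from it below is solvable. A quick NumPy check
# (hypothetical helper, not used by the tests):
def _is_spd(A, tol=1.0e-8):
    return np.allclose(A, A.T) and bool(np.all(np.linalg.eigvalsh(A) >= -tol))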
def _make_spd_system(n: int, seed: int, dtype, device):
rng = np.random.default_rng(seed)
A, b = _least_square_system(rng, n)
return wp.array(A, dtype=dtype, device=device), wp.array(b, dtype=dtype, device=device)
def _make_nonsymmetric_system(n: int, seed: int, dtype, device):
rng = np.random.default_rng(seed)
s = rng.uniform(low=0.1, high=10, size=(n,))
A, b = _least_square_system(rng, n)
A = A @ np.diag(s)
return wp.array(A, dtype=dtype, device=device), wp.array(b, dtype=dtype, device=device)
def _make_indefinite_system(n: int, seed: int, dtype, device):
rng = np.random.default_rng(seed)
s = rng.uniform(low=0.1, high=10, size=(n,))
A, b = _least_square_system(rng, n)
A = A @ np.diag(s)
return wp.array(A, dtype=dtype, device=device), wp.array(b, dtype=dtype, device=device)
def _make_identity_system(n: int, seed: int, dtype, device):
rng = np.random.default_rng(seed)
A = np.eye(n)
b = rng.uniform(low=-1.0, high=1.0, size=(n,))
return wp.array(A, dtype=dtype, device=device), wp.array(b, dtype=dtype, device=device)
def test_cg(test, device):
A, b = _make_spd_system(n=64, seed=123, device=device, dtype=wp.float64)
M = preconditioner(A, "diag")
_check_linear_solve(test, A, b, cg, maxiter=1000)
_check_linear_solve(test, A, b, cg, M=M, maxiter=1000)
A, b = _make_spd_system(n=16, seed=321, device=device, dtype=wp.float32)
M = preconditioner(A, "diag")
_check_linear_solve(test, A, b, cg, maxiter=1000)
_check_linear_solve(test, A, b, cg, M=M, maxiter=1000)
A, b = _make_identity_system(n=5, seed=321, device=device, dtype=wp.float32)
_check_linear_solve(test, A, b, cg, maxiter=30)
def test_cr(test, device):
A, b = _make_spd_system(n=64, seed=123, device=device, dtype=wp.float64)
M = preconditioner(A, "diag")
_check_linear_solve(test, A, b, cr, maxiter=1000)
_check_linear_solve(test, A, b, cr, M=M, maxiter=1000)
A, b = _make_spd_system(n=16, seed=321, device=device, dtype=wp.float32)
M = preconditioner(A, "diag")
_check_linear_solve(test, A, b, cr, maxiter=1000)
_check_linear_solve(test, A, b, cr, M=M, maxiter=1000)
A, b = _make_identity_system(n=5, seed=321, device=device, dtype=wp.float32)
_check_linear_solve(test, A, b, cr, maxiter=30)
def test_bicgstab(test, device):
A, b = _make_nonsymmetric_system(n=64, seed=123, device=device, dtype=wp.float64)
M = preconditioner(A, "diag")
_check_linear_solve(test, A, b, bicgstab, maxiter=1000)
_check_linear_solve(test, A, b, bicgstab, M=M, maxiter=1000)
_check_linear_solve(test, A, b, bicgstab, M=M, maxiter=1000, is_left_preconditioner=True)
A, b = _make_nonsymmetric_system(n=16, seed=321, device=device, dtype=wp.float32)
M = preconditioner(A, "diag")
_check_linear_solve(test, A, b, bicgstab, maxiter=1000)
_check_linear_solve(test, A, b, bicgstab, M=M, maxiter=1000)
_check_linear_solve(test, A, b, bicgstab, M=M, maxiter=1000, is_left_preconditioner=True)
A, b = _make_indefinite_system(n=64, seed=121, device=device, dtype=wp.float64)
M = preconditioner(A, "diag")
_check_linear_solve(test, A, b, bicgstab, maxiter=1000)
_check_linear_solve(test, A, b, bicgstab, M=M, maxiter=1000)
_check_linear_solve(test, A, b, bicgstab, M=M, maxiter=1000, is_left_preconditioner=True)
A, b = _make_identity_system(n=5, seed=321, device=device, dtype=wp.float32)
_check_linear_solve(test, A, b, bicgstab, maxiter=30)
def test_gmres(test, device):
A, b = _make_nonsymmetric_system(n=64, seed=456, device=device, dtype=wp.float64)
M = preconditioner(A, "diag")
_check_linear_solve(test, A, b, gmres, maxiter=1000, tol=1.0e-3)
_check_linear_solve(test, A, b, gmres, M=M, maxiter=1000, tol=1.0e-5)
_check_linear_solve(test, A, b, gmres, M=M, maxiter=1000, tol=1.0e-5, is_left_preconditioner=True)
A, b = _make_nonsymmetric_system(n=64, seed=654, device=device, dtype=wp.float64)
M = preconditioner(A, "diag")
_check_linear_solve(test, A, b, gmres, maxiter=1000, tol=1.0e-3)
_check_linear_solve(test, A, b, gmres, M=M, maxiter=1000, tol=1.0e-5)
_check_linear_solve(test, A, b, gmres, M=M, maxiter=1000, tol=1.0e-5, is_left_preconditioner=True)
A, b = _make_identity_system(n=5, seed=123, device=device, dtype=wp.float32)
_check_linear_solve(test, A, b, gmres, maxiter=120)
class TestLinearSolvers(unittest.TestCase):
pass
devices = get_test_devices()
if not wp.context.runtime.core.is_cutlass_enabled():
devices = [d for d in devices if not d.is_cuda]
print("Skipping CUDA linear solver tests because CUTLASS is not supported in this build")
if wp.context.runtime.core.is_debug_enabled():
# cutlass-based matmul is *very* slow in debug mode -- skip
devices = [d for d in devices if not d.is_cuda]
print("Skipping CUDA linear solver tests in debug mode")
add_function_test(TestLinearSolvers, "test_cg", test_cg, devices=devices)
add_function_test(TestLinearSolvers, "test_cr", test_cr, devices=devices)
add_function_test(TestLinearSolvers, "test_bicgstab", test_bicgstab, devices=devices)
add_function_test(TestLinearSolvers, "test_gmres", test_gmres, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
NVIDIA/warp/warp/tests/test_math.py
# Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
from typing import NamedTuple
import numpy as np
import warp as wp
from warp.tests.unittest_utils import add_function_test, assert_np_equal, get_test_devices
class ScalarFloatValues(NamedTuple):
degrees: wp.float32 = None
radians: wp.float32 = None
@wp.kernel
def scalar_float_kernel(
i: int,
x: wp.array(dtype=wp.float32),
out: wp.array(dtype=wp.float32),
):
if i == 0:
out[0] = wp.degrees(x[0])
elif i == 1:
out[0] = wp.radians(x[0])
def test_scalar_math(test, device):
float_values = ScalarFloatValues(degrees=(0.123,), radians=(123.0,))
float_results_expected = ScalarFloatValues(degrees=7.047381, radians=2.146755)
adj_float_results_expected = ScalarFloatValues(degrees=57.29578, radians=0.017453)
for i, values in enumerate(float_values):
x = wp.array([values[0]], dtype=wp.float32, requires_grad=True, device=device)
out = wp.array([0.0], dtype=wp.float32, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(scalar_float_kernel, dim=1, inputs=[i, x, out], device=device)
assert_np_equal(out.numpy(), np.array([float_results_expected[i]]), tol=1e-6)
tape.backward(out)
assert_np_equal(tape.gradients[x].numpy(), np.array([adj_float_results_expected[i]]), tol=1e-6)
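# Reference sketch for the constants above: wp.degrees and wp.radians are
# linear maps, so their adjoints are the conversion factors themselves,
# 180/pi ~= 57.29578 and pi/180 ~= 0.017453. NumPy mirror for spot-checking:
def _conversion_reference(x):
    return np.degrees(x), np.radians(x)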
devices = get_test_devices()
class TestMath(unittest.TestCase):
def test_vec_type(self):
vec5 = wp.vec(length=5, dtype=float)
v = vec5()
w = vec5()
a = vec5(1.0)
b = vec5(0.0, 0.0, 0.0, 0.0, 0.0)
c = vec5(0.0)
v[0] = 1.0
v.x = 0.0
v[1:] = [1.0, 1.0, 1.0, 1.0]
w[0] = 1.0
w[1:] = [0.0, 0.0, 0.0, 0.0]
self.assertEqual(v[0], w[1], "vec setter error")
self.assertEqual(v.x, w.y, "vec setter error")
for x in v[1:]:
self.assertEqual(x, 1.0, "vec slicing error")
self.assertEqual(b, c, "vec equality error")
self.assertEqual(str(v), "[0.0, 1.0, 1.0, 1.0, 1.0]", "vec to string error")
def test_mat_type(self):
mat55 = wp.mat(shape=(5, 5), dtype=float)
m1 = mat55()
m2 = mat55()
for i in range(5):
for j in range(5):
if i == j:
m1[i, j] = 1.0
else:
m1[i, j] = 0.0
for i in range(5):
m2[i] = [1.0, 1.0, 1.0, 1.0, 1.0]
a = mat55(1.0)
# fmt: off
b = mat55(
1.0, 0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 0.0, 1.0,
)
# fmt: on
self.assertEqual(m1, b, "mat element setting error")
self.assertEqual(m2, a, "mat row setting error")
self.assertEqual(m1[0, 0], 1.0, "mat element getting error")
self.assertEqual(m2[0], [1.0, 1.0, 1.0, 1.0, 1.0], "mat row getting error")
self.assertEqual(
str(b),
"[[1.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 1.0]]",
"mat to string error",
)
add_function_test(TestMath, "test_scalar_math", test_scalar_math, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
NVIDIA/warp/warp/tests/test_volume.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
from typing import Any
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
# float volume tests
@wp.kernel
def test_volume_lookup_f(volume: wp.uint64, points: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
expected = p[0] * p[1] * p[2]
if abs(p[0]) > 10.0 or abs(p[1]) > 10.0 or abs(p[2]) > 10.0:
expected = 10.0
i = int(p[0])
j = int(p[1])
k = int(p[2])
expect_eq(wp.volume_lookup_f(volume, i, j, k), expected)
expect_eq(wp.volume_lookup(volume, i, j, k, dtype=wp.float32), expected)
@wp.kernel
def test_volume_sample_closest_f(volume: wp.uint64, points: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
i = round(p[0])
j = round(p[1])
k = round(p[2])
expected = i * j * k
if abs(i) > 10.0 or abs(j) > 10.0 or abs(k) > 10.0:
expected = 10.0
expect_eq(wp.volume_sample_f(volume, p, wp.Volume.CLOSEST), expected)
expect_eq(wp.volume_sample(volume, p, wp.Volume.CLOSEST, dtype=wp.float32), expected)
q = wp.volume_index_to_world(volume, p)
q_inv = wp.volume_world_to_index(volume, q)
expect_eq(p, q_inv)
@wp.kernel
def test_volume_sample_linear_f(volume: wp.uint64, points: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
expected = p[0] * p[1] * p[2]
if abs(p[0]) > 10.0 or abs(p[1]) > 10.0 or abs(p[2]) > 10.0:
return # not testing against background values
expect_near(wp.volume_sample_f(volume, p, wp.Volume.LINEAR), expected, 2.0e-4)
expect_near(wp.volume_sample(volume, p, wp.Volume.LINEAR, dtype=wp.float32), expected, 2.0e-4)
@wp.kernel
def test_volume_sample_grad_linear_f(volume: wp.uint64, points: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
expected_val = p[0] * p[1] * p[2]
expected_gx = p[1] * p[2]
expected_gy = p[0] * p[2]
expected_gz = p[0] * p[1]
if abs(p[0]) > 10.0 or abs(p[1]) > 10.0 or abs(p[2]) > 10.0:
return # not testing against background values
grad = wp.vec3(0.0, 0.0, 0.0)
val = wp.volume_sample_grad_f(volume, p, wp.Volume.LINEAR, grad)
expect_near(val, expected_val, 2.0e-4)
expect_near(grad[0], expected_gx, 2.0e-4)
expect_near(grad[1], expected_gy, 2.0e-4)
expect_near(grad[2], expected_gz, 2.0e-4)
val = wp.volume_sample_grad(volume, p, wp.Volume.LINEAR, grad, dtype=wp.float32)
expect_near(val, expected_val, 2.0e-4)
expect_near(grad[0], expected_gx, 2.0e-4)
expect_near(grad[1], expected_gy, 2.0e-4)
expect_near(grad[2], expected_gz, 2.0e-4)
@wp.kernel
def test_volume_sample_local_f_linear_values(
volume: wp.uint64, points: wp.array(dtype=wp.vec3), values: wp.array(dtype=wp.float32)
):
tid = wp.tid()
p = points[tid]
values[tid] = wp.volume_sample_f(volume, p, wp.Volume.LINEAR)
@wp.kernel
def test_volume_sample_grad_local_f_linear_values(
volume: wp.uint64, points: wp.array(dtype=wp.vec3), values: wp.array(dtype=wp.float32), case_num: int
):
tid = wp.tid()
p = points[tid]
grad = wp.vec3(0.0, 0.0, 0.0)
val = wp.volume_sample_grad_f(volume, p, wp.Volume.LINEAR, grad)
if case_num == 0:
values[tid] = val
elif case_num == 1:
values[tid] = grad[0]
elif case_num == 2:
values[tid] = grad[1]
elif case_num == 3:
values[tid] = grad[2]
@wp.kernel
def test_volume_sample_world_f_linear_values(
volume: wp.uint64, points: wp.array(dtype=wp.vec3), values: wp.array(dtype=wp.float32)
):
tid = wp.tid()
q = points[tid]
p = wp.volume_world_to_index(volume, q)
values[tid] = wp.volume_sample_f(volume, p, wp.Volume.LINEAR)
@wp.kernel
def test_volume_sample_grad_world_f_linear_values(
volume: wp.uint64, points: wp.array(dtype=wp.vec3), values: wp.array(dtype=wp.float32), case_num: int
):
tid = wp.tid()
q = points[tid]
p = wp.volume_world_to_index(volume, q)
grad = wp.vec3(0.0, 0.0, 0.0)
val = wp.volume_sample_grad_f(volume, p, wp.Volume.LINEAR, grad)
if case_num == 0:
values[tid] = val
elif case_num == 1:
values[tid] = grad[0]
elif case_num == 2:
values[tid] = grad[1]
elif case_num == 3:
values[tid] = grad[2]
# vec3f volume tests
@wp.kernel
def test_volume_lookup_v(volume: wp.uint64, points: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
expected = wp.vec3(
p[0] + 2.0 * p[1] + 3.0 * p[2], 4.0 * p[0] + 5.0 * p[1] + 6.0 * p[2], 7.0 * p[0] + 8.0 * p[1] + 9.0 * p[2]
)
if abs(p[0]) > 10.0 or abs(p[1]) > 10.0 or abs(p[2]) > 10.0:
expected = wp.vec3(10.8, -4.13, 10.26)
i = int(p[0])
j = int(p[1])
k = int(p[2])
expect_eq(wp.volume_lookup_v(volume, i, j, k), expected)
expect_eq(wp.volume_lookup(volume, i, j, k, dtype=wp.vec3), expected)
@wp.kernel
def test_volume_sample_closest_v(volume: wp.uint64, points: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
i = round(p[0])
j = round(p[1])
k = round(p[2])
expected = wp.vec3(i + 2.0 * j + 3.0 * k, 4.0 * i + 5.0 * j + 6.0 * k, 7.0 * i + 8.0 * j + 9.0 * k)
if abs(i) > 10.0 or abs(j) > 10.0 or abs(k) > 10.0:
expected = wp.vec3(10.8, -4.13, 10.26)
expect_eq(wp.volume_sample_v(volume, p, wp.Volume.CLOSEST), expected)
expect_eq(wp.volume_sample(volume, p, wp.Volume.CLOSEST, dtype=wp.vec3), expected)
q = wp.volume_index_to_world(volume, p)
q_inv = wp.volume_world_to_index(volume, q)
expect_eq(p, q_inv)
@wp.kernel
def test_volume_sample_linear_v(volume: wp.uint64, points: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
expected = wp.vec3(
p[0] + 2.0 * p[1] + 3.0 * p[2], 4.0 * p[0] + 5.0 * p[1] + 6.0 * p[2], 7.0 * p[0] + 8.0 * p[1] + 9.0 * p[2]
)
if abs(p[0]) > 10.0 or abs(p[1]) > 10.0 or abs(p[2]) > 10.0:
return # not testing against background values
expect_near(wp.volume_sample_v(volume, p, wp.Volume.LINEAR), expected, 2.0e-4)
expect_near(wp.volume_sample(volume, p, wp.Volume.LINEAR, dtype=wp.vec3), expected, 2.0e-4)
@wp.kernel
def test_volume_sample_grad_linear_v(volume: wp.uint64, points: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
if abs(p[0]) > 10.0 or abs(p[1]) > 10.0 or abs(p[2]) > 10.0:
return # not testing against background values
expected_val = wp.vec3(
p[0] + 2.0 * p[1] + 3.0 * p[2], 4.0 * p[0] + 5.0 * p[1] + 6.0 * p[2], 7.0 * p[0] + 8.0 * p[1] + 9.0 * p[2]
)
expected_grad = wp.mat33(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0)
grad = wp.mat33(0.0)
val = wp.volume_sample_grad(volume, p, wp.Volume.LINEAR, grad, dtype=wp.vec3)
expect_near(val, expected_val, 2.0e-4)
expect_near(grad[0], expected_grad[0], 2.0e-4)
expect_near(grad[1], expected_grad[1], 2.0e-4)
expect_near(grad[2], expected_grad[2], 2.0e-4)
@wp.kernel
def test_volume_sample_local_v_linear_values(
volume: wp.uint64, points: wp.array(dtype=wp.vec3), values: wp.array(dtype=wp.float32)
):
tid = wp.tid()
p = points[tid]
ones = wp.vec3(1.0, 1.0, 1.0)
values[tid] = wp.dot(wp.volume_sample_v(volume, p, wp.Volume.LINEAR), ones)
@wp.kernel
def test_volume_sample_world_v_linear_values(
volume: wp.uint64, points: wp.array(dtype=wp.vec3), values: wp.array(dtype=wp.float32)
):
tid = wp.tid()
q = points[tid]
p = wp.volume_world_to_index(volume, q)
ones = wp.vec3(1.0, 1.0, 1.0)
values[tid] = wp.dot(wp.volume_sample_v(volume, p, wp.Volume.LINEAR), ones)
# int32 volume tests
@wp.kernel
def test_volume_lookup_i(volume: wp.uint64, points: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
i = int(p[0])
j = int(p[1])
k = int(p[2])
expected = i * j * k
if abs(i) > 10 or abs(j) > 10 or abs(k) > 10:
expected = 10
expect_eq(wp.volume_lookup_i(volume, i, j, k), expected)
expect_eq(wp.volume_lookup(volume, i, j, k, dtype=wp.int32), expected)
@wp.kernel
def test_volume_sample_i(volume: wp.uint64, points: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
i = round(p[0])
j = round(p[1])
k = round(p[2])
expected = int(i * j * k)
if abs(i) > 10.0 or abs(j) > 10.0 or abs(k) > 10.0:
expected = 10
expect_eq(wp.volume_sample_i(volume, p), expected)
expect_eq(wp.volume_sample(volume, p, wp.Volume.CLOSEST, dtype=wp.int32), expected)
q = wp.volume_index_to_world(volume, p)
q_inv = wp.volume_world_to_index(volume, q)
expect_eq(p, q_inv)
# Index/world transformation tests
@wp.kernel
def test_volume_index_to_world(
volume: wp.uint64,
points: wp.array(dtype=wp.vec3),
values: wp.array(dtype=wp.float32),
grad_values: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
p = points[tid]
ones = wp.vec3(1.0, 1.0, 1.0)
values[tid] = wp.dot(wp.volume_index_to_world(volume, p), ones)
grad_values[tid] = wp.volume_index_to_world_dir(volume, ones)
@wp.kernel
def test_volume_world_to_index(
volume: wp.uint64,
points: wp.array(dtype=wp.vec3),
values: wp.array(dtype=wp.float32),
grad_values: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
p = points[tid]
ones = wp.vec3(1.0, 1.0, 1.0)
values[tid] = wp.dot(wp.volume_world_to_index(volume, p), ones)
grad_values[tid] = wp.volume_world_to_index_dir(volume, ones)
# Volume write tests
@wp.kernel
def test_volume_store_f(volume: wp.uint64, points: wp.array(dtype=wp.vec3), values: wp.array(dtype=wp.float32)):
tid = wp.tid()
p = points[tid]
i = int(p[0])
j = int(p[1])
k = int(p[2])
wp.volume_store(volume, i, j, k, float(i + 100 * j + 10000 * k))
values[tid] = wp.volume_lookup_f(volume, i, j, k)
@wp.kernel
def test_volume_store_v(volume: wp.uint64, points: wp.array(dtype=wp.vec3), values: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
i = int(p[0])
j = int(p[1])
k = int(p[2])
wp.volume_store(volume, i, j, k, p)
values[tid] = wp.volume_lookup_v(volume, i, j, k)
@wp.kernel
def test_volume_store_i(volume: wp.uint64, points: wp.array(dtype=wp.vec3), values: wp.array(dtype=wp.int32)):
tid = wp.tid()
p = points[tid]
i = int(p[0])
j = int(p[1])
k = int(p[2])
wp.volume_store(volume, i, j, k, i + 100 * j + 10000 * k)
values[tid] = wp.volume_lookup_i(volume, i, j, k)
devices = get_test_devices()
rng = np.random.default_rng(101215)
# Note about the test grids:
# test_grid and test_int32_grid
# active region: [-10,10]^3
# values: v[i,j,k] = i * j * k
# voxel size: 0.25
#
# test_vec_grid
# active region: [-10,10]^3
# values: v[i,j,k] = (i + 2*j + 3*k, 4*i + 5*j + 6*k, 7*i + 8*j + 9*k)
# voxel size: 0.25
#
# torus
# index to world transformation:
# [0.1, 0, 0, 0]
# [0, 0, 0.1, 0]
# [0, 0.1, 0, 0]
# [1, 2, 3, 1]
# (-90 degrees rotation along X)
# voxel size: 0.1
volume_paths = {
"float": os.path.abspath(os.path.join(os.path.dirname(__file__), "assets/test_grid.nvdb")),
"int32": os.path.abspath(os.path.join(os.path.dirname(__file__), "assets/test_int32_grid.nvdb")),
"vec3f": os.path.abspath(os.path.join(os.path.dirname(__file__), "assets/test_vec_grid.nvdb")),
"index": os.path.abspath(os.path.join(os.path.dirname(__file__), "assets/test_index_grid.nvdb")),
"torus": os.path.abspath(os.path.join(os.path.dirname(__file__), "assets/torus.nvdb")),
"float_write": os.path.abspath(os.path.join(os.path.dirname(__file__), "assets/test_grid.nvdb")),
}
test_volume_tiles = (
np.array([[i, j, k] for i in range(-2, 2) for j in range(-2, 2) for k in range(-2, 2)], dtype=np.int32) * 8
)
volumes = {}
for value_type, path in volume_paths.items():
volumes[value_type] = {}
    with open(path, "rb") as f:
        volume_data = f.read()
for device in devices:
try:
volume = wp.Volume.load_from_nvdb(volume_data, device)
except RuntimeError as e:
raise RuntimeError(f'Failed to load volume from "{path}" to {device} memory:\n{e}') from e
volumes[value_type][device.alias] = volume
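# Spot-check sketch tying the grid notes above to code (hypothetical helper,
# not registered as a test): for the float grid, v[i, j, k] = i * j * k
# inside the active region, which the lookup kernel verifies in-kernel.
def _lookup_spot_check_sketch(device):
    vol = volumes["float"][wp.get_device(device).alias]
    pts = wp.array([wp.vec3(2.0, 3.0, 4.0)], dtype=wp.vec3, device=device)
    wp.launch(test_volume_lookup_f, dim=1, inputs=[vol.id, pts], device=device)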
axis = np.linspace(-1, 1, 3)
point_grid = np.array([[x, y, z] for x in axis for y in axis for z in axis], dtype=np.float32)
def test_volume_sample_linear_f_gradient(test, device):
points = rng.uniform(-10.0, 10.0, size=(100, 3))
values = wp.array(np.zeros(1), dtype=wp.float32, device=device, requires_grad=True)
for test_case in points:
uvws = wp.array(test_case, dtype=wp.vec3, device=device, requires_grad=True)
xyzs = wp.array(test_case * 0.25, dtype=wp.vec3, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(
test_volume_sample_local_f_linear_values,
dim=1,
inputs=[volumes["float"][device.alias].id, uvws, values],
device=device,
)
tape.backward(values)
x, y, z = test_case
grad_expected = np.array([y * z, x * z, x * y])
grad_computed = tape.gradients[uvws].numpy()[0]
np.testing.assert_allclose(grad_computed, grad_expected, rtol=1e-4)
tape = wp.Tape()
with tape:
wp.launch(
test_volume_sample_world_f_linear_values,
dim=1,
inputs=[volumes["float"][device.alias].id, xyzs, values],
device=device,
)
tape.backward(values)
x, y, z = test_case
grad_expected = np.array([y * z, x * z, x * y]) / 0.25
grad_computed = tape.gradients[xyzs].numpy()[0]
np.testing.assert_allclose(grad_computed, grad_expected, rtol=1e-4)
def test_volume_sample_grad_linear_f_gradient(test, device):
points = rng.uniform(-10.0, 10.0, size=(100, 3))
values = wp.array(np.zeros(1), dtype=wp.float32, device=device, requires_grad=True)
for test_case in points:
uvws = wp.array(test_case, dtype=wp.vec3, device=device, requires_grad=True)
xyzs = wp.array(test_case * 0.25, dtype=wp.vec3, device=device, requires_grad=True)
for case_num in range(4):
tape = wp.Tape()
with tape:
wp.launch(
test_volume_sample_grad_local_f_linear_values,
dim=1,
inputs=[volumes["float"][device.alias].id, uvws, values, case_num],
device=device,
)
tape.backward(values)
x, y, z = test_case
grad_computed = tape.gradients[uvws].numpy()[0]
if case_num == 0:
grad_expected = np.array([y * z, x * z, x * y])
elif case_num == 1:
grad_expected = np.array([0.0, z, y])
elif case_num == 2:
grad_expected = np.array([z, 0.0, x])
elif case_num == 3:
grad_expected = np.array([y, x, 0.0])
np.testing.assert_allclose(grad_computed, grad_expected, rtol=1e-4)
tape.zero()
for case_num in range(4):
tape = wp.Tape()
with tape:
wp.launch(
test_volume_sample_grad_world_f_linear_values,
dim=1,
inputs=[volumes["float"][device.alias].id, xyzs, values, case_num],
device=device,
)
tape.backward(values)
x, y, z = test_case
grad_computed = tape.gradients[xyzs].numpy()[0]
if case_num == 0:
grad_expected = np.array([y * z, x * z, x * y]) / 0.25
elif case_num == 1:
grad_expected = np.array([0.0, z, y]) / 0.25
elif case_num == 2:
grad_expected = np.array([z, 0.0, x]) / 0.25
elif case_num == 3:
grad_expected = np.array([y, x, 0.0]) / 0.25
np.testing.assert_allclose(grad_computed, grad_expected, rtol=1e-4)
tape.zero()
def test_volume_sample_linear_v_gradient(test, device):
points = rng.uniform(-10.0, 10.0, size=(100, 3))
values = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
for test_case in points:
uvws = wp.array(test_case, dtype=wp.vec3, device=device, requires_grad=True)
xyzs = wp.array(test_case * 0.25, dtype=wp.vec3, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(
test_volume_sample_local_v_linear_values,
dim=1,
inputs=[volumes["vec3f"][device.alias].id, uvws, values],
device=device,
)
tape.backward(values)
grad_expected = np.array([12.0, 15.0, 18.0])
grad_computed = tape.gradients[uvws].numpy()[0]
np.testing.assert_allclose(grad_computed, grad_expected, rtol=1e-4)
tape = wp.Tape()
with tape:
wp.launch(
test_volume_sample_world_v_linear_values,
dim=1,
inputs=[volumes["vec3f"][device.alias].id, xyzs, values],
device=device,
)
tape.backward(values)
grad_expected = np.array([12.0, 15.0, 18.0]) / 0.25
grad_computed = tape.gradients[xyzs].numpy()[0]
np.testing.assert_allclose(grad_computed, grad_expected, rtol=1e-4)
def test_volume_transform_gradient(test, device):
values = wp.zeros(1, dtype=wp.float32, device=device, requires_grad=True)
grad_values = wp.zeros(1, dtype=wp.vec3, device=device)
test_points = rng.uniform(-10.0, 10.0, size=(10, 3))
for test_case in test_points:
points = wp.array(test_case, dtype=wp.vec3, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(
test_volume_index_to_world,
dim=1,
inputs=[volumes["torus"][device.alias].id, points, values, grad_values],
device=device,
)
tape.backward(values)
grad_computed = tape.gradients[points].numpy()
grad_expected = grad_values.numpy()
np.testing.assert_allclose(grad_computed, grad_expected, rtol=1e-4)
def test_volume_store(test, device):
values_ref = np.array([x + 100 * y + 10000 * z for x, y, z in point_grid])
points = wp.array(point_grid, dtype=wp.vec3, device=device)
values = wp.empty(len(point_grid), dtype=wp.float32, device=device)
wp.launch(
test_volume_store_f,
dim=len(point_grid),
inputs=[volumes["float_write"][device.alias].id, points, values],
device=device,
)
values_res = values.numpy()
np.testing.assert_equal(values_res, values_ref)
def test_volume_allocation_f(test, device):
bg_value = -123.0
points_np = np.append(point_grid, [[8096, 8096, 8096]], axis=0)
values_ref = np.append(np.array([x + 100 * y + 10000 * z for x, y, z in point_grid]), bg_value)
volume = wp.Volume.allocate(min=[-11, -11, -11], max=[11, 11, 11], voxel_size=0.1, bg_value=bg_value, device=device)
points = wp.array(points_np, dtype=wp.vec3, device=device)
values = wp.empty(len(points_np), dtype=wp.float32, device=device)
wp.launch(test_volume_store_f, dim=len(points_np), inputs=[volume.id, points, values], device=device)
values_res = values.numpy()
np.testing.assert_equal(values_res, values_ref)
def test_volume_allocation_v(test, device):
bg_value = (-1, 2.0, -3)
points_np = np.append(point_grid, [[8096, 8096, 8096]], axis=0)
values_ref = np.append(point_grid, [bg_value], axis=0)
volume = wp.Volume.allocate(min=[-11, -11, -11], max=[11, 11, 11], voxel_size=0.1, bg_value=bg_value, device=device)
points = wp.array(points_np, dtype=wp.vec3, device=device)
values = wp.empty(len(points_np), dtype=wp.vec3, device=device)
wp.launch(test_volume_store_v, dim=len(points_np), inputs=[volume.id, points, values], device=device)
values_res = values.numpy()
np.testing.assert_equal(values_res, values_ref)
def test_volume_allocation_i(test, device):
bg_value = -123
points_np = np.append(point_grid, [[8096, 8096, 8096]], axis=0)
values_ref = np.append(np.array([x + 100 * y + 10000 * z for x, y, z in point_grid], dtype=np.int32), bg_value)
volume = wp.Volume.allocate(min=[-11, -11, -11], max=[11, 11, 11], voxel_size=0.1, bg_value=bg_value, device=device)
points = wp.array(points_np, dtype=wp.vec3, device=device)
values = wp.empty(len(points_np), dtype=wp.int32, device=device)
wp.launch(test_volume_store_i, dim=len(points_np), inputs=[volume.id, points, values], device=device)
values_res = values.numpy()
np.testing.assert_equal(values_res, values_ref)
def test_volume_introspection(test, device):
for volume_names in ("float", "vec3f"):
with test.subTest(volume_names=volume_names):
volume = volumes[volume_names][device.alias]
tiles_actual = volume.get_tiles().numpy()
tiles_sorted = tiles_actual[np.lexsort(tiles_actual.T[::-1])]
voxel_size = np.array(volume.get_voxel_size())
np.testing.assert_equal(test_volume_tiles, tiles_sorted)
np.testing.assert_equal([0.25] * 3, voxel_size)
voxel_count = volume.get_voxel_count()
voxels_actual = volume.get_voxels().numpy()
assert voxel_count == voxels_actual.shape[0]
# Voxel coordinates should be unique
voxels_unique = np.unique(voxels_actual, axis=0)
assert voxel_count == voxels_unique.shape[0]
# Get back tiles from voxels, should match get_tiles()
voxel_tiles = 8 * (voxels_unique // 8)
voxel_tiles_sorted = voxel_tiles[np.lexsort(voxel_tiles.T[::-1])]
voxel_tiles_unique = np.unique(voxel_tiles_sorted, axis=0)
np.testing.assert_equal(voxel_tiles_unique, tiles_sorted)
def test_volume_multiple_grids(test, device):
volume = volumes["index"][device.alias]
volume_2 = volume.load_next_grid()
test.assertIsNotNone(volume_2)
test.assertNotEqual(volume.id, volume_2.id)
test.assertNotEqual(volume.get_voxel_count(), volume_2.get_voxel_count())
test.assertEqual(volume.get_grid_info().grid_count, volume_2.get_grid_info().grid_count)
test.assertEqual(volume.get_grid_info().grid_index + 1, volume_2.get_grid_info().grid_index)
volume_3 = volume_2.load_next_grid()
test.assertIsNone(volume_3)
def test_volume_feature_array(test, device):
volume = volumes["index"][device.alias]
test.assertEqual(volume.get_feature_array_count(), 1)
array = volume.feature_array(0, dtype=wp.uint64)
test.assertEqual(array.device, device)
test.assertEqual(array.dtype, wp.uint64)
# fVDB convention, data starts with array ndim + shape
np.testing.assert_equal(array.numpy()[0:4], [3, volume.get_voxel_count(), 2, 3])
@wp.kernel
def fill_leaf_values_kernel(volume: wp.uint64, ijk: wp.array2d(dtype=wp.int32), values: wp.array(dtype=Any)):
tid = wp.tid()
i = ijk[tid, 0]
j = ijk[tid, 1]
k = ijk[tid, 2]
expect_eq(tid, wp.volume_lookup_index(volume, i, j, k))
values[tid] = wp.volume_lookup(volume, i, j, k, dtype=values.dtype)
@wp.kernel
def test_volume_sample_index_kernel(
volume: wp.uint64,
points: wp.array(dtype=wp.vec3),
values: wp.array(dtype=Any),
background: wp.array(dtype=Any),
sampled_values: wp.array(dtype=Any),
):
tid = wp.tid()
p = points[tid]
ref = wp.volume_sample(volume, p, wp.Volume.LINEAR, dtype=values.dtype)
sampled_values[tid] = wp.volume_sample_index(volume, p, wp.Volume.LINEAR, values, background[0])
expect_eq(sampled_values[tid], ref)
@wp.kernel
def test_volume_sample_grad_index_kernel(
volume: wp.uint64,
points: wp.array(dtype=wp.vec3),
values: wp.array(dtype=Any),
background: wp.array(dtype=Any),
sampled_values: wp.array(dtype=Any),
sampled_grads: wp.array(dtype=Any),
):
tid = wp.tid()
p = points[tid]
ref_grad = sampled_grads.dtype()
ref = wp.volume_sample_grad(volume, p, wp.Volume.LINEAR, ref_grad, dtype=values.dtype)
grad = type(ref_grad)()
sampled_values[tid] = wp.volume_sample_grad_index(volume, p, wp.Volume.LINEAR, values, background[0], grad)
expect_eq(sampled_values[tid], ref)
expect_eq(grad[0], ref_grad[0])
expect_eq(grad[1], ref_grad[1])
expect_eq(grad[2], ref_grad[2])
sampled_grads[tid] = grad
def test_volume_sample_index(test, device):
points = rng.uniform(-10.0, 10.0, size=(100, 3))
points[0:10, 0] += 100.0 # ensure some points are over unallocated voxels
uvws = wp.array(points, dtype=wp.vec3, device=device)
bg_values = {
"float": 10.0,
"vec3f": wp.vec3(10.8, -4.13, 10.26),
}
grad_types = {
"float": wp.vec3,
"vec3f": wp.mat33,
}
for volume_names in ("float", "vec3f"):
with test.subTest(volume_names=volume_names):
volume = volumes[volume_names][device.alias]
ijk = volume.get_voxels()
values = wp.empty(shape=volume.get_voxel_count(), dtype=volume.dtype, device=device, requires_grad=True)
vid = wp.uint64(volume.id)
wp.launch(fill_leaf_values_kernel, dim=values.shape, inputs=[vid, ijk, values], device=device)
sampled_values = wp.empty(shape=points.shape[0], dtype=volume.dtype, device=device, requires_grad=True)
background = wp.array([bg_values[volume_names]], dtype=volume.dtype, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(
test_volume_sample_index_kernel,
dim=points.shape[0],
inputs=[vid, uvws, values, background, sampled_values],
device=device,
)
sampled_values.grad.fill_(1.0)
tape.backward()
# test adjoint w.r.t voxel and background value arrays
# we should have sum(sampled_values) = sum(adj_values * values) + (adj_background * background)
sum_sampled_values = np.sum(sampled_values.numpy(), axis=0)
sum_values_adj = np.sum(values.numpy() * values.grad.numpy(), axis=0)
sum_background_adj = background.numpy()[0] * background.grad.numpy()[0]
np.testing.assert_allclose(sum_sampled_values, sum_values_adj + sum_background_adj, rtol=1.0e-3)
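            # the identity holds because trilinear sampling is linear (degree-1
            # homogeneous) in the voxel and background values, so Euler's
            # relation gives f(v, b) = sum_i v_i * df/dv_i + b * df/db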
tape.reset()
sampled_grads = wp.empty(
shape=points.shape[0], dtype=grad_types[volume_names], device=device, requires_grad=True
)
with tape:
wp.launch(
test_volume_sample_grad_index_kernel,
dim=points.shape[0],
inputs=[vid, uvws, values, background, sampled_values, sampled_grads],
device=device,
)
sampled_values.grad.fill_(1.0)
tape.backward()
# we should have sum(sampled_values) = sum(adj_values * values) + (adj_background * background)
sum_sampled_values = np.sum(sampled_values.numpy(), axis=0)
sum_values_adj = np.sum(values.numpy() * values.grad.numpy(), axis=0)
sum_background_adj = background.numpy()[0] * background.grad.numpy()[0]
np.testing.assert_allclose(sum_sampled_values, sum_values_adj + sum_background_adj, rtol=1.0e-3)
tape.zero()
sampled_values.grad.fill_(0.0)
sampled_grads.grad.fill_(1.0)
tape.backward()
# we should have sum(sampled_grad, axes=(0, -1)) = sum(adj_values * values) + (adj_background * background)
sum_sampled_grads = np.sum(np.sum(sampled_grads.numpy(), axis=0), axis=-1)
sum_values_adj = np.sum(values.numpy() * values.grad.numpy(), axis=0)
sum_background_adj = background.numpy()[0] * background.grad.numpy()[0]
np.testing.assert_allclose(sum_sampled_grads, sum_values_adj + sum_background_adj, rtol=1.0e-3)
def test_volume_from_numpy(test, device):
    # Volume.load_from_numpy() relies on tile allocation (Volume.allocate_by_tiles()), which is only available with CUDA
mins = np.array([-3.0, -3.0, -3.0])
voxel_size = 0.2
maxs = np.array([3.0, 3.0, 3.0])
nums = np.ceil((maxs - mins) / (voxel_size)).astype(dtype=int)
center = np.array([0.0, 0.0, 0.0])
rad = 2.5
sphere_sdf_np = np.zeros(tuple(nums))
for x in range(nums[0]):
for y in range(nums[1]):
for z in range(nums[2]):
pos = mins + voxel_size * np.array([x, y, z])
dis = np.linalg.norm(pos - center)
sphere_sdf_np[x, y, z] = dis - rad
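    # equivalently, a vectorized construction (untested sketch):
    #   ijk = np.stack(np.meshgrid(*(np.arange(n) for n in nums), indexing="ij"), axis=-1)
    #   sphere_sdf_np = np.linalg.norm(mins + voxel_size * ijk - center, axis=-1) - rad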
sphere_vdb = wp.Volume.load_from_numpy(sphere_sdf_np, mins, voxel_size, rad + 3.0 * voxel_size, device=device)
test.assertNotEqual(sphere_vdb.id, 0)
sphere_vdb_array = sphere_vdb.array()
test.assertEqual(sphere_vdb_array.dtype, wp.uint8)
test.assertIsNone(sphere_vdb_array.deleter)
class TestVolume(unittest.TestCase):
pass
add_function_test(
TestVolume, "test_volume_sample_linear_f_gradient", test_volume_sample_linear_f_gradient, devices=devices
)
add_function_test(
TestVolume, "test_volume_sample_grad_linear_f_gradient", test_volume_sample_grad_linear_f_gradient, devices=devices
)
add_function_test(
TestVolume, "test_volume_sample_linear_v_gradient", test_volume_sample_linear_v_gradient, devices=devices
)
add_function_test(TestVolume, "test_volume_transform_gradient", test_volume_transform_gradient, devices=devices)
add_function_test(TestVolume, "test_volume_store", test_volume_store, devices=devices)
add_function_test(
TestVolume, "test_volume_allocation_f", test_volume_allocation_f, devices=get_selected_cuda_test_devices()
)
add_function_test(
TestVolume, "test_volume_allocation_v", test_volume_allocation_v, devices=get_selected_cuda_test_devices()
)
add_function_test(
TestVolume, "test_volume_allocation_i", test_volume_allocation_i, devices=get_selected_cuda_test_devices()
)
add_function_test(TestVolume, "test_volume_introspection", test_volume_introspection, devices=devices)
add_function_test(
TestVolume, "test_volume_from_numpy", test_volume_from_numpy, devices=get_selected_cuda_test_devices()
)
add_function_test(TestVolume, "test_volume_multiple_grids", test_volume_multiple_grids, devices=devices)
add_function_test(TestVolume, "test_volume_feature_array", test_volume_feature_array, devices=devices)
add_function_test(TestVolume, "test_volume_sample_index", test_volume_sample_index, devices=devices)
points = {}
points_jittered = {}
for device in devices:
points_jittered_np = point_grid + rng.uniform(-0.5, 0.5, size=point_grid.shape)
points[device.alias] = wp.array(point_grid, dtype=wp.vec3, device=device)
points_jittered[device.alias] = wp.array(points_jittered_np, dtype=wp.vec3, device=device)
add_kernel_test(
TestVolume,
test_volume_lookup_f,
dim=len(point_grid),
inputs=[volumes["float"][device.alias].id, points[device.alias]],
devices=[device],
)
add_kernel_test(
TestVolume,
test_volume_sample_closest_f,
dim=len(point_grid),
inputs=[volumes["float"][device.alias].id, points_jittered[device.alias]],
devices=[device.alias],
)
add_kernel_test(
TestVolume,
test_volume_sample_linear_f,
dim=len(point_grid),
inputs=[volumes["float"][device.alias].id, points_jittered[device.alias]],
devices=[device.alias],
)
add_kernel_test(
TestVolume,
test_volume_sample_grad_linear_f,
dim=len(point_grid),
inputs=[volumes["float"][device.alias].id, points_jittered[device.alias]],
devices=[device.alias],
)
add_kernel_test(
TestVolume,
test_volume_lookup_v,
dim=len(point_grid),
inputs=[volumes["vec3f"][device.alias].id, points[device.alias]],
devices=[device.alias],
)
add_kernel_test(
TestVolume,
test_volume_sample_closest_v,
dim=len(point_grid),
inputs=[volumes["vec3f"][device.alias].id, points_jittered[device.alias]],
devices=[device.alias],
)
add_kernel_test(
TestVolume,
test_volume_sample_linear_v,
dim=len(point_grid),
inputs=[volumes["vec3f"][device.alias].id, points_jittered[device.alias]],
devices=[device.alias],
)
add_kernel_test(
TestVolume,
test_volume_sample_grad_linear_v,
dim=len(point_grid),
inputs=[volumes["vec3f"][device.alias].id, points_jittered[device.alias]],
devices=[device.alias],
)
add_kernel_test(
TestVolume,
test_volume_lookup_i,
dim=len(point_grid),
inputs=[volumes["int32"][device.alias].id, points[device.alias]],
devices=[device.alias],
)
add_kernel_test(
TestVolume,
test_volume_sample_i,
dim=len(point_grid),
inputs=[volumes["int32"][device.alias].id, points_jittered[device.alias]],
devices=[device.alias],
)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 33,909 | Python | 34.24948 | 120 | 0.605591 |
NVIDIA/warp/warp/tests/test_volume_write.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
# Volume write tests
@wp.kernel
def test_volume_store_f(volume: wp.uint64, points: wp.array(dtype=wp.vec3)):
tid = wp.tid()
p = points[tid]
i = int(p[0])
j = int(p[1])
k = int(p[2])
wp.volume_store_f(volume, i, j, k, float(i + 100 * j + 10000 * k))
@wp.kernel
def test_volume_readback_f(volume: wp.uint64, points: wp.array(dtype=wp.vec3), values: wp.array(dtype=wp.float32)):
tid = wp.tid()
p = points[tid]
i = int(p[0])
j = int(p[1])
k = int(p[2])
values[tid] = wp.volume_lookup_f(volume, i, j, k)
@wp.kernel
def test_get_list_of_tiles(
volume: wp.uint64,
points_is: wp.array2d(dtype=wp.int32),
points_ws: wp.array(dtype=wp.vec3),
tiles_is: wp.array2d(dtype=wp.int32),
tiles_ws: wp.array2d(dtype=wp.int32),
):
tid = wp.tid()
tiles_is[tid, 0] = points_is[tid, 0]
tiles_is[tid, 1] = points_is[tid, 1]
tiles_is[tid, 2] = points_is[tid, 2]
q = wp.volume_world_to_index(volume, points_ws[tid])
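    # a tile origin is the index-space coordinate floored to a multiple of 8;
    # int() truncates toward zero, which only matches flooring here because the
    # query points lie (up to rounding) exactly on tile corners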
tiles_ws[tid, 0] = int(q[0] / 8.0) * 8
tiles_ws[tid, 1] = int(q[1] / 8.0) * 8
tiles_ws[tid, 2] = int(q[2] / 8.0) * 8
@wp.kernel
def test_volume_tile_store_f(volume: wp.uint64, tiles: wp.array2d(dtype=wp.int32)):
tid = wp.tid()
ti = tiles[tid, 0]
tj = tiles[tid, 1]
tk = tiles[tid, 2]
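    # enumerate all 512 voxels of the 8x8x8 tile: r decomposes into offsets
    # i = r / 64, j = (r / 8) % 8, k = r % 8 (in Warp kernels, / on integers
    # performs C-style integer division)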
for r in range(512):
ii = ti + (r / 64) % 8
jj = tj + (r / 8) % 8
kk = tk + r % 8
wp.volume_store_f(volume, ii, jj, kk, float(100 * ii + 10 * jj + kk))
@wp.kernel
def test_volume_tile_store_ws_f(volume: wp.uint64, tiles: wp.array(dtype=wp.vec3)):
tid = wp.tid()
q = wp.volume_world_to_index(volume, tiles[tid])
ti = int(wp.round(q[0]))
tj = int(wp.round(q[1]))
tk = int(wp.round(q[2]))
for r in range(512):
ii = ti + (r / 64) % 8
jj = tj + (r / 8) % 8
kk = tk + r % 8
wp.volume_store_f(volume, ii, jj, kk, float(100 * ii + 10 * jj + kk))
@wp.kernel
def test_volume_tile_readback_f(
volume: wp.uint64, tiles: wp.array2d(dtype=wp.int32), values: wp.array(dtype=wp.float32)
):
tid = wp.tid()
ti = tiles[tid, 0]
tj = tiles[tid, 1]
tk = tiles[tid, 2]
for r in range(512):
ii = ti + (r / 64) % 8
jj = tj + (r / 8) % 8
kk = tk + r % 8
values[tid * 512 + r] = wp.volume_lookup_f(volume, ii, jj, kk)
@wp.kernel
def test_volume_tile_store_v(volume: wp.uint64, tiles: wp.array2d(dtype=wp.int32)):
tid = wp.tid()
ti = tiles[tid, 0]
tj = tiles[tid, 1]
tk = tiles[tid, 2]
for r in range(512):
ii = ti + (r / 64) % 8
jj = tj + (r / 8) % 8
kk = tk + r % 8
wp.volume_store_v(volume, ii, jj, kk, wp.vec3(float(ii), float(jj), float(kk)))
@wp.kernel
def test_volume_tile_readback_v(volume: wp.uint64, tiles: wp.array2d(dtype=wp.int32), values: wp.array(dtype=wp.vec3)):
tid = wp.tid()
ti = tiles[tid, 0]
tj = tiles[tid, 1]
tk = tiles[tid, 2]
for r in range(512):
ii = ti + (r / 64) % 8
jj = tj + (r / 8) % 8
kk = tk + r % 8
values[tid * 512 + r] = wp.volume_lookup_v(volume, ii, jj, kk)
def test_volume_allocation(test, device):
voxel_size = 0.125
background_value = 123.456
translation = wp.vec3(-12.3, 4.56, -789)
axis = np.linspace(-11, 11, 23)
points_ref = np.array([[x, y, z] for x in axis for y in axis for z in axis])
values_ref = np.array([x + 100 * y + 10000 * z for x in axis for y in axis for z in axis])
num_points = len(points_ref)
bb_max = np.array([11, 11, 11])
volume_a = wp.Volume.allocate(
-bb_max,
bb_max,
voxel_size=voxel_size,
bg_value=background_value,
translation=translation,
device=device,
)
volume_b = wp.Volume.allocate(
-bb_max * voxel_size + translation,
bb_max * voxel_size + translation,
voxel_size=voxel_size,
bg_value=background_value,
translation=translation,
points_in_world_space=True,
device=device,
)
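    # volume_a and volume_b describe the same region: the first receives its
    # bounds in index space, the second in world space (hence the voxel_size
    # scaling and translation applied to the bounds)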
assert wp.types.types_equal(volume_a.dtype, wp.float32)
assert wp.types.types_equal(volume_b.dtype, wp.float32)
points = wp.array(points_ref, dtype=wp.vec3, device=device)
values_a = wp.empty(num_points, dtype=wp.float32, device=device)
values_b = wp.empty(num_points, dtype=wp.float32, device=device)
wp.launch(test_volume_store_f, dim=num_points, inputs=[volume_a.id, points], device=device)
wp.launch(test_volume_store_f, dim=num_points, inputs=[volume_b.id, points], device=device)
wp.launch(test_volume_readback_f, dim=num_points, inputs=[volume_a.id, points, values_a], device=device)
wp.launch(test_volume_readback_f, dim=num_points, inputs=[volume_b.id, points, values_b], device=device)
np.testing.assert_equal(values_a.numpy(), values_ref)
np.testing.assert_equal(values_b.numpy(), values_ref)
def test_volume_allocate_by_tiles_f(test, device):
voxel_size = 0.125
background_value = 123.456
translation = wp.vec3(-12.3, 4.56, -789)
num_tiles = 1000
rng = np.random.default_rng(101215)
tiles = rng.integers(-512, 512, size=(num_tiles, 3), dtype=np.int32)
points_is = tiles * 8 # points in index space
points_ws = points_is * voxel_size + translation # points in world space
values_ref = np.empty(num_tiles * 512)
for t in range(num_tiles):
ti, tj, tk = points_is[t]
for i in range(8):
for j in range(8):
for k in range(8):
values_ref[t * 512 + i * 64 + j * 8 + k] = float(100 * (ti + i) + 10 * (tj + j) + (tk + k))
points_is_d = wp.array(points_is, dtype=wp.int32, device=device)
points_ws_d = wp.array(points_ws, dtype=wp.vec3, device=device)
volume_a = wp.Volume.allocate_by_tiles(points_is_d, voxel_size, background_value, translation, device=device)
volume_b = wp.Volume.allocate_by_tiles(points_ws_d, voxel_size, background_value, translation, device=device)
assert wp.types.types_equal(volume_a.dtype, wp.float32)
assert wp.types.types_equal(volume_b.dtype, wp.float32)
values_a = wp.empty(num_tiles * 512, dtype=wp.float32, device=device)
values_b = wp.empty(num_tiles * 512, dtype=wp.float32, device=device)
wp.launch(test_volume_tile_store_f, dim=num_tiles, inputs=[volume_a.id, points_is_d], device=device)
wp.launch(test_volume_tile_store_ws_f, dim=num_tiles, inputs=[volume_b.id, points_ws_d], device=device)
wp.launch(test_volume_tile_readback_f, dim=num_tiles, inputs=[volume_a.id, points_is_d, values_a], device=device)
wp.launch(test_volume_tile_readback_f, dim=num_tiles, inputs=[volume_b.id, points_is_d, values_b], device=device)
np.testing.assert_equal(values_a.numpy(), values_ref)
np.testing.assert_equal(values_b.numpy(), values_ref)
def test_volume_allocate_by_tiles_v(test, device):
num_tiles = 1000
rng = np.random.default_rng(101215)
tiles = rng.integers(-512, 512, size=(num_tiles, 3), dtype=np.int32)
points_is = tiles * 8
values_ref = np.empty((len(tiles) * 512, 3))
for t in range(len(tiles)):
ti, tj, tk = points_is[t]
for i in range(8):
for j in range(8):
for k in range(8):
values_ref[t * 512 + i * 64 + j * 8 + k] = [ti + i, tj + j, tk + k]
points_d = wp.array(points_is, dtype=wp.int32, device=device)
volume = wp.Volume.allocate_by_tiles(points_d, 0.1, wp.vec3(1, 2, 3), device=device)
assert wp.types.types_equal(volume.dtype, wp.vec3)
values = wp.empty(len(points_d) * 512, dtype=wp.vec3, device=device)
wp.launch(test_volume_tile_store_v, dim=len(points_d), inputs=[volume.id, points_d], device=device)
wp.launch(test_volume_tile_readback_v, dim=len(points_d), inputs=[volume.id, points_d, values], device=device)
values_res = values.numpy()
np.testing.assert_equal(values_res, values_ref)
def test_volume_allocate_by_tiles_index(test, device):
num_tiles = 10
rng = np.random.default_rng(101215)
tiles = rng.integers(-512, 512, size=(num_tiles, 3), dtype=np.int32)
points_is = tiles * 8
points_d = wp.array(points_is, dtype=wp.int32, device=device)
volume = wp.Volume.allocate_by_tiles(points_d, 0.1, bg_value=None, device=device)
assert volume.is_index
vol_tiles = volume.get_tiles().numpy() / 8
vol_tile_sorted = vol_tiles[np.lexsort(vol_tiles.T[::-1])]
vol_tile_unique = np.unique(vol_tile_sorted, axis=0)
tile_sorted = tiles[np.lexsort(tiles.T[::-1])]
tile_unique = np.unique(tile_sorted, axis=0)
np.testing.assert_equal(tile_unique, vol_tile_unique)
def test_volume_allocation_from_voxels(test, device):
point_count = 387
rng = np.random.default_rng(101215)
# Create from world-space points
points = wp.array(rng.uniform(5.0, 10.0, size=(point_count, 3)), dtype=float, device=device)
volume = wp.Volume.allocate_by_voxels(
voxel_points=points, voxel_size=0.25, translation=(0.0, 5.0, 10.0), device=device
)
assert volume.is_index
test.assertNotEqual(volume.id, 0)
test.assertAlmostEqual(volume.get_voxel_size(), (0.25, 0.25, 0.25))
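    # note: assertAlmostEqual cannot subtract tuples, so this appears to rely
    # on its exact-equality fast path (0.25 is exactly representable)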
voxel_count = volume.get_voxel_count()
test.assertGreaterEqual(point_count, voxel_count)
test.assertGreaterEqual(voxel_count, 1)
voxels = volume.get_voxels()
# Check that world-to-index transform has been correctly applied
voxel_low = np.min(voxels.numpy(), axis=0)
voxel_up = np.max(voxels.numpy(), axis=0)
np.testing.assert_array_less([19, -1, -21], voxel_low)
np.testing.assert_array_less(voxel_up, [41, 21, 1])
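    # with world points in [5, 10]^3, translation (0, 5, 10) and voxel size
    # 0.25, index coordinates fall in roughly [20, 40] x [0, 20] x [-20, 0]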
# Recreate the volume from ijk coords
volume_from_ijk = wp.Volume.allocate_by_voxels(
voxel_points=voxels, voxel_size=0.25, translation=(0.0, 5.0, 10.0), device=device
)
assert volume_from_ijk.is_index
assert volume_from_ijk.get_voxel_count() == voxel_count
ijk_voxels = volume_from_ijk.get_voxels().numpy()
voxels = voxels.numpy()
voxel_sorted = voxels[np.lexsort(voxels.T[::-1])]
ijk_voxel_sorted = ijk_voxels[np.lexsort(ijk_voxels.T[::-1])]
np.testing.assert_equal(voxel_sorted, ijk_voxel_sorted)
devices = get_selected_cuda_test_devices()
class TestVolumeWrite(unittest.TestCase):
pass
add_function_test(TestVolumeWrite, "test_volume_allocation", test_volume_allocation, devices=devices)
add_function_test(TestVolumeWrite, "test_volume_allocate_by_tiles_f", test_volume_allocate_by_tiles_f, devices=devices)
add_function_test(TestVolumeWrite, "test_volume_allocate_by_tiles_v", test_volume_allocate_by_tiles_v, devices=devices)
add_function_test(
TestVolumeWrite, "test_volume_allocate_by_tiles_index", test_volume_allocate_by_tiles_index, devices=devices
)
add_function_test(
TestVolumeWrite,
"test_volume_allocation_from_voxels",
test_volume_allocation_from_voxels,
devices=devices,
)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 11,531 | Python | 33.017699 | 119 | 0.635678 |
NVIDIA/warp/warp/tests/test_struct.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
from typing import Any
import numpy as np
import warp as wp
from warp.fem import Sample as StructFromAnotherModule
from warp.tests.unittest_utils import *
@wp.struct
class Model:
dt: float
gravity: wp.vec3
m: wp.array(dtype=float)
@wp.struct
class State:
x: wp.array(dtype=wp.vec3)
v: wp.array(dtype=wp.vec3)
@wp.kernel
def kernel_step(state_in: State, state_out: State, model: Model):
i = wp.tid()
state_out.v[i] = state_in.v[i] + model.gravity / model.m[i] * model.dt
state_out.x[i] = state_in.x[i] + state_out.v[i] * model.dt
@wp.kernel
def kernel_step_with_copy(state_in: State, state_out: State, model: Model):
i = wp.tid()
model_rescaled = Model(1.0, model.gravity / model.m[i] * model.dt, model.m)
state_out_copy = State(state_out.x, state_out.v)
state_out_copy.v[i] = state_in.v[i] + model_rescaled.gravity
state_out_copy.x[i] = state_in.x[i] + state_out_copy.v[i] * model.dt
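# Struct constructors inside kernels copy members by value, but array members
# are handles that reference the same memory, so the writes through
# state_out_copy above land in state_out's arrays; test_step verifies both
# kernels produce identical results.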
def test_step(test, device):
rng = np.random.default_rng(123)
dim = 5
dt = 0.01
gravity = np.array([0, 0, -9.81])
m = np.ones(dim)
m_model = wp.array(m, dtype=float, device=device)
model = Model()
model.m = m_model
model.dt = dt
model.gravity = wp.vec3(0, 0, -9.81)
x = rng.normal(size=(dim, 3))
v = rng.normal(size=(dim, 3))
x_expected = x + (v + gravity / m[:, None] * dt) * dt
x_in = wp.array(x, dtype=wp.vec3, device=device)
v_in = wp.array(v, dtype=wp.vec3, device=device)
state_in = State()
state_in.x = x_in
state_in.v = v_in
state_out = State()
state_out.x = wp.empty_like(x_in)
state_out.v = wp.empty_like(v_in)
for step_kernel in [kernel_step, kernel_step_with_copy]:
with CheckOutput(test):
wp.launch(step_kernel, dim=dim, inputs=[state_in, state_out, model], device=device)
assert_np_equal(state_out.x.numpy(), x_expected, tol=1e-6)
@wp.kernel
def kernel_loss(x: wp.array(dtype=wp.vec3), loss: wp.array(dtype=float)):
i = wp.tid()
wp.atomic_add(loss, 0, x[i][0] * x[i][0] + x[i][1] * x[i][1] + x[i][2] * x[i][2])
def test_step_grad(test, device):
rng = np.random.default_rng(123)
dim = 5
dt = 0.01
gravity = np.array([0, 0, -9.81])
m = rng.random(size=dim) + 0.1
m_model = wp.array(m, dtype=float, device=device, requires_grad=True)
model = Model()
model.m = m_model
model.dt = dt
model.gravity = wp.vec3(0, 0, -9.81)
x = rng.normal(size=(dim, 3))
v = rng.normal(size=(dim, 3))
x_in = wp.array(x, dtype=wp.vec3, device=device, requires_grad=True)
v_in = wp.array(v, dtype=wp.vec3, device=device, requires_grad=True)
state_in = State()
state_in.x = x_in
state_in.v = v_in
state_out = State()
state_out.x = wp.empty_like(x_in, requires_grad=True)
state_out.v = wp.empty_like(v_in, requires_grad=True)
loss = wp.empty(1, dtype=float, device=device, requires_grad=True)
for step_kernel in [kernel_step, kernel_step_with_copy]:
tape = wp.Tape()
with tape:
wp.launch(step_kernel, dim=dim, inputs=[state_in, state_out, model], device=device)
wp.launch(kernel_loss, dim=dim, inputs=[state_out.x, loss], device=device)
tape.backward(loss)
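        # analytic reference gradients: with x_out = x_in + (v_in + g * dt / m) * dt
        # and loss = sum(|x_out|^2), we get dl/dx_out = 2 * x_out,
        # dl/dv = dl/dx_out * dt, and dv/dm = -g * dt / m^2 by the chain rule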
dl_dx = 2 * state_out.x.numpy()
dl_dv = dl_dx * dt
dv_dm = -gravity * dt / m[:, None] ** 2
dl_dm = (dl_dv * dv_dm).sum(-1)
assert_np_equal(state_out.x.grad.numpy(), dl_dx, tol=1e-6)
assert_np_equal(state_in.x.grad.numpy(), dl_dx, tol=1e-6)
assert_np_equal(state_out.v.grad.numpy(), dl_dv, tol=1e-6)
assert_np_equal(state_in.v.grad.numpy(), dl_dv, tol=1e-6)
assert_np_equal(model.m.grad.numpy(), dl_dm, tol=1e-6)
tape.zero()
assert state_out.x.grad.numpy().sum() == 0.0
assert state_in.x.grad.numpy().sum() == 0.0
assert state_out.v.grad.numpy().sum() == 0.0
assert state_in.v.grad.numpy().sum() == 0.0
assert model.m.grad.numpy().sum() == 0.0
@wp.struct
class Empty:
pass
@wp.kernel
def test_empty(input: Empty):
tid = wp.tid()
@wp.struct
class Uninitialized:
data: wp.array(dtype=int)
@wp.kernel
def test_uninitialized(input: Uninitialized):
tid = wp.tid()
@wp.struct
class Baz:
data: wp.array(dtype=int)
z: wp.vec3
@wp.struct
class Bar:
baz: Baz
y: float
@wp.struct
class Foo:
bar: Bar
x: int
@wp.kernel
def kernel_nested_struct(foo: Foo):
tid = wp.tid()
foo.bar.baz.data[tid] = (
foo.bar.baz.data[tid] + foo.x + int(foo.bar.y * 100.0) + int(wp.length_sq(foo.bar.baz.z)) + tid * 2
)
def test_nested_struct(test, device):
dim = 3
foo = Foo()
foo.bar = Bar()
foo.bar.baz = Baz()
foo.bar.baz.data = wp.zeros(dim, dtype=int, device=device)
foo.bar.baz.z = wp.vec3(1, 2, 3)
foo.bar.y = 1.23
foo.x = 123
wp.launch(kernel_nested_struct, dim=dim, inputs=[foo], device=device)
assert_array_equal(
foo.bar.baz.data,
wp.array((260, 262, 264), dtype=int, device=device),
)
def test_struct_attribute_error(test, device):
@wp.kernel
def kernel(foo: Foo):
_ = foo.nonexisting
with test.assertRaisesRegex(AttributeError, r"`nonexisting` is not an attribute of 'foo' \([\w.]+\.Foo\)$"):
wp.launch(
kernel,
dim=1,
inputs=[Foo()],
device=device,
)
@wp.kernel
def test_struct_instantiate(data: wp.array(dtype=int)):
baz = Baz(data, wp.vec3(0.0, 0.0, 26.0))
bar = Bar(baz, 25.0)
foo = Foo(bar, 24)
wp.expect_eq(foo.x, 24)
wp.expect_eq(foo.bar.y, 25.0)
wp.expect_eq(foo.bar.baz.z[2], 26.0)
wp.expect_eq(foo.bar.baz.data[0], 1)
@wp.struct
class MathThings:
v1: wp.vec3
v2: wp.vec3
v3: wp.vec3
m1: wp.mat22
m2: wp.mat22
m3: wp.mat22
m4: wp.mat22
m5: wp.mat22
m6: wp.mat22
@wp.kernel
def check_math_conversions(s: MathThings):
wp.expect_eq(s.v1, wp.vec3(1.0, 2.0, 3.0))
wp.expect_eq(s.v2, wp.vec3(10.0, 20.0, 30.0))
wp.expect_eq(s.v3, wp.vec3(100.0, 200.0, 300.0))
wp.expect_eq(s.m1, wp.mat22(1.0, 2.0, 3.0, 4.0))
wp.expect_eq(s.m2, wp.mat22(10.0, 20.0, 30.0, 40.0))
wp.expect_eq(s.m3, wp.mat22(100.0, 200.0, 300.0, 400.0))
wp.expect_eq(s.m4, wp.mat22(1.0, 2.0, 3.0, 4.0))
wp.expect_eq(s.m5, wp.mat22(10.0, 20.0, 30.0, 40.0))
wp.expect_eq(s.m6, wp.mat22(100.0, 200.0, 300.0, 400.0))
def test_struct_math_conversions(test, device):
s = MathThings()
# test assigning various containers to vector and matrix attributes
s.v1 = (1, 2, 3)
s.v2 = [10, 20, 30]
s.v3 = np.array([100, 200, 300])
# 2d containers for matrices
s.m1 = ((1, 2), (3, 4))
s.m2 = [[10, 20], [30, 40]]
s.m3 = np.array([[100, 200], [300, 400]])
# 1d containers for matrices
s.m4 = (1, 2, 3, 4)
s.m5 = [10, 20, 30, 40]
s.m6 = np.array([100, 200, 300, 400])
wp.launch(check_math_conversions, dim=1, inputs=[s], device=device)
@wp.struct
class TestData:
value: wp.int32
@wp.func
def GetTestData(value: wp.int32):
return TestData(value * 2)
@wp.kernel
def test_return_struct(data: wp.array(dtype=wp.int32)):
tid = wp.tid()
data[tid] = GetTestData(tid).value
wp.expect_eq(data[tid], tid * 2)
@wp.struct
class ReturnStruct:
a: int
b: int
@wp.func
def test_return_func():
a = ReturnStruct(1, 2)
return a
@wp.kernel
def test_return():
t = test_return_func()
wp.expect_eq(t.a, 1)
wp.expect_eq(t.b, 2)
@wp.struct
class DefaultAttribNested:
f: float
@wp.struct
class DefaultAttribStruct:
i: int
d: wp.float64
v: wp.vec3
m: wp.mat22
a: wp.array(dtype=wp.int32)
s: DefaultAttribNested
@wp.func
def check_default_attributes_func(data: DefaultAttribStruct):
wp.expect_eq(data.i, wp.int32(0))
wp.expect_eq(data.d, wp.float64(0))
wp.expect_eq(data.v, wp.vec3(0.0, 0.0, 0.0))
wp.expect_eq(data.m, wp.mat22(0.0, 0.0, 0.0, 0.0))
wp.expect_eq(data.a.shape[0], 0)
wp.expect_eq(data.s.f, wp.float32(0.0))
@wp.kernel
def check_default_attributes_kernel(data: DefaultAttribStruct):
check_default_attributes_func(data)
# check that structs are default-initialized correctly in kernels
@wp.kernel
def test_struct_default_attributes_kernel():
s = DefaultAttribStruct()
check_default_attributes_func(s)
@wp.struct
class MutableStruct:
param1: int
param2: float
@wp.kernel
def test_struct_mutate_attributes_kernel():
t = MutableStruct()
t.param1 = 1
t.param2 = 1.1
wp.expect_eq(t.param1, 1)
wp.expect_eq(t.param2, 1.1)
@wp.struct
class InnerStruct:
i: int
@wp.struct
class ArrayStruct:
array: wp.array(dtype=InnerStruct)
@wp.kernel
def struct2_reader(test: ArrayStruct):
k = wp.tid()
wp.expect_eq(k + 1, test.array[k].i)
def test_nested_array_struct(test, device):
var1 = InnerStruct()
var1.i = 1
var2 = InnerStruct()
var2.i = 2
struct = ArrayStruct()
struct.array = wp.array([var1, var2], dtype=InnerStruct, device=device)
wp.launch(struct2_reader, dim=2, inputs=[struct], device=device)
@wp.struct
class VecStruct:
value: wp.vec3
@wp.struct
class Bar2:
z: wp.array(dtype=float)
@wp.struct
class Foo2:
x: wp.array(dtype=float)
y: Bar2
def test_convert_to_device(test, device):
foo = Foo2()
foo.x = wp.array((1.23, 2.34), dtype=float, device=device)
foo.y = Bar2()
foo.y.z = wp.array((3.45, 4.56), dtype=float, device=device)
if device.is_cpu and wp.is_cuda_available():
dst_device = "cuda:0"
elif device.is_cuda and wp.is_cpu_available():
dst_device = "cpu"
else:
return
result = foo.to(dst_device)
assert result.x.device == dst_device
assert result.y.z.device == dst_device
@wp.struct
class EmptyNest1:
a: Empty
z: int
@wp.struct
class EmptyNest2:
a: Empty
b: Empty
z: int
@wp.struct
class EmptyNest3:
a: Empty
b: Empty
c: Empty
z: int
@wp.struct
class EmptyNest4:
a: Empty
b: Empty
c: Empty
d: Empty
z: int
@wp.struct
class EmptyNest5:
a: Empty
b: Empty
c: Empty
d: Empty
e: Empty
z: int
@wp.struct
class EmptyNest6:
a: Empty
b: Empty
c: Empty
d: Empty
e: Empty
f: Empty
z: int
@wp.struct
class EmptyNest7:
a: Empty
b: Empty
c: Empty
d: Empty
e: Empty
f: Empty
g: Empty
z: int
@wp.struct
class EmptyNest8:
a: Empty
b: Empty
c: Empty
d: Empty
e: Empty
f: Empty
g: Empty
h: Empty
z: int
@wp.kernel
def empty_nest_kernel(s: Any):
wp.expect_eq(s.z, 42)
wp.overload(empty_nest_kernel, [EmptyNest1])
wp.overload(empty_nest_kernel, [EmptyNest2])
wp.overload(empty_nest_kernel, [EmptyNest3])
wp.overload(empty_nest_kernel, [EmptyNest4])
wp.overload(empty_nest_kernel, [EmptyNest5])
wp.overload(empty_nest_kernel, [EmptyNest6])
wp.overload(empty_nest_kernel, [EmptyNest7])
wp.overload(empty_nest_kernel, [EmptyNest8])
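# EmptyNest1..8 appear to exercise struct codegen with an increasing number of
# empty members: empty structs still occupy storage, so each variant shifts the
# layout offset of the trailing int that the kernel checks.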
def test_nested_empty_struct(test, device):
with wp.ScopedDevice(device):
e1 = EmptyNest1()
e1.z = 42
e2 = EmptyNest2()
e2.z = 42
e3 = EmptyNest3()
e3.z = 42
e4 = EmptyNest4()
e4.z = 42
e5 = EmptyNest5()
e5.z = 42
e6 = EmptyNest6()
e6.z = 42
e7 = EmptyNest7()
e7.z = 42
e8 = EmptyNest8()
e8.z = 42
wp.launch(empty_nest_kernel, dim=1, inputs=[e1])
wp.launch(empty_nest_kernel, dim=1, inputs=[e2])
wp.launch(empty_nest_kernel, dim=1, inputs=[e3])
wp.launch(empty_nest_kernel, dim=1, inputs=[e4])
wp.launch(empty_nest_kernel, dim=1, inputs=[e5])
wp.launch(empty_nest_kernel, dim=1, inputs=[e6])
wp.launch(empty_nest_kernel, dim=1, inputs=[e7])
wp.launch(empty_nest_kernel, dim=1, inputs=[e8])
wp.synchronize_device()
@wp.struct
class DependentModuleImport_A:
s: StructFromAnotherModule
@wp.struct
class DependentModuleImport_B:
s: StructFromAnotherModule
@wp.struct
class DependentModuleImport_C:
a: DependentModuleImport_A
b: DependentModuleImport_B
@wp.kernel
def test_dependent_module_import(c: DependentModuleImport_C):
wp.tid() # nop, we're just testing codegen
devices = get_test_devices()
class TestStruct(unittest.TestCase):
    # check that structs are default-initialized correctly in Python
def test_struct_default_attributes_python(self):
s = DefaultAttribStruct()
wp.launch(check_default_attributes_kernel, dim=1, inputs=[s])
def test_nested_vec_assignment(self):
v = VecStruct()
v.value[0] = 1.0
v.value[1] = 2.0
v.value[2] = 3.0
arr = wp.array([v], dtype=VecStruct)
expected = np.array(([1.0, 2.0, 3.0],))
assert np.all(arr.numpy().tolist() == expected)
add_function_test(TestStruct, "test_step", test_step, devices=devices)
add_function_test(TestStruct, "test_step_grad", test_step_grad, devices=devices)
add_kernel_test(TestStruct, kernel=test_empty, name="test_empty", dim=1, inputs=[Empty()], devices=devices)
add_kernel_test(
TestStruct,
kernel=test_uninitialized,
name="test_uninitialized",
dim=1,
inputs=[Uninitialized()],
devices=devices,
)
add_kernel_test(TestStruct, kernel=test_return, name="test_return", dim=1, inputs=[], devices=devices)
add_function_test(TestStruct, "test_nested_struct", test_nested_struct, devices=devices)
add_function_test(TestStruct, "test_nested_array_struct", test_nested_array_struct, devices=devices)
add_function_test(TestStruct, "test_convert_to_device", test_convert_to_device, devices=devices)
add_function_test(TestStruct, "test_nested_empty_struct", test_nested_empty_struct, devices=devices)
add_function_test(TestStruct, "test_struct_math_conversions", test_struct_math_conversions, devices=devices)
add_kernel_test(
TestStruct,
name="test_struct_default_attributes",
kernel=test_struct_default_attributes_kernel,
dim=1,
inputs=[],
devices=devices,
)
add_kernel_test(
TestStruct,
name="test_struct_mutate_attributes",
kernel=test_struct_mutate_attributes_kernel,
dim=1,
inputs=[],
devices=devices,
)
for device in devices:
add_kernel_test(
TestStruct,
kernel=test_struct_instantiate,
name="test_struct_instantiate",
dim=1,
inputs=[wp.array([1], dtype=int, device=device)],
devices=[device],
)
add_kernel_test(
TestStruct,
kernel=test_return_struct,
name="test_return_struct",
dim=1,
inputs=[wp.zeros(10, dtype=int, device=device)],
devices=[device],
)
add_kernel_test(
TestStruct,
kernel=test_dependent_module_import,
name="test_dependent_module_import",
dim=1,
inputs=[DependentModuleImport_C()],
devices=devices,
)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 16,581 | Python | 22.38787 | 112 | 0.628189 |
NVIDIA/warp/warp/tests/test_import.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import warp as wp
import warp.tests.test_func as test_func
from warp.tests.unittest_utils import *
@wp.kernel
def test_import_func():
    # test that a cross-module function reference is resolved correctly
x = test_func.sqr(2.0)
y = test_func.cube(2.0)
wp.expect_eq(x, 4.0)
wp.expect_eq(y, 8.0)
devices = get_test_devices()
class TestImport(unittest.TestCase):
pass
add_kernel_test(TestImport, kernel=test_import_func, name="test_import_func", dim=1, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 1,027 | Python | 26.052631 | 101 | 0.740993 |
NVIDIA/warp/warp/tests/test_mempool.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import warp as wp
from warp.tests.unittest_utils import *
def get_device_pair_with_mempool_access_support():
devices = wp.get_cuda_devices()
for target_device in devices:
for peer_device in devices:
if target_device != peer_device:
if wp.is_mempool_access_supported(target_device, peer_device):
return (target_device, peer_device)
return None
def get_device_pair_without_mempool_access_support():
devices = wp.get_cuda_devices()
for target_device in devices:
for peer_device in devices:
if target_device != peer_device:
if not wp.is_mempool_access_supported(target_device, peer_device):
return (target_device, peer_device)
return None
def test_mempool_release_threshold(test, device):
device = wp.get_device(device)
assert device.is_mempool_supported
test.assertEqual(wp.is_mempool_supported(device), device.is_mempool_supported)
was_enabled = wp.is_mempool_enabled(device)
# toggle
wp.set_mempool_enabled(device, not was_enabled)
test.assertEqual(wp.is_mempool_enabled(device), not was_enabled)
# restore
wp.set_mempool_enabled(device, was_enabled)
test.assertEqual(wp.is_mempool_enabled(device), was_enabled)
saved_threshold = wp.get_mempool_release_threshold(device)
# set new absolute threshold
wp.set_mempool_release_threshold(device, 42000)
test.assertEqual(wp.get_mempool_release_threshold(device), 42000)
# set new fractional threshold
wp.set_mempool_release_threshold(device, 0.5)
test.assertEqual(wp.get_mempool_release_threshold(device), int(0.5 * device.total_memory))
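    # as exercised above, thresholds below 1.0 are interpreted as a fraction of
    # the device's total memory, while larger values are absolute byte counts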
# restore threshold
wp.set_mempool_release_threshold(device, saved_threshold)
test.assertEqual(wp.get_mempool_release_threshold(device), saved_threshold)
def test_mempool_exceptions(test, device):
device = wp.get_device(device)
assert not device.is_mempool_supported
if device.is_cuda:
expected_error = RuntimeError
else:
expected_error = ValueError
with test.assertRaises(expected_error):
wp.get_mempool_release_threshold(device)
with test.assertRaises(expected_error):
wp.set_mempool_release_threshold(device, 42000)
def test_mempool_access_self(test, device):
device = wp.get_device(device)
assert device.is_mempool_supported
# setting mempool access to self is a no-op
wp.set_mempool_access_enabled(device, device, True)
wp.set_mempool_access_enabled(device, device, False)
# should always be enabled
enabled = wp.is_mempool_access_enabled(device, device)
test.assertTrue(enabled)
@unittest.skipUnless(get_device_pair_with_mempool_access_support(), "Requires devices with mempool access support")
def test_mempool_access(test, _):
target_device, peer_device = get_device_pair_with_mempool_access_support()
was_enabled = wp.is_mempool_access_enabled(target_device, peer_device)
if was_enabled:
# try disabling
wp.set_mempool_access_enabled(target_device, peer_device, False)
is_enabled = wp.is_mempool_access_enabled(target_device, peer_device)
test.assertFalse(is_enabled)
# try re-enabling
wp.set_mempool_access_enabled(target_device, peer_device, True)
is_enabled = wp.is_mempool_access_enabled(target_device, peer_device)
test.assertTrue(is_enabled)
else:
# try enabling
wp.set_mempool_access_enabled(target_device, peer_device, True)
is_enabled = wp.is_mempool_access_enabled(target_device, peer_device)
test.assertTrue(is_enabled)
# try re-disabling
wp.set_mempool_access_enabled(target_device, peer_device, False)
is_enabled = wp.is_mempool_access_enabled(target_device, peer_device)
test.assertFalse(is_enabled)
@unittest.skipUnless(
get_device_pair_without_mempool_access_support(), "Requires devices without mempool access support"
)
def test_mempool_access_exceptions_unsupported(test, _):
# get a CUDA device pair without mempool access support
target_device, peer_device = get_device_pair_without_mempool_access_support()
# querying is ok, but must return False
test.assertFalse(wp.is_mempool_access_enabled(target_device, peer_device))
# enabling should raise RuntimeError
with test.assertRaises(RuntimeError):
wp.set_mempool_access_enabled(target_device, peer_device, True)
# disabling should not raise an error
wp.set_mempool_access_enabled(target_device, peer_device, False)
@unittest.skipUnless(wp.is_cpu_available() and wp.is_cuda_available(), "Requires both CUDA and CPU devices")
def test_mempool_access_exceptions_cpu(test, _):
# querying is ok, but must return False
test.assertFalse(wp.is_mempool_access_enabled("cuda:0", "cpu"))
test.assertFalse(wp.is_mempool_access_enabled("cpu", "cuda:0"))
# enabling should raise ValueError
with test.assertRaises(ValueError):
wp.set_mempool_access_enabled("cpu", "cuda:0", True)
with test.assertRaises(ValueError):
wp.set_mempool_access_enabled("cuda:0", "cpu", True)
# disabling should not raise an error
wp.set_mempool_access_enabled("cpu", "cuda:0", False)
wp.set_mempool_access_enabled("cuda:0", "cpu", False)
class TestMempool(unittest.TestCase):
pass
devices_with_mempools = [d for d in get_test_devices() if d.is_mempool_supported]
devices_without_mempools = [d for d in get_test_devices() if not d.is_mempool_supported]
# test devices with mempool support
add_function_test(
TestMempool, "test_mempool_release_threshold", test_mempool_release_threshold, devices=devices_with_mempools
)
add_function_test(TestMempool, "test_mempool_access_self", test_mempool_access_self, devices=devices_with_mempools)
# test devices without mempool support
add_function_test(TestMempool, "test_mempool_exceptions", test_mempool_exceptions, devices=devices_without_mempools)
# mempool access tests
add_function_test(TestMempool, "test_mempool_access", test_mempool_access)
# mempool access exceptions
add_function_test(TestMempool, "test_mempool_access_exceptions_unsupported", test_mempool_access_exceptions_unsupported)
add_function_test(TestMempool, "test_mempool_access_exceptions_cpu", test_mempool_access_exceptions_cpu)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 6,901 | Python | 35.909091 | 120 | 0.721055 |
NVIDIA/warp/warp/tests/test_runlength_encode.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
from functools import partial
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
from warp.utils import runlength_encode
def test_runlength_encode_int(test, device, n):
rng = np.random.default_rng(123)
values_np = np.sort(rng.integers(-10, high=10, size=n, dtype=int))
unique_values_np, unique_counts_np = np.unique(values_np, return_counts=True)
values = wp.array(values_np, device=device, dtype=int)
unique_values = wp.empty_like(values)
unique_counts = wp.empty_like(values)
run_count = runlength_encode(values, unique_values, unique_counts)
test.assertEqual(run_count, len(unique_values_np))
assert_np_equal(unique_values.numpy()[:run_count], unique_values_np[:run_count])
assert_np_equal(unique_counts.numpy()[:run_count], unique_counts_np[:run_count])
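# Minimal usage sketch (illustrative, not executed): runs are maximal groups of
# consecutive equal values, so the input need not be globally sorted.
#
#   values = wp.array([1, 1, 2, 7, 7, 7], dtype=int)
#   run_values = wp.empty_like(values)
#   run_lengths = wp.empty_like(values)
#   count = runlength_encode(values, run_values, run_lengths)
#   # count == 3, run_values[:3] == [1, 2, 7], run_lengths[:3] == [2, 1, 3]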
def test_runlength_encode_error_insufficient_storage(test, device):
values = wp.zeros(123, dtype=int, device=device)
run_values = wp.empty(1, dtype=int, device=device)
run_lengths = wp.empty(123, dtype=int, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"Output array storage sizes must be at least equal to value_count$",
):
runlength_encode(values, run_values, run_lengths)
values = wp.zeros(123, dtype=int, device="cpu")
run_values = wp.empty(123, dtype=int, device="cpu")
run_lengths = wp.empty(1, dtype=int, device="cpu")
with test.assertRaisesRegex(
RuntimeError,
r"Output array storage sizes must be at least equal to value_count$",
):
runlength_encode(values, run_values, run_lengths)
def test_runlength_encode_error_dtypes_mismatch(test, device):
values = wp.zeros(123, dtype=int, device=device)
run_values = wp.empty(123, dtype=float, device=device)
run_lengths = wp.empty_like(values, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"values and run_values data types do not match$",
):
runlength_encode(values, run_values, run_lengths)
def test_runlength_encode_error_run_length_unsupported_dtype(test, device):
values = wp.zeros(123, dtype=int, device=device)
run_values = wp.empty(123, dtype=int, device=device)
run_lengths = wp.empty(123, dtype=float, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"run_lengths array must be of type int32$",
):
runlength_encode(values, run_values, run_lengths)
def test_runlength_encode_error_run_count_unsupported_dtype(test, device):
values = wp.zeros(123, dtype=int, device=device)
run_values = wp.empty_like(values, device=device)
run_lengths = wp.empty_like(values, device=device)
run_count = wp.empty(shape=(1,), dtype=float, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"run_count array must be of type int32$",
):
runlength_encode(values, run_values, run_lengths, run_count=run_count)
def test_runlength_encode_error_unsupported_dtype(test, device):
values = wp.zeros(123, dtype=float, device=device)
run_values = wp.empty(123, dtype=float, device=device)
run_lengths = wp.empty(123, dtype=int, device=device)
with test.assertRaisesRegex(
RuntimeError,
r"Unsupported data type$",
):
runlength_encode(values, run_values, run_lengths)
devices = get_test_devices()
class TestRunlengthEncode(unittest.TestCase):
@unittest.skipUnless(wp.is_cuda_available(), "Requires CUDA")
def test_runlength_encode_error_devices_mismatch(self):
values = wp.zeros(123, dtype=int, device="cpu")
run_values = wp.empty_like(values, device="cuda:0")
run_lengths = wp.empty_like(values, device="cuda:0")
with self.assertRaisesRegex(
RuntimeError,
r"Array storage devices do not match$",
):
runlength_encode(values, run_values, run_lengths)
values = wp.zeros(123, dtype=int, device="cpu")
run_values = wp.empty_like(values, device="cpu")
run_lengths = wp.empty_like(values, device="cuda:0")
with self.assertRaisesRegex(
RuntimeError,
r"Array storage devices do not match$",
):
runlength_encode(values, run_values, run_lengths)
values = wp.zeros(123, dtype=int, device="cpu")
run_values = wp.empty_like(values, device="cuda:0")
run_lengths = wp.empty_like(values, device="cpu")
with self.assertRaisesRegex(
RuntimeError,
r"Array storage devices do not match$",
):
runlength_encode(values, run_values, run_lengths)
@unittest.skipUnless(wp.is_cuda_available(), "Requires CUDA")
def test_runlength_encode_error_run_count_device_mismatch(self):
values = wp.zeros(123, dtype=int, device="cpu")
run_values = wp.empty_like(values, device="cpu")
run_lengths = wp.empty_like(values, device="cpu")
run_count = wp.empty(shape=(1,), dtype=int, device="cuda:0")
with self.assertRaisesRegex(
RuntimeError,
r"run_count storage device does not match other arrays$",
):
runlength_encode(values, run_values, run_lengths, run_count=run_count)
add_function_test(
TestRunlengthEncode, "test_runlength_encode_int", partial(test_runlength_encode_int, n=100), devices=devices
)
add_function_test(
TestRunlengthEncode, "test_runlength_encode_empty", partial(test_runlength_encode_int, n=0), devices=devices
)
add_function_test(
TestRunlengthEncode,
"test_runlength_encode_error_insufficient_storage",
test_runlength_encode_error_insufficient_storage,
devices=devices,
)
add_function_test(
TestRunlengthEncode,
"test_runlength_encode_error_dtypes_mismatch",
test_runlength_encode_error_dtypes_mismatch,
devices=devices,
)
add_function_test(
TestRunlengthEncode,
"test_runlength_encode_error_run_length_unsupported_dtype",
test_runlength_encode_error_run_length_unsupported_dtype,
devices=devices,
)
add_function_test(
TestRunlengthEncode,
"test_runlength_encode_error_run_count_unsupported_dtype",
test_runlength_encode_error_run_count_unsupported_dtype,
devices=devices,
)
add_function_test(
TestRunlengthEncode,
"test_runlength_encode_error_unsupported_dtype",
test_runlength_encode_error_unsupported_dtype,
devices=devices,
)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 6,954 | Python | 35.798942 | 112 | 0.688668 |
NVIDIA/warp/warp/tests/test_print.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import sys
import unittest
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def test_print_kernel():
wp.print(1.0)
wp.print("this is a string")
wp.printf("this is a float %f\n", 457.5)
wp.printf("this is an int %d\n", 123)
def test_print(test, device):
wp.load_module(device=device)
capture = StdOutCapture()
capture.begin()
wp.launch(kernel=test_print_kernel, dim=1, inputs=[], device=device)
wp.synchronize_device(device)
s = capture.end()
    # We skip the comparison on win32 for now since the capture is sometimes an empty string
if sys.platform != "win32":
test.assertRegex(
s,
rf"1{os.linesep}"
rf"this is a string{os.linesep}"
rf"this is a float 457\.500000{os.linesep}"
rf"this is an int 123",
)
class TestPrint(unittest.TestCase):
pass
devices = get_test_devices()
add_function_test(TestPrint, "test_print", test_print, devices=devices, check_output=False)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 1,536 | Python | 27.999999 | 91 | 0.686198 |
NVIDIA/warp/warp/tests/test_lerp.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
from dataclasses import dataclass
from typing import Any
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
@dataclass
class TestData:
a: Any
b: Any
t: float
expected: Any
expected_adj_a: Any = None
expected_adj_b: Any = None
expected_adj_t: float = None
def check_backwards(self):
return self.expected_adj_a is not None and self.expected_adj_b is not None and self.expected_adj_t is not None
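# Expected adjoints follow from lerp(a, b, t) = a + t * (b - a):
#   d/da = 1 - t, d/db = t, d/dt = b - a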
TEST_DATA = {
wp.float32: (
TestData(
a=1.0,
b=5.0,
t=0.75,
expected=4.0,
expected_adj_a=0.25,
expected_adj_b=0.75,
expected_adj_t=4.0,
),
TestData(
a=-2.0,
b=5.0,
t=0.25,
expected=-0.25,
expected_adj_a=0.75,
expected_adj_b=0.25,
expected_adj_t=7.0,
),
TestData(
a=1.23,
b=2.34,
t=0.5,
expected=1.785,
expected_adj_a=0.5,
expected_adj_b=0.5,
expected_adj_t=1.11,
),
),
wp.vec2: (
TestData(
a=[1, 2],
b=[3, 4],
t=0.5,
expected=[2, 3],
),
),
wp.vec3: (
TestData(
a=[1, 2, 3],
b=[3, 4, 5],
t=0.5,
expected=[2, 3, 4],
),
),
wp.vec4: (
TestData(
a=[1, 2, 3, 4],
b=[3, 4, 5, 6],
t=0.5,
expected=[2, 3, 4, 5],
),
),
wp.mat22: (
TestData(
a=[[1, 2], [2, 1]],
b=[[3, 4], [4, 3]],
t=0.5,
expected=[[2, 3], [3, 2]],
),
),
wp.mat33: (
TestData(
a=[[1, 2, 3], [3, 1, 2], [2, 3, 1]],
b=[[3, 4, 5], [5, 3, 4], [4, 5, 3]],
t=0.5,
expected=[[2, 3, 4], [4, 2, 3], [3, 4, 2]],
),
),
wp.mat44: (
TestData(
a=[[1, 2, 3, 4], [4, 1, 2, 3], [3, 4, 1, 2], [2, 3, 4, 1]],
b=[[3, 4, 5, 6], [6, 3, 4, 5], [5, 6, 3, 4], [4, 5, 6, 3]],
t=0.5,
expected=[[2, 3, 4, 5], [5, 2, 3, 4], [4, 5, 2, 3], [3, 4, 5, 2]],
),
),
wp.quat: (
TestData(
a=[1, 2, 3, 4],
b=[3, 4, 5, 6],
t=0.5,
expected=[2, 3, 4, 5],
),
),
wp.transform: (
TestData(
a=[1, 2, 3, 4, 5, 6, 7],
b=[3, 4, 5, 6, 7, 8, 9],
t=0.5,
expected=[2, 3, 4, 5, 6, 7, 8],
),
),
wp.spatial_vector: (
TestData(
a=[1, 2, 3, 4, 5, 6],
b=[3, 4, 5, 6, 7, 8],
t=0.5,
expected=[2, 3, 4, 5, 6, 7],
),
),
wp.spatial_matrix: (
TestData(
a=[
[1, 2, 3, 4, 5, 6],
[6, 1, 2, 3, 4, 5],
[5, 6, 1, 2, 3, 4],
[4, 5, 6, 1, 2, 3],
[3, 4, 5, 6, 1, 2],
[2, 3, 4, 5, 6, 1],
],
b=[
[3, 4, 5, 6, 7, 8],
[8, 3, 4, 5, 6, 7],
[7, 8, 3, 4, 5, 6],
[6, 7, 8, 3, 4, 5],
[5, 6, 7, 8, 3, 4],
[4, 5, 6, 7, 8, 3],
],
t=0.5,
expected=[
[2, 3, 4, 5, 6, 7],
[7, 2, 3, 4, 5, 6],
[6, 7, 2, 3, 4, 5],
[5, 6, 7, 2, 3, 4],
[4, 5, 6, 7, 2, 3],
[3, 4, 5, 6, 7, 2],
],
),
),
}
def test_lerp(test, device):
def make_kernel_fn(data_type):
def fn(
a: wp.array(dtype=data_type),
b: wp.array(dtype=data_type),
t: wp.array(dtype=float),
out: wp.array(dtype=data_type),
):
out[0] = wp.lerp(a[0], b[0], t[0])
return fn
for data_type in TEST_DATA:
kernel_fn = make_kernel_fn(data_type)
kernel = wp.Kernel(func=kernel_fn, key=f"test_lerp_{data_type.__name__}_kernel")
with test.subTest(data_type=data_type):
for test_data in TEST_DATA[data_type]:
a = wp.array([test_data.a], dtype=data_type, device=device, requires_grad=True)
b = wp.array([test_data.b], dtype=data_type, device=device, requires_grad=True)
t = wp.array([test_data.t], dtype=float, device=device, requires_grad=True)
out = wp.array(
[0] * wp.types.type_length(data_type), dtype=data_type, device=device, requires_grad=True
)
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[a, b, t, out], device=device)
assert_np_equal(out.numpy(), np.array([test_data.expected]), tol=1e-6)
if test_data.check_backwards():
tape.backward(out)
assert_np_equal(tape.gradients[a].numpy(), np.array([test_data.expected_adj_a]), tol=1e-6)
assert_np_equal(tape.gradients[b].numpy(), np.array([test_data.expected_adj_b]), tol=1e-6)
assert_np_equal(tape.gradients[t].numpy(), np.array([test_data.expected_adj_t]), tol=1e-6)
devices = get_test_devices()
class TestLerp(unittest.TestCase):
pass
add_function_test(TestLerp, "test_lerp", test_lerp, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 6,038 | Python | 26.701835 | 118 | 0.424644 |
NVIDIA/warp/warp/tests/test_special_values.py | # Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import unittest
import warp as wp
from warp.tests.unittest_utils import *
kernel_cache = {}
def getkernel(func, suffix=""):
key = func.__name__ + "_" + suffix
if key not in kernel_cache:
kernel_cache[key] = wp.Kernel(func=func, key=key)
return kernel_cache[key]
def test_infinity_scalar(test, device, dtype, register_kernels=False):
def check_infinity(outputs: wp.array(dtype=dtype), bool_outputs: wp.array(dtype=wp.bool)):
outputs[0] = dtype(wp.inf)
outputs[1] = dtype(-wp.inf)
outputs[2] = dtype(2.0 * wp.inf)
outputs[3] = dtype(-2.0 * wp.inf)
outputs[4] = dtype(2.0 / 0.0)
outputs[5] = dtype(-2.0 / 0.0)
outputs[6] = wp.log(dtype(0))
outputs[7] = wp.exp(dtype(800))
# Fill out bool outputs
bool_outputs[0] = wp.isinf(dtype(wp.inf))
bool_outputs[1] = wp.isfinite(dtype(wp.inf))
bool_outputs[2] = wp.isinf(dtype(-wp.inf))
bool_outputs[3] = wp.isfinite(dtype(-wp.inf))
bool_outputs[4] = wp.isinf(dtype(0))
bool_outputs[5] = wp.isinf(wp.exp(dtype(800)))
kernel = getkernel(check_infinity, suffix=dtype.__name__)
if register_kernels:
return
outputs = wp.empty(8, dtype=dtype, device=device)
outputs_bool = wp.empty(6, dtype=wp.bool, device=device)
wp.launch(kernel, dim=1, inputs=[], outputs=[outputs, outputs_bool], device=device)
outputs_cpu = outputs.to("cpu").list()
test.assertEqual(outputs_cpu[0], math.inf)
test.assertEqual(outputs_cpu[1], -math.inf)
test.assertEqual(outputs_cpu[2], math.inf)
test.assertEqual(outputs_cpu[3], -math.inf)
test.assertEqual(outputs_cpu[4], math.inf)
test.assertEqual(outputs_cpu[5], -math.inf)
test.assertEqual(outputs_cpu[6], -math.inf)
test.assertEqual(outputs_cpu[7], math.inf)
outputs_bool_cpu = outputs_bool.to("cpu").list()
test.assertTrue(outputs_bool_cpu[0], "wp.isinf(wp.inf) is not True")
test.assertFalse(outputs_bool_cpu[1], "wp.isfinite(wp.inf) is not False")
test.assertTrue(outputs_bool_cpu[2], "wp.isinf(-wp.inf) is not True")
test.assertFalse(outputs_bool_cpu[3], "wp.isfinite(-wp.inf) is not False")
test.assertFalse(outputs_bool_cpu[4], "wp.isinf(0) is not False")
test.assertTrue(outputs_bool_cpu[5], "wp.isinf(wp.exp(800)) is not True")
def test_nan_scalar(test, device, dtype, register_kernels=False):
def check_nan(outputs: wp.array(dtype=dtype), bool_outputs: wp.array(dtype=wp.bool)):
outputs[0] = dtype(wp.nan)
outputs[1] = dtype(-wp.nan)
outputs[2] = dtype(2.0 * wp.nan)
outputs[3] = dtype(2.0 + wp.nan)
outputs[4] = dtype(0.0 / 0.0)
outputs[5] = wp.sqrt(dtype(-1))
outputs[6] = wp.log(dtype(-1))
outputs[7] = dtype(wp.inf) - dtype(wp.inf)
# Fill out bool outputs
bool_outputs[0] = dtype(wp.nan) == dtype(wp.nan)
bool_outputs[1] = dtype(wp.nan) != dtype(wp.nan)
bool_outputs[2] = dtype(wp.nan) == dtype(1)
bool_outputs[3] = dtype(wp.nan) != dtype(1)
bool_outputs[4] = wp.isnan(wp.nan)
bool_outputs[5] = wp.isnan(dtype(0.0))
bool_outputs[6] = wp.isnan(dtype(wp.inf))
bool_outputs[7] = dtype(wp.nan) > dtype(1)
bool_outputs[8] = dtype(wp.nan) >= dtype(1)
bool_outputs[9] = dtype(wp.nan) < dtype(1)
bool_outputs[10] = dtype(wp.nan) <= dtype(1)
bool_outputs[11] = dtype(wp.nan) > dtype(wp.nan)
bool_outputs[12] = dtype(wp.nan) >= dtype(wp.nan)
bool_outputs[13] = dtype(wp.nan) < dtype(wp.nan)
bool_outputs[14] = dtype(wp.nan) <= dtype(wp.nan)
bool_outputs[15] = wp.isfinite(dtype(wp.nan))
bool_outputs[16] = wp.isinf(dtype(wp.nan))
kernel = getkernel(check_nan, suffix=dtype.__name__)
if register_kernels:
return
outputs = wp.empty(8, dtype=dtype, device=device)
outputs_bool = wp.empty(17, dtype=wp.bool, device=device)
wp.launch(kernel, dim=1, inputs=[], outputs=[outputs, outputs_bool], device=device)
outputs_cpu = outputs.to("cpu").list()
test.assertTrue(math.isnan(outputs_cpu[0]), "wp.nan is not NaN")
test.assertTrue(math.isnan(outputs_cpu[1]), "-wp.nan is not NaN")
test.assertTrue(math.isnan(outputs_cpu[2]), "2.0*wp.nan is not NaN")
    test.assertTrue(math.isnan(outputs_cpu[3]), "2.0+wp.nan is not NaN")
test.assertTrue(math.isnan(outputs_cpu[4]), "0.0/0.0 is not NaN")
test.assertTrue(math.isnan(outputs_cpu[5]), "Sqrt of a negative number is not NaN")
test.assertTrue(math.isnan(outputs_cpu[6]), "Log of a negative number is not NaN")
test.assertTrue(math.isnan(outputs_cpu[7]), "Subtracting infinity from infinity is not NaN")
outputs_bool_cpu = outputs_bool.to("cpu").list()
test.assertFalse(outputs_bool_cpu[0], "wp.nan == wp.nan is not False")
test.assertTrue(outputs_bool_cpu[1], "wp.nan != wp.nan is not True")
test.assertFalse(outputs_bool_cpu[2], "wp.nan == 1 is not False")
test.assertTrue(outputs_bool_cpu[3], "wp.nan != 1 is not True")
test.assertTrue(outputs_bool_cpu[4], "isnan(wp.nan) is not True")
test.assertFalse(outputs_bool_cpu[5], "isnan(0.0) is not False")
test.assertFalse(outputs_bool_cpu[6], "isnan(wp.inf) is not False")
test.assertFalse(outputs_bool_cpu[7], "wp.nan > 1 is not False")
test.assertFalse(outputs_bool_cpu[8], "wp.nan >= 1 is not False")
test.assertFalse(outputs_bool_cpu[9], "wp.nan < 1 is not False")
test.assertFalse(outputs_bool_cpu[10], "wp.nan <= 1 is not False")
test.assertFalse(outputs_bool_cpu[11], "wp.nan > wp.nan is not False")
test.assertFalse(outputs_bool_cpu[12], "wp.nan >= wp.nan is not False")
test.assertFalse(outputs_bool_cpu[13], "wp.nan < wp.nan is not False")
test.assertFalse(outputs_bool_cpu[14], "wp.nan <= wp.nan is not False")
test.assertFalse(outputs_bool_cpu[15], "wp.isfinite(wp.nan) is not False")
test.assertFalse(outputs_bool_cpu[16], "wp.isinf(wp.nan) is not False")
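# Note: NaN compares unequal to everything, including itself, and every ordered
# comparison involving NaN is False; that is why only the != checks above assert
# True while the ==, <, <=, >, and >= checks all assert False.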
def test_is_special_vec(test, device, dtype, register_kernels=False):
vector_type = wp.types.vector(5, dtype)
def check_special_vec(bool_outputs: wp.array(dtype=wp.bool)):
zeros_vector = vector_type()
bool_outputs[0] = wp.isfinite(zeros_vector)
bool_outputs[1] = wp.isinf(zeros_vector)
bool_outputs[2] = wp.isnan(zeros_vector)
nan_vector = vector_type()
nan_vector[0] = dtype(wp.NAN)
bool_outputs[3] = wp.isfinite(nan_vector)
bool_outputs[4] = wp.isinf(nan_vector)
bool_outputs[5] = wp.isnan(nan_vector)
inf_vector = vector_type()
inf_vector[0] = dtype(wp.inf)
bool_outputs[6] = wp.isfinite(inf_vector)
bool_outputs[7] = wp.isinf(inf_vector)
bool_outputs[8] = wp.isnan(inf_vector)
kernel = getkernel(check_special_vec, suffix=dtype.__name__)
if register_kernels:
return
outputs_bool = wp.empty(9, dtype=wp.bool, device=device)
wp.launch(kernel, dim=1, inputs=[outputs_bool], device=device)
outputs_bool_cpu = outputs_bool.to("cpu").list()
test.assertTrue(outputs_bool_cpu[0], "wp.isfinite(zeros_vector) is not True")
test.assertFalse(outputs_bool_cpu[1], "wp.isinf(zeros_vector) is not False")
test.assertFalse(outputs_bool_cpu[2], "wp.isnan(zeros_vector) is not False")
test.assertFalse(outputs_bool_cpu[3], "wp.isfinite(nan_vector) is not False")
test.assertFalse(outputs_bool_cpu[4], "wp.isinf(nan_vector) is not False")
test.assertTrue(outputs_bool_cpu[5], "wp.isnan(nan_vector) is not True")
test.assertFalse(outputs_bool_cpu[6], "wp.isfinite(inf_vector) is not False")
test.assertTrue(outputs_bool_cpu[7], "wp.isinf(inf_vector) is not True")
test.assertFalse(outputs_bool_cpu[8], "wp.isnan(inf_vector) is not False")
def test_is_special_mat(test, device, dtype, register_kernels=False):
mat_type = wp.types.matrix((5, 5), dtype)
def check_special_mat(bool_outputs: wp.array(dtype=wp.bool)):
zeros_mat = mat_type()
bool_outputs[0] = wp.isfinite(zeros_mat)
bool_outputs[1] = wp.isinf(zeros_mat)
bool_outputs[2] = wp.isnan(zeros_mat)
nan_mat = mat_type()
nan_mat[0, 0] = dtype(wp.NAN)
bool_outputs[3] = wp.isfinite(nan_mat)
bool_outputs[4] = wp.isinf(nan_mat)
bool_outputs[5] = wp.isnan(nan_mat)
inf_mat = mat_type()
inf_mat[0, 0] = dtype(wp.inf)
bool_outputs[6] = wp.isfinite(inf_mat)
bool_outputs[7] = wp.isinf(inf_mat)
bool_outputs[8] = wp.isnan(inf_mat)
kernel = getkernel(check_special_mat, suffix=dtype.__name__)
if register_kernels:
return
outputs_bool = wp.empty(9, dtype=wp.bool, device=device)
wp.launch(kernel, dim=1, inputs=[outputs_bool], device=device)
outputs_bool_cpu = outputs_bool.to("cpu").list()
test.assertTrue(outputs_bool_cpu[0], "wp.isfinite(zeros_mat) is not True")
test.assertFalse(outputs_bool_cpu[1], "wp.isinf(zeros_mat) is not False")
test.assertFalse(outputs_bool_cpu[2], "wp.isnan(zeros_mat) is not False")
test.assertFalse(outputs_bool_cpu[3], "wp.isfinite(nan_mat) is not False")
test.assertFalse(outputs_bool_cpu[4], "wp.isinf(nan_mat) is not False")
test.assertTrue(outputs_bool_cpu[5], "wp.isnan(nan_mat) is not True")
test.assertFalse(outputs_bool_cpu[6], "wp.isfinite(inf_mat) is not False")
test.assertTrue(outputs_bool_cpu[7], "wp.isinf(inf_mat) is not True")
test.assertFalse(outputs_bool_cpu[8], "wp.isnan(inf_mat) is not False")
def test_is_special_quat(test, device, dtype, register_kernels=False):
quat_type = wp.types.quaternion(dtype)
def check_special_quat(bool_outputs: wp.array(dtype=wp.bool)):
zeros_quat = quat_type()
bool_outputs[0] = wp.isfinite(zeros_quat)
bool_outputs[1] = wp.isinf(zeros_quat)
bool_outputs[2] = wp.isnan(zeros_quat)
nan_quat = quat_type(dtype(wp.NAN), dtype(0), dtype(0), dtype(0))
bool_outputs[3] = wp.isfinite(nan_quat)
bool_outputs[4] = wp.isinf(nan_quat)
bool_outputs[5] = wp.isnan(nan_quat)
inf_quat = quat_type(dtype(wp.INF), dtype(0), dtype(0), dtype(0))
bool_outputs[6] = wp.isfinite(inf_quat)
bool_outputs[7] = wp.isinf(inf_quat)
bool_outputs[8] = wp.isnan(inf_quat)
kernel = getkernel(check_special_quat, suffix=dtype.__name__)
if register_kernels:
return
outputs_bool = wp.empty(9, dtype=wp.bool, device=device)
wp.launch(kernel, dim=1, inputs=[outputs_bool], device=device)
outputs_bool_cpu = outputs_bool.to("cpu").list()
test.assertTrue(outputs_bool_cpu[0], "wp.isfinite(zeros_quat) is not True")
test.assertFalse(outputs_bool_cpu[1], "wp.isinf(zeros_quat) is not False")
test.assertFalse(outputs_bool_cpu[2], "wp.isnan(zeros_quat) is not False")
test.assertFalse(outputs_bool_cpu[3], "wp.isfinite(nan_quat) is not False")
test.assertFalse(outputs_bool_cpu[4], "wp.isinf(nan_quat) is not False")
test.assertTrue(outputs_bool_cpu[5], "wp.isnan(nan_quat) is not True")
test.assertFalse(outputs_bool_cpu[6], "wp.isfinite(inf_quat) is not False")
test.assertTrue(outputs_bool_cpu[7], "wp.isinf(inf_quat) is not True")
test.assertFalse(outputs_bool_cpu[8], "wp.isnan(inf_quat) is not False")
def test_is_special_int(test, device, dtype, register_kernels=False):
vector_type = wp.types.vector(5, dtype)
matrix_type = wp.types.matrix((5, 5), dtype)
quat_type = wp.types.quaternion(dtype)
def check_is_special_int(bool_outputs: wp.array(dtype=wp.bool)):
bool_outputs[0] = wp.isfinite(dtype(0))
bool_outputs[1] = wp.isnan(dtype(0))
bool_outputs[2] = wp.isinf(dtype(0))
bool_outputs[3] = wp.isfinite(vector_type())
bool_outputs[4] = wp.isnan(vector_type())
bool_outputs[5] = wp.isinf(vector_type())
bool_outputs[6] = wp.isfinite(matrix_type())
bool_outputs[7] = wp.isnan(matrix_type())
bool_outputs[8] = wp.isinf(matrix_type())
bool_outputs[9] = wp.isfinite(quat_type())
bool_outputs[10] = wp.isnan(quat_type())
bool_outputs[11] = wp.isinf(quat_type())
kernel = getkernel(check_is_special_int, suffix=dtype.__name__)
if register_kernels:
return
outputs_bool = wp.empty(12, dtype=wp.bool, device=device)
wp.launch(kernel, dim=1, inputs=[outputs_bool], device=device)
outputs_bool_cpu = outputs_bool.to("cpu").list()
test.assertTrue(outputs_bool_cpu[0], "wp.isfinite(0) is not True")
    test.assertFalse(outputs_bool_cpu[1], "wp.isnan(0) is not False")
    test.assertFalse(outputs_bool_cpu[2], "wp.isinf(0) is not False")
    test.assertTrue(outputs_bool_cpu[3], "wp.isfinite(vec) is not True")
    test.assertFalse(outputs_bool_cpu[4], "wp.isnan(vec) is not False")
    test.assertFalse(outputs_bool_cpu[5], "wp.isinf(vec) is not False")
    test.assertTrue(outputs_bool_cpu[6], "wp.isfinite(matrix) is not True")
    test.assertFalse(outputs_bool_cpu[7], "wp.isnan(matrix) is not False")
    test.assertFalse(outputs_bool_cpu[8], "wp.isinf(matrix) is not False")
    test.assertTrue(outputs_bool_cpu[9], "wp.isfinite(quat) is not True")
    test.assertFalse(outputs_bool_cpu[10], "wp.isnan(quat) is not False")
    test.assertFalse(outputs_bool_cpu[11], "wp.isinf(quat) is not False")
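# For integer dtypes every representable value is finite, so the checks above
# expect isfinite to be True and isnan/isinf to be False for scalars, vectors,
# matrices, and quaternions alike.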
devices = get_test_devices()
class TestSpecialValues(unittest.TestCase):
pass
for dtype in [wp.float16, wp.float32, wp.float64]:
add_function_test_register_kernel(
TestSpecialValues, f"test_infinity_{dtype.__name__}", test_infinity_scalar, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestSpecialValues, f"test_nan_{dtype.__name__}", test_nan_scalar, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestSpecialValues, f"test_is_special_vec_{dtype.__name__}", test_is_special_vec, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestSpecialValues, f"test_is_special_mat_{dtype.__name__}", test_is_special_mat, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestSpecialValues, f"test_is_special_quat_{dtype.__name__}", test_is_special_quat, devices=devices, dtype=dtype
)
# Ensure functions like wp.isfinite work on integer types
for dtype in wp.types.int_types:
add_function_test_register_kernel(
TestSpecialValues, f"test_is_special_int_{dtype.__name__}", test_is_special_int, devices=devices, dtype=dtype
)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=False)
| 15,163 | Python | 40.774105 | 119 | 0.65429 |
NVIDIA/warp/warp/tests/test_model.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.sim import ModelBuilder
from warp.tests.unittest_utils import *
class TestModel(unittest.TestCase):
def test_add_triangles(self):
rng = np.random.default_rng(123)
pts = np.array(
[
[-0.00585869, 0.34189449, -1.17415233],
[-1.894547, 0.1788074, 0.9251329],
[-1.26141048, 0.16140787, 0.08823282],
[-0.08609255, -0.82722546, 0.65995427],
[0.78827592, -1.77375711, -0.55582718],
]
)
tris = np.array([[0, 3, 4], [0, 2, 3], [2, 1, 3], [1, 4, 3]])
builder1 = ModelBuilder()
builder2 = ModelBuilder()
for pt in pts:
builder1.add_particle(wp.vec3(pt), wp.vec3(), 1.0)
builder2.add_particle(wp.vec3(pt), wp.vec3(), 1.0)
# test add_triangle(s) with default arguments:
areas = builder2.add_triangles(tris[:, 0], tris[:, 1], tris[:, 2])
for i, t in enumerate(tris):
area = builder1.add_triangle(t[0], t[1], t[2])
self.assertAlmostEqual(area, areas[i], places=6)
# test add_triangle(s) with non default arguments:
tri_ke = rng.standard_normal(size=pts.shape[0])
tri_ka = rng.standard_normal(size=pts.shape[0])
tri_kd = rng.standard_normal(size=pts.shape[0])
tri_drag = rng.standard_normal(size=pts.shape[0])
tri_lift = rng.standard_normal(size=pts.shape[0])
for i, t in enumerate(tris):
builder1.add_triangle(
t[0],
t[1],
t[2],
tri_ke[i],
tri_ka[i],
tri_kd[i],
tri_drag[i],
tri_lift[i],
)
builder2.add_triangles(tris[:, 0], tris[:, 1], tris[:, 2], tri_ke, tri_ka, tri_kd, tri_drag, tri_lift)
assert_np_equal(np.array(builder1.tri_indices), np.array(builder2.tri_indices))
assert_np_equal(np.array(builder1.tri_poses), np.array(builder2.tri_poses), tol=1.0e-6)
assert_np_equal(np.array(builder1.tri_activations), np.array(builder2.tri_activations))
assert_np_equal(np.array(builder1.tri_materials), np.array(builder2.tri_materials))
def test_add_edges(self):
rng = np.random.default_rng(123)
pts = np.array(
[
[-0.00585869, 0.34189449, -1.17415233],
[-1.894547, 0.1788074, 0.9251329],
[-1.26141048, 0.16140787, 0.08823282],
[-0.08609255, -0.82722546, 0.65995427],
[0.78827592, -1.77375711, -0.55582718],
]
)
edges = np.array([[0, 4, 3, 1], [3, 2, 4, 1]])
builder1 = ModelBuilder()
builder2 = ModelBuilder()
for pt in pts:
builder1.add_particle(wp.vec3(pt), wp.vec3(), 1.0)
builder2.add_particle(wp.vec3(pt), wp.vec3(), 1.0)
# test defaults:
for i in range(2):
builder1.add_edge(edges[i, 0], edges[i, 1], edges[i, 2], edges[i, 3])
builder2.add_edges(edges[:, 0], edges[:, 1], edges[:, 2], edges[:, 3])
# test non defaults:
rest = rng.standard_normal(size=2)
edge_ke = rng.standard_normal(size=2)
edge_kd = rng.standard_normal(size=2)
for i in range(2):
builder1.add_edge(edges[i, 0], edges[i, 1], edges[i, 2], edges[i, 3], rest[i], edge_ke[i], edge_kd[i])
builder2.add_edges(edges[:, 0], edges[:, 1], edges[:, 2], edges[:, 3], rest, edge_ke, edge_kd)
assert_np_equal(np.array(builder1.edge_indices), np.array(builder2.edge_indices))
assert_np_equal(np.array(builder1.edge_rest_angle), np.array(builder2.edge_rest_angle), tol=1.0e-4)
assert_np_equal(np.array(builder1.edge_bending_properties), np.array(builder2.edge_bending_properties))
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 4,409 | Python | 39.458715 | 114 | 0.576321 |
NVIDIA/warp/warp/tests/test_modules_lite.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import warp as wp
from warp.tests.unittest_utils import *
devices = get_test_devices()
class TestModuleLite(unittest.TestCase):
def test_module_lite_load(self):
# Load current module
wp.load_module()
# Load named module
wp.load_module(wp.config)
        # Load named module recursively
wp.load_module(wp.config, recursive=True)
def test_module_lite_options(self):
wp.set_module_options({"max_unroll": 8})
module_options = wp.get_module_options()
self.assertIsInstance(module_options, dict)
self.assertEqual(module_options["max_unroll"], 8)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 1,158 | Python | 30.324324 | 76 | 0.709845 |
NVIDIA/warp/warp/tests/aux_test_conditional_unequal_types_kernels.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""This file defines a kernel that fails on codegen.py"""
import warp as wp
@wp.kernel
def unequal_types_kernel():
x = wp.int32(10)
y = 10
z = True
# Throws a TypeError
if x == y == z:
pass
| 646 | Python | 28.40909 | 76 | 0.733746 |
NVIDIA/warp/warp/tests/test_rand.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def test_kernel(
kernel_seed: int,
int_a: wp.array(dtype=int),
int_ab: wp.array(dtype=int),
float_01: wp.array(dtype=float),
float_ab: wp.array(dtype=float),
):
tid = wp.tid()
state = wp.rand_init(kernel_seed, tid)
int_a[tid] = wp.randi(state)
int_ab[tid] = wp.randi(state, 0, 100)
float_01[tid] = wp.randf(state)
float_ab[tid] = wp.randf(state, 0.0, 100.0)
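# Note: wp.rand_init(seed, offset) combines the seed with a per-thread offset so
# that each thread draws from an independent deterministic stream; the hard-coded
# reference values in test_rand below therefore assume seed == 42 and N == 10.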
def test_rand(test, device):
N = 10
int_a_device = wp.zeros(N, dtype=int, device=device)
int_a_host = wp.zeros(N, dtype=int, device="cpu")
int_ab_device = wp.zeros(N, dtype=int, device=device)
int_ab_host = wp.zeros(N, dtype=int, device="cpu")
float_01_device = wp.zeros(N, dtype=float, device=device)
float_01_host = wp.zeros(N, dtype=float, device="cpu")
float_ab_device = wp.zeros(N, dtype=float, device=device)
float_ab_host = wp.zeros(N, dtype=float, device="cpu")
seed = 42
wp.launch(
kernel=test_kernel,
dim=N,
inputs=[seed, int_a_device, int_ab_device, float_01_device, float_ab_device],
outputs=[],
device=device,
)
wp.copy(int_a_host, int_a_device)
wp.copy(int_ab_host, int_ab_device)
wp.copy(float_01_host, float_01_device)
wp.copy(float_ab_host, float_ab_device)
wp.synchronize_device(device)
int_a = int_a_host.numpy()
int_ab = int_ab_host.numpy()
float_01 = float_01_host.numpy()
float_ab = float_ab_host.numpy()
int_a_true = np.array(
[
-575632308,
59537738,
1898992239,
442961864,
-1069147335,
-478445524,
1803659809,
2122909397,
-1888556360,
334603718,
]
)
int_ab_true = np.array([46, 58, 46, 83, 85, 39, 72, 99, 18, 41])
float_01_true = np.array(
[
0.72961855,
0.86200964,
0.28770837,
0.8187722,
0.186335,
0.6101239,
0.56432086,
0.70428324,
0.64812654,
0.27679986,
]
)
float_ab_true = np.array(
[96.04259, 73.33809, 63.601555, 38.647305, 71.813896, 64.65809, 77.79791, 46.579605, 94.614456, 91.921814]
)
test.assertTrue((int_a == int_a_true).all())
test.assertTrue((int_ab == int_ab_true).all())
err = np.max(np.abs(float_01 - float_01_true))
test.assertTrue(err < 1e-04)
err = np.max(np.abs(float_ab - float_ab_true))
test.assertTrue(err < 1e-04)
@wp.kernel
def sample_cdf_kernel(kernel_seed: int, cdf: wp.array(dtype=float), samples: wp.array(dtype=int)):
tid = wp.tid()
state = wp.rand_init(kernel_seed, tid)
samples[tid] = wp.sample_cdf(state, cdf)
def test_sample_cdf(test, device):
seed = 42
cdf = np.arange(0.0, 1.0, 0.01, dtype=float)
cdf = cdf * cdf
cdf = wp.array(cdf, dtype=float, device=device)
num_samples = 1000
samples = wp.zeros(num_samples, dtype=int, device=device)
wp.launch(kernel=sample_cdf_kernel, dim=num_samples, inputs=[seed, cdf, samples], device=device)
# histogram should be linear
# plt.hist(samples.numpy())
# plt.show()
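    # A loose statistical check one could add here (a sketch, not a strict test):
    # with cdf(i) ~ i*i the sampled indices have a linearly increasing density,
    # so later bins should collect more hits than earlier ones, e.g.:
    # counts, _ = np.histogram(samples.numpy(), bins=4)
    # assert (np.diff(counts) > 0).all()  # hypothetical loose check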
@wp.kernel
def sampling_kernel(
kernel_seed: int,
triangle_samples: wp.array(dtype=wp.vec2),
square_samples: wp.array(dtype=wp.vec2),
ring_samples: wp.array(dtype=wp.vec2),
disk_samples: wp.array(dtype=wp.vec2),
sphere_surface_samples: wp.array(dtype=wp.vec3),
sphere_samples: wp.array(dtype=wp.vec3),
hemisphere_surface_samples: wp.array(dtype=wp.vec3),
hemisphere_samples: wp.array(dtype=wp.vec3),
cube_samples: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
state = wp.rand_init(kernel_seed, tid)
triangle_samples[tid] = wp.sample_triangle(state)
ring_samples[tid] = wp.sample_unit_ring(state)
disk_samples[tid] = wp.sample_unit_disk(state)
sphere_surface_samples[tid] = wp.sample_unit_sphere_surface(state)
sphere_samples[tid] = wp.sample_unit_sphere(state)
hemisphere_surface_samples[tid] = wp.sample_unit_hemisphere_surface(state)
hemisphere_samples[tid] = wp.sample_unit_hemisphere(state)
square_samples[tid] = wp.sample_unit_square(state)
cube_samples[tid] = wp.sample_unit_cube(state)
def test_sampling_methods(test, device):
seed = 42
num_samples = 100
triangle_samples = wp.zeros(num_samples, dtype=wp.vec2, device=device)
square_samples = wp.zeros(num_samples, dtype=wp.vec2, device=device)
ring_samples = wp.zeros(num_samples, dtype=wp.vec2, device=device)
disk_samples = wp.zeros(num_samples, dtype=wp.vec2, device=device)
sphere_surface_samples = wp.zeros(num_samples, dtype=wp.vec3, device=device)
sphere_samples = wp.zeros(num_samples, dtype=wp.vec3, device=device)
hemisphere_surface_samples = wp.zeros(num_samples, dtype=wp.vec3, device=device)
hemisphere_samples = wp.zeros(num_samples, dtype=wp.vec3, device=device)
cube_samples = wp.zeros(num_samples, dtype=wp.vec3, device=device)
wp.launch(
kernel=sampling_kernel,
dim=num_samples,
inputs=[
seed,
triangle_samples,
square_samples,
ring_samples,
disk_samples,
sphere_surface_samples,
sphere_samples,
hemisphere_surface_samples,
hemisphere_samples,
cube_samples,
],
device=device,
)
# bounds check
test.assertTrue((triangle_samples.numpy()[:, 0] <= 1.0).all())
test.assertTrue((triangle_samples.numpy()[:, 0] >= 0.0).all())
test.assertTrue((triangle_samples.numpy()[:, 1] >= 0.0).all())
    test.assertTrue((triangle_samples.numpy()[:, 1] <= 1.0).all())
test.assertTrue((square_samples.numpy()[:, 0] >= -0.5).all())
    test.assertTrue((square_samples.numpy()[:, 0] <= 0.5).all())
test.assertTrue((square_samples.numpy()[:, 1] >= -0.5).all())
test.assertTrue((square_samples.numpy()[:, 1] <= 0.5).all())
test.assertTrue((cube_samples.numpy()[:, 0] >= -0.5).all())
test.assertTrue((cube_samples.numpy()[:, 0] <= 0.5).all())
test.assertTrue((cube_samples.numpy()[:, 1] >= -0.5).all())
test.assertTrue((cube_samples.numpy()[:, 1] <= 0.5).all())
test.assertTrue((cube_samples.numpy()[:, 2] >= -0.5).all())
test.assertTrue((cube_samples.numpy()[:, 2] <= 0.5).all())
test.assertTrue((hemisphere_surface_samples.numpy()[:, 2] >= 0.0).all())
test.assertTrue((hemisphere_samples.numpy()[:, 2] >= 0.0).all())
    test.assertTrue((np.linalg.norm(ring_samples.numpy(), axis=1) <= 1.0 + 1e-6).all())
    test.assertTrue((np.linalg.norm(disk_samples.numpy(), axis=1) <= 1.0 + 1e-6).all())
    test.assertTrue((np.linalg.norm(sphere_surface_samples.numpy(), axis=1) <= 1.0 + 1e-6).all())
    test.assertTrue((np.linalg.norm(sphere_samples.numpy(), axis=1) <= 1.0 + 1e-6).all())
    test.assertTrue((np.linalg.norm(hemisphere_surface_samples.numpy(), axis=1) <= 1.0 + 1e-6).all())
    test.assertTrue((np.linalg.norm(hemisphere_samples.numpy(), axis=1) <= 1.0 + 1e-6).all())
@wp.kernel
def sample_poisson_kernel(
kernel_seed: int, poisson_samples_low: wp.array(dtype=wp.uint32), poisson_samples_high: wp.array(dtype=wp.uint32)
):
tid = wp.tid()
state = wp.rand_init(kernel_seed, tid)
x = wp.poisson(state, 3.0)
y = wp.poisson(state, 42.0)
poisson_samples_low[tid] = x
poisson_samples_high[tid] = y
def test_poisson(test, device):
seed = 13
N = 20000
poisson_low = wp.zeros(N, dtype=wp.uint32, device=device)
poisson_high = wp.zeros(N, dtype=wp.uint32, device=device)
wp.launch(kernel=sample_poisson_kernel, dim=N, inputs=[seed, poisson_low, poisson_high], device=device)
# bins = np.arange(100)
# _ = plt.hist(poisson_high.numpy(), bins)
# plt.show()
rng = np.random.default_rng(seed)
np_poisson_low = rng.poisson(lam=3.0, size=N)
np_poisson_high = rng.poisson(lam=42.0, size=N)
poisson_low_mean = np.mean(poisson_low.numpy())
np_poisson_low_mean = np.mean(np_poisson_low)
poisson_high_mean = np.mean(poisson_high.numpy())
np_poisson_high_mean = np.mean(np_poisson_high)
poisson_low_std = np.std(poisson_low.numpy())
np_poisson_low_std = np.std(np_poisson_low)
poisson_high_std = np.std(poisson_high.numpy())
np_poisson_high_std = np.std(np_poisson_high)
# compare basic distribution characteristics
test.assertTrue(np.abs(poisson_low_mean - np_poisson_low_mean) <= 5e-1)
test.assertTrue(np.abs(poisson_high_mean - np_poisson_high_mean) <= 5e-1)
test.assertTrue(np.abs(poisson_low_std - np_poisson_low_std) <= 2e-1)
test.assertTrue(np.abs(poisson_high_std - np_poisson_high_std) <= 2e-1)
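# For reference: a Poisson distribution with rate lam has mean lam and variance
# lam, so std = sqrt(lam) (about 1.73 for lam=3.0 and 6.48 for lam=42.0); the
# test compares against an equally sized NumPy sample rather than these exact
# values, hence the fairly loose tolerances above.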
devices = get_test_devices()
class TestRand(unittest.TestCase):
pass
add_function_test(TestRand, "test_rand", test_rand, devices=devices)
add_function_test(TestRand, "test_sample_cdf", test_sample_cdf, devices=devices)
add_function_test(TestRand, "test_sampling_methods", test_sampling_methods, devices=devices)
add_function_test(TestRand, "test_poisson", test_poisson, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 9,760 | Python | 33.369718 | 117 | 0.636373 |
NVIDIA/warp/warp/tests/disabled_kinematics.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import unittest
import warp as wp
import warp.sim
from warp.tests.unittest_utils import *
def build_ant(num_envs):
builder = wp.sim.ModelBuilder()
for i in range(num_envs):
wp.sim.parse_mjcf(
os.path.join(os.path.dirname(__file__), "../../examples/assets/nv_ant.xml"),
builder,
up_axis="y",
)
coord_count = 15
dof_count = 14
coord_start = i * coord_count
dof_start = i * dof_count
# base
builder.joint_q[coord_start : coord_start + 3] = [i * 2.0, 0.70, 0.0]
builder.joint_q[coord_start + 3 : coord_start + 7] = wp.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi * 0.5)
# joints
builder.joint_q[coord_start + 7 : coord_start + coord_count] = [0.0, 1.0, 0.0, -1.0, 0.0, -1.0, 0.0, 1.0]
builder.joint_qd[dof_start + 6 : dof_start + dof_count] = [1.0, 1.0, 1.0, -1.0, 1.0, -1.0, 1.0, 1.0]
return builder
def build_complex_joint_mechanism(chain_length):
builder = wp.sim.ModelBuilder()
com0 = wp.vec3(1.0, 2.0, 3.0)
com1 = wp.vec3(4.0, 5.0, 6.0)
com2 = wp.vec3(7.0, 8.0, 9.0)
ax0 = wp.normalize(wp.vec3(-1.0, 2.0, 3.0))
ax1 = wp.normalize(wp.vec3(4.0, -1.0, 2.0))
ax2 = wp.normalize(wp.vec3(-3.0, 4.0, -1.0))
# declare some transforms with nonzero translation and orientation
tf0 = wp.transform(wp.vec3(1.0, 2.0, 3.0), wp.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi * 0.25))
tf1 = wp.transform(wp.vec3(4.0, 5.0, 6.0), wp.quat_from_axis_angle((0.0, 1.0, 0.0), math.pi * 0.5))
tf2 = wp.transform(wp.vec3(7.0, 8.0, 9.0), wp.quat_from_axis_angle((0.0, 0.0, 1.0), math.pi * 0.75))
parent = -1
for _i in range(chain_length):
b0 = builder.add_body(com=com0)
builder.add_joint_fixed(parent=parent, child=b0, parent_xform=tf1, child_xform=tf0)
assert builder.articulation_count == 1
b1 = builder.add_body(com=com1)
builder.add_joint_revolute(parent=b0, child=b1, parent_xform=tf1, child_xform=tf2, axis=ax1)
builder.joint_q[-1] = 0.3
builder.joint_qd[-1] = 1.0
b2 = builder.add_body(com=com2)
builder.add_joint_universal(parent=b1, child=b2, parent_xform=tf2, child_xform=tf0, axis_0=ax0, axis_1=ax1)
builder.joint_q[-2:] = [0.3, 0.5]
builder.joint_qd[-2:] = [1.0, -1.0]
b3 = builder.add_body(com=com0)
builder.add_joint_ball(parent=b2, child=b3, parent_xform=tf0, child_xform=tf1)
builder.joint_q[-4:] = list(wp.quat_from_axis_angle(ax0, 0.7))
builder.joint_qd[-3:] = [1.0, -0.6, 1.5]
b4 = builder.add_body(com=com1)
builder.add_joint_compound(
parent=b3,
child=b4,
parent_xform=tf2,
child_xform=tf1,
axis_0=(0, 0, 1),
axis_1=(1, 0, 0),
axis_2=(0, 1, 0),
)
builder.joint_q[-3:] = [0.3, 0.5, 0.27]
builder.joint_qd[-3:] = [1.23, -1.0, 0.5]
b5 = builder.add_body(com=com2)
builder.add_joint_prismatic(
parent=b4,
child=b5,
parent_xform=tf2,
child_xform=tf0,
axis=ax0,
)
builder.joint_q[-1] = 0.92
builder.joint_qd[-1] = -0.63
b6 = builder.add_body(com=com0)
builder.add_joint_d6(
parent=b5,
child=b6,
parent_xform=tf0,
child_xform=tf2,
linear_axes=[ax0, ax1, wp.cross(ax0, ax1)],
angular_axes=[ax1, ax2, wp.cross(ax1, ax2)],
)
builder.joint_q[-6:] = [0.3, 0.5, 0.7, 0.9, 1.1, 1.3]
builder.joint_qd[-6:] = [1.0, -1.0, 0.5, 0.8, -0.3, 0.1]
b7 = builder.add_body(com=com1)
builder.add_joint_free(
parent=b6,
child=b7,
parent_xform=tf1,
child_xform=tf2,
)
builder.joint_q[-7:] = [0.5, -0.9, 1.4] + list(wp.quat_rpy(0.3, -0.5, 0.7))
builder.joint_qd[-6:] = [1.0, -1.0, 0.5, 0.8, -0.3, 0.1]
b8 = builder.add_body(com=com2)
builder.add_joint_distance(
parent=b7,
child=b8,
parent_xform=tf1,
child_xform=tf2,
)
builder.joint_q[-7:] = [-0.3, -0.7, 0.2] + list(wp.quat_rpy(0.1, 0.1, 0.4))
builder.joint_qd[-6:] = [-0.34, 0.5, -0.6, -0.4, 0.2, 0.1]
# D6 joint that behaves like a fixed joint
b9 = builder.add_body(com=com0)
builder.add_joint_d6(
parent=b8,
child=b9,
parent_xform=tf0,
child_xform=tf2,
linear_axes=[],
angular_axes=[],
)
b10 = builder.add_body(com=com0)
builder.add_joint_d6(
parent=b9,
child=b10,
parent_xform=tf1,
child_xform=tf2,
linear_axes=[ax1],
angular_axes=[ax2, ax0],
)
builder.joint_q[-3:] = [0.3, 0.5, 0.7]
builder.joint_qd[-3:] = [1.0, -1.0, 0.5]
b11 = builder.add_body(com=com1)
builder.add_joint_d6(
parent=b10,
child=b11,
parent_xform=tf1,
child_xform=tf2,
linear_axes=[ax1, ax0, wp.cross(ax1, ax0)],
angular_axes=[],
)
builder.joint_q[-3:] = [0.3, 0.5, 0.7]
builder.joint_qd[-3:] = [1.0, -1.0, 0.5]
b12 = builder.add_body(com=com2)
builder.add_joint_d6(
parent=b11,
child=b12,
parent_xform=tf1,
child_xform=tf2,
linear_axes=[],
angular_axes=[ax1, ax2, wp.cross(ax1, ax2)],
)
builder.joint_q[-3:] = [0.3, 0.5, 0.7]
builder.joint_qd[-3:] = [1.0, -1.0, 0.5]
parent = b12
return builder
def check_fk_ik(builder, device):
model = builder.finalize(device)
state = model.state()
q_fk = model.joint_q.numpy()
qd_fk = model.joint_qd.numpy()
wp.sim.eval_fk(model, model.joint_q, model.joint_qd, None, state)
q_ik = wp.zeros_like(model.joint_q)
qd_ik = wp.zeros_like(model.joint_qd)
wp.sim.eval_ik(model, state, q_ik, qd_ik)
# adjust numpy print settings
# np.set_printoptions(precision=4, floatmode="fixed", suppress=True)
# print("q:")
# print(np.array(q_fk))
# print(q_ik.numpy())
# print("qd:")
# print(np.array(qd_fk))
# print(qd_ik.numpy())
assert_np_equal(q_ik.numpy(), q_fk, tol=1e-4)
assert_np_equal(qd_ik.numpy(), qd_fk, tol=1e-4)
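# check_fk_ik verifies a round trip: eval_fk maps joint coordinates (q, qd) to
# body transforms and velocities, and eval_ik maps them back, so recovering the
# original q/qd exercises both directions for every joint type in the builder.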
def test_fk_ik_ant(test, device):
builder = build_ant(3)
check_fk_ik(builder, device)
def test_fk_ik_complex_joint_mechanism(test, device):
builder = build_complex_joint_mechanism(2)
check_fk_ik(builder, device)
devices = get_test_devices()
class TestKinematics(unittest.TestCase):
pass
add_function_test(TestKinematics, "test_fk_ik_ant", test_fk_ik_ant, devices=devices)
add_function_test(
TestKinematics, "test_fk_ik_complex_joint_mechanism", test_fk_ik_complex_joint_mechanism, devices=devices
)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=False)
| 7,602 | Python | 30.945378 | 117 | 0.551697 |
NVIDIA/warp/warp/tests/test_array_reduce.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
from warp.utils import array_inner, array_sum
def make_test_array_sum(dtype):
N = 1000
def test_array_sum(test, device):
rng = np.random.default_rng(123)
cols = wp.types.type_length(dtype)
values_np = rng.random(size=(N, cols))
values = wp.array(values_np, device=device, dtype=dtype)
vsum = array_sum(values)
ref_vsum = values_np.sum(axis=0)
assert_np_equal(vsum / N, ref_vsum / N, 0.0001)
return test_array_sum
def make_test_array_sum_axis(dtype):
I = 5
J = 10
K = 2
N = I * J * K
def test_array_sum(test, device):
rng = np.random.default_rng(123)
values_np = rng.random(size=(I, J, K))
values = wp.array(values_np, shape=(I, J, K), device=device, dtype=dtype)
for axis in range(3):
vsum = array_sum(values, axis=axis)
ref_vsum = values_np.sum(axis=axis)
assert_np_equal(vsum.numpy() / N, ref_vsum / N, 0.0001)
return test_array_sum
def test_array_sum_empty(test, device):
values = wp.array([], device=device, dtype=wp.vec2)
assert_np_equal(array_sum(values), np.zeros(2))
values = wp.array([], shape=(0, 3), device=device, dtype=float)
assert_np_equal(array_sum(values, axis=0).numpy(), np.zeros((1, 3)))
def make_test_array_inner(dtype):
N = 1000
def test_array_inner(test, device):
rng = np.random.default_rng(123)
cols = wp.types.type_length(dtype)
a_np = rng.random(size=(N, cols))
b_np = rng.random(size=(N, cols))
a = wp.array(a_np, device=device, dtype=dtype)
b = wp.array(b_np, device=device, dtype=dtype)
ab = array_inner(a, b)
ref_ab = np.dot(a_np.flatten(), b_np.flatten())
test.assertAlmostEqual(ab / N, ref_ab / N, places=5)
return test_array_inner
def make_test_array_inner_axis(dtype):
I = 5
J = 10
K = 2
N = I * J * K
def test_array_inner(test, device):
rng = np.random.default_rng(123)
a_np = rng.random(size=(I, J, K))
b_np = rng.random(size=(I, J, K))
a = wp.array(a_np, shape=(I, J, K), device=device, dtype=dtype)
b = wp.array(b_np, shape=(I, J, K), device=device, dtype=dtype)
ab = array_inner(a, b, axis=0)
ref_ab = np.einsum(a_np, [0, 1, 2], b_np, [0, 1, 2], [1, 2])
assert_np_equal(ab.numpy() / N, ref_ab / N, 0.0001)
ab = array_inner(a, b, axis=1)
ref_ab = np.einsum(a_np, [0, 1, 2], b_np, [0, 1, 2], [0, 2])
assert_np_equal(ab.numpy() / N, ref_ab / N, 0.0001)
ab = array_inner(a, b, axis=2)
ref_ab = np.einsum(a_np, [0, 1, 2], b_np, [0, 1, 2], [0, 1])
assert_np_equal(ab.numpy() / N, ref_ab / N, 0.0001)
return test_array_inner
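# Note on the reference values above: np.einsum(a, [0, 1, 2], b, [0, 1, 2], out)
# multiplies a and b elementwise and sums over whichever axis is missing from the
# output subscripts, which is exactly the per-axis inner product that
# array_inner(axis=...) computes.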
def test_array_inner_empty(test, device):
values = wp.array([], device=device, dtype=wp.vec2)
test.assertEqual(array_inner(values, values), 0.0)
values = wp.array([], shape=(0, 3), device=device, dtype=float)
assert_np_equal(array_inner(values, values, axis=0).numpy(), np.zeros((1, 3)))
devices = get_test_devices()
class TestArrayReduce(unittest.TestCase):
pass
add_function_test(TestArrayReduce, "test_array_sum_double", make_test_array_sum(wp.float64), devices=devices)
add_function_test(TestArrayReduce, "test_array_sum_vec3", make_test_array_sum(wp.vec3), devices=devices)
add_function_test(TestArrayReduce, "test_array_sum_axis_float", make_test_array_sum_axis(wp.float32), devices=devices)
add_function_test(TestArrayReduce, "test_array_sum_empty", test_array_sum_empty, devices=devices)
add_function_test(TestArrayReduce, "test_array_inner_double", make_test_array_inner(wp.float64), devices=devices)
add_function_test(TestArrayReduce, "test_array_inner_vec3", make_test_array_inner(wp.vec3), devices=devices)
add_function_test(
TestArrayReduce, "test_array_inner_axis_float", make_test_array_inner_axis(wp.float32), devices=devices
)
add_function_test(TestArrayReduce, "test_array_inner_empty", test_array_inner_empty, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 4,684 | Python | 30.442953 | 118 | 0.639197 |
NVIDIA/warp/warp/tests/test_operators.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def test_operators_scalar_float():
a = 1.0
b = 2.0
c = a * b
d = a + b
e = a / b
f = a - b
g = b**8.0
h = 10.0 // 3.0
expect_eq(c, 2.0)
expect_eq(d, 3.0)
expect_eq(e, 0.5)
expect_eq(f, -1.0)
expect_eq(g, 256.0)
expect_eq(h, 3.0)
@wp.kernel
def test_operators_scalar_int():
a = 1
b = 2
c = a * b
d = a + b
e = a / b
f = a - b
# g = b**8 # integer pow not implemented
h = 10 // 3
i = 10 % 3
j = 2 << 3
k = 16 >> 1
expect_eq(c, 2)
expect_eq(d, 3)
expect_eq(e, 0)
expect_eq(f, -1)
# expect_eq(g, 256)
expect_eq(h, 3)
expect_eq(i, 1)
expect_eq(j, 16)
expect_eq(k, 8)
f0 = wp.uint32(1 << 0)
f1 = wp.uint32(1 << 3)
expect_eq(f0 | f1, f0 + f1)
expect_eq(f0 & f1, wp.uint32(0))
l = wp.uint8(0)
for n in range(8):
l |= wp.uint8(1 << n)
expect_eq(l, ~wp.uint8(0))
@wp.kernel
def test_operators_vector_index():
v = wp.vec4(1.0, 2.0, 3.0, 4.0)
expect_eq(v[0], 1.0)
expect_eq(v[1], 2.0)
expect_eq(v[2], 3.0)
expect_eq(v[3], 4.0)
@wp.kernel
def test_operators_matrix_index():
m22 = wp.mat22(1.0, 2.0, 3.0, 4.0)
expect_eq(m22[0, 0], 1.0)
expect_eq(m22[0, 1], 2.0)
expect_eq(m22[1, 0], 3.0)
expect_eq(m22[1, 1], 4.0)
@wp.kernel
def test_operators_vec3():
v = vec3(1.0, 2.0, 3.0)
r0 = v * 3.0
r1 = 3.0 * v
expect_eq(r0, vec3(3.0, 6.0, 9.0))
expect_eq(r1, vec3(3.0, 6.0, 9.0))
col0 = vec3(1.0, 0.0, 0.0)
col1 = vec3(0.0, 2.0, 0.0)
col2 = vec3(0.0, 0.0, 3.0)
m = mat33(col0, col1, col2)
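    # Constructing a matrix from vectors fills its columns, so multiplying by a
    # basis vector is expected to return the corresponding column: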
expect_eq(m * vec3(1.0, 0.0, 0.0), col0)
expect_eq(m * vec3(0.0, 1.0, 0.0), col1)
expect_eq(m * vec3(0.0, 0.0, 1.0), col2)
two = vec3(1.0) * 2.0
expect_eq(two, vec3(2.0, 2.0, 2.0))
@wp.kernel
def test_operators_vec4():
v = vec4(1.0, 2.0, 3.0, 4.0)
r0 = v * 3.0
r1 = 3.0 * v
expect_eq(r0, vec4(3.0, 6.0, 9.0, 12.0))
expect_eq(r1, vec4(3.0, 6.0, 9.0, 12.0))
col0 = vec4(1.0, 0.0, 0.0, 0.0)
col1 = vec4(0.0, 2.0, 0.0, 0.0)
col2 = vec4(0.0, 0.0, 3.0, 0.0)
col3 = vec4(0.0, 0.0, 0.0, 4.0)
m = mat44(col0, col1, col2, col3)
expect_eq(m * vec4(1.0, 0.0, 0.0, 0.0), col0)
expect_eq(m * vec4(0.0, 1.0, 0.0, 0.0), col1)
expect_eq(m * vec4(0.0, 0.0, 1.0, 0.0), col2)
expect_eq(m * vec4(0.0, 0.0, 0.0, 1.0), col3)
two = vec4(1.0) * 2.0
expect_eq(two, vec4(2.0, 2.0, 2.0, 2.0))
@wp.kernel
def test_operators_mat22():
m = mat22(1.0, 2.0, 3.0, 4.0)
r = mat22(3.0, 6.0, 9.0, 12.0)
r0 = m * 3.0
r1 = 3.0 * m
expect_eq(r0, r)
expect_eq(r1, r)
expect_eq(r0[0, 0], 3.0)
expect_eq(r0[0, 1], 6.0)
expect_eq(r0[1, 0], 9.0)
expect_eq(r0[1, 1], 12.0)
expect_eq(r0[0], wp.vec2(3.0, 6.0))
expect_eq(r0[1], wp.vec2(9.0, 12.0))
@wp.kernel
def test_operators_mat33():
m = mat33(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0)
r = mat33(3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0, 27.0)
r0 = m * 3.0
r1 = 3.0 * m
expect_eq(r0, r)
expect_eq(r1, r)
expect_eq(r0[0, 0], 3.0)
expect_eq(r0[0, 1], 6.0)
expect_eq(r0[0, 2], 9.0)
expect_eq(r0[1, 0], 12.0)
expect_eq(r0[1, 1], 15.0)
expect_eq(r0[1, 2], 18.0)
expect_eq(r0[2, 0], 21.0)
expect_eq(r0[2, 1], 24.0)
expect_eq(r0[2, 2], 27.0)
expect_eq(r0[0], wp.vec3(3.0, 6.0, 9.0))
expect_eq(r0[1], wp.vec3(12.0, 15.0, 18.0))
expect_eq(r0[2], wp.vec3(21.0, 24.0, 27.0))
@wp.kernel
def test_operators_mat44():
m = mat44(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0)
r = mat44(3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0, 27.0, 30.0, 33.0, 36.0, 39.0, 42.0, 45.0, 48.0)
r0 = m * 3.0
r1 = 3.0 * m
expect_eq(r0, r)
expect_eq(r1, r)
expect_eq(r0[0, 0], 3.0)
expect_eq(r0[0, 1], 6.0)
expect_eq(r0[0, 2], 9.0)
expect_eq(r0[0, 3], 12.0)
expect_eq(r0[1, 0], 15.0)
expect_eq(r0[1, 1], 18.0)
expect_eq(r0[1, 2], 21.0)
expect_eq(r0[1, 3], 24.0)
expect_eq(r0[2, 0], 27.0)
expect_eq(r0[2, 1], 30.0)
expect_eq(r0[2, 2], 33.0)
expect_eq(r0[2, 3], 36.0)
expect_eq(r0[3, 0], 39.0)
expect_eq(r0[3, 1], 42.0)
expect_eq(r0[3, 2], 45.0)
expect_eq(r0[3, 3], 48.0)
expect_eq(r0[0], wp.vec4(3.0, 6.0, 9.0, 12.0))
expect_eq(r0[1], wp.vec4(15.0, 18.0, 21.0, 24.0))
expect_eq(r0[2], wp.vec4(27.0, 30.0, 33.0, 36.0))
expect_eq(r0[3], wp.vec4(39.0, 42.0, 45.0, 48.0))
devices = get_test_devices()
class TestOperators(unittest.TestCase):
pass
add_kernel_test(TestOperators, test_operators_scalar_float, dim=1, devices=devices)
add_kernel_test(TestOperators, test_operators_scalar_int, dim=1, devices=devices)
add_kernel_test(TestOperators, test_operators_matrix_index, dim=1, devices=devices)
add_kernel_test(TestOperators, test_operators_vector_index, dim=1, devices=devices)
add_kernel_test(TestOperators, test_operators_vec3, dim=1, devices=devices)
add_kernel_test(TestOperators, test_operators_vec4, dim=1, devices=devices)
add_kernel_test(TestOperators, test_operators_mat22, dim=1, devices=devices)
add_kernel_test(TestOperators, test_operators_mat33, dim=1, devices=devices)
add_kernel_test(TestOperators, test_operators_mat44, dim=1, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 5,972 | Python | 22.987952 | 106 | 0.55576 |
NVIDIA/warp/warp/tests/aux_test_reference_reference.py | # This file is used to test reloading module references.
import warp as wp
@wp.func
def more_magic():
return 2.0
| 120 | Python | 12.444443 | 56 | 0.708333 |
NVIDIA/warp/warp/tests/test_codegen.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import sys
import unittest
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def test_rename():
a = 0
b = 1
a = b
a = 2
wp.expect_eq(a, 2)
wp.expect_eq(b, 1)
@wp.kernel
def test_inplace():
a = 1.0
a += 2.0
wp.expect_eq(a, 3.0)
@wp.kernel
def test_constant(c: float):
a = 0.0
a = c + 1.0
wp.expect_eq(a, 2.0)
@wp.kernel
def test_dynamic_for_rename(n: int):
f0 = int(0.0)
f1 = int(1.0)
for _i in range(0, n):
f = f0 + f1
f0 = f1
f1 = f
wp.expect_eq(f1, 89)
@wp.kernel
def test_dynamic_for_inplace(n: int):
a = float(0.0)
for _i in range(0, n):
a += 1.0
wp.expect_eq(a, float(n))
@wp.kernel
def test_reassign():
f0 = 1.0
f1 = f0
f1 = f1 + 2.0
wp.expect_eq(f1, 3.0)
wp.expect_eq(f0, 1.0)
@wp.kernel
def test_dynamic_reassign(n: int):
f0 = wp.vec3()
f1 = f0
for _i in range(0, n):
f1 = f1 - wp.vec3(2.0, 0.0, 0.0)
wp.expect_eq(f1, wp.vec3(-4.0, 0.0, 0.0))
wp.expect_eq(f0, wp.vec3())
@wp.kernel
def test_range_static_sum(result: wp.array(dtype=int)):
a = int(0)
for _i in range(10):
a = a + 1
b = int(0)
for _i in range(0, 10):
b = b + 1
c = int(0)
for _i in range(0, 20, 2):
c = c + 1
result[0] = a
result[1] = b
result[2] = c
@wp.kernel
def test_range_dynamic_sum(start: int, end: int, step: int, result: wp.array(dtype=int)):
a = int(0)
for _i in range(end):
a = a + 1
b = int(0)
for _i in range(start, end):
b = b + 1
c = int(0)
for _i in range(start, end * step, step):
c = c + 1
d = int(0)
for _i in range(end * step, start, -step):
d = d + 1
result[0] = a
result[1] = b
result[2] = c
result[3] = d
@wp.kernel
def test_range_dynamic(start: int, end: int, step: int, result: wp.array(dtype=int)):
output = int(0)
for i in range(start, end, step):
result[output] = i
output += 1
@wp.kernel
def test_range_dynamic_nested(n: int):
sum1 = float(0.0)
sum2 = float(0.0)
sum3 = float(0.0)
for _i in range(n):
sum1 = sum1 + 1.0
sum3 = sum3 + 1.0
for _j in range(n):
sum2 = sum2 + 1.0
sum3 = sum3 + 1.0
sum3 = sum3 + 1.0
wp.expect_eq(sum1, float(n))
wp.expect_eq(sum2, float(n * n))
wp.expect_eq(sum3, float(n * n + 2 * n))
@wp.kernel
def test_while(n: int):
i = int(0)
while i < n:
i = i + 1
wp.expect_eq(i, n)
@wp.kernel
def test_pass(n: int):
i = int(0)
while i < n:
if False:
pass
else:
i = i + 1
wp.expect_eq(i, n)
@wp.kernel
def test_break(n: int):
a = int(0)
for _i in range(0, n):
if a == 5:
break
a += 1
wp.expect_eq(a, 5)
@wp.kernel
def test_break_early(n: int):
a = int(0)
for i in range(0, n):
if i > 5:
a = 1
break
wp.expect_eq(a, 1)
@wp.kernel
def test_break_unroll():
a = int(0)
for i in range(0, 10):
if i > 5:
a = i
break
wp.expect_eq(a, 6)
@wp.kernel
def test_break_while():
a = int(0)
while a < 10:
if a > 5:
break
a += 1
wp.expect_eq(a, 6)
@wp.kernel
def test_break_multiple(n: int):
a = int(0)
for i in range(0, n):
if i == 6:
a = 1
break
if i == 5:
a = 2
break
if i == 7:
a = 3
break
wp.expect_eq(a, 2)
@wp.kernel
def test_continue(n: int):
a = int(0)
for i in range(0, n):
if i == 5:
continue
a += 1
wp.expect_eq(a, n - 1)
@wp.kernel
def test_continue_unroll():
a = int(0)
for i in range(0, 10):
if i == 5:
continue
a += 1
wp.expect_eq(a, 9)
lower = wp.constant(-3)
upper = wp.constant(3)
step = wp.constant(2)
# test unrolling of loops with constant size params
# we can't easily test if unrolling has occurred
# so just verify correctness at this stage
@wp.kernel
def test_range_constant():
s = 0
for i in range(upper):
s += i
# sum [0, 3)
wp.expect_eq(s, 3)
s = 0
for i in range(lower, upper):
s += i
# sum [-3, 3)
wp.expect_eq(s, -3)
s = 0
for i in range(lower, upper, step):
s += i
# sum [-3, 3)
wp.expect_eq(s, -3)
N = wp.constant(3)
# test a dynamic loop nested between loops expected to be unrolled.
@wp.kernel
def test_range_constant_dynamic_nested(m: int):
s = int(0)
for _i in range(N):
for _k in range(m):
for _j in range(N):
s += 1
wp.expect_eq(s, N * m * N)
@wp.kernel
def test_range_expression():
idx = 1
batch_size = 100
a = wp.float(0.0)
c = wp.float(1.0)
# constant expression with a function
for _i in range(4 * idx, wp.min(4 * idx + 4, batch_size)):
a += c
for _i in range(4 * idx, min(4 * idx + 4, batch_size)):
a += c
tid = wp.tid()
# dynamic expression with a function
for _i in range(4 * idx, wp.min(4 * idx, tid + 1000)):
a += c
for _i in range(4 * idx, min(4 * idx, tid + 1000)):
a += c
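    # The first two loops each run 4 iterations (range(4, min(8, 100))), while the
    # last two are empty because min(4 * idx, tid + 1000) == 4 makes them
    # range(4, 4); a therefore accumulates 8 increments of c == 1.0.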
wp.expect_eq(a, 8.0)
def test_unresolved_func(test, device):
# kernel with unresolved function must be in a separate module, otherwise the current module would fail to load
from warp.tests.aux_test_unresolved_func import unresolved_func_kernel
# ensure that an appropriate exception is raised when the bad module gets loaded
with test.assertRaisesRegex(RuntimeError, "Could not find function wp.missing_func"):
wp.launch(unresolved_func_kernel, dim=1, inputs=[], device=device)
# remove all references to the bad module so that subsequent calls to wp.force_load()
# won't try to load it unless we explicitly re-import it again
del wp.context.user_modules["warp.tests.aux_test_unresolved_func"]
del sys.modules["warp.tests.aux_test_unresolved_func"]
def test_unresolved_symbol(test, device):
# kernel with unresolved symbol must be in a separate module, otherwise the current module would fail to load
from warp.tests.aux_test_unresolved_symbol import unresolved_symbol_kernel
# ensure that an appropriate exception is raised when the bad module gets loaded
with test.assertRaisesRegex(KeyError, "Referencing undefined symbol: missing_symbol"):
wp.launch(unresolved_symbol_kernel, dim=1, inputs=[], device=device)
# remove all references to the bad module so that subsequent calls to wp.force_load()
# won't try to load it unless we explicitly re-import it again
del wp.context.user_modules["warp.tests.aux_test_unresolved_symbol"]
del sys.modules["warp.tests.aux_test_unresolved_symbol"]
def test_error_global_var(test, device):
arr = wp.array(
(1.0, 2.0, 3.0),
dtype=float,
device=device,
)
def kernel_1_fn(
out: wp.array(dtype=float),
):
out[0] = arr[0]
def kernel_2_fn(
out: wp.array(dtype=float),
):
out[0] = arr
def kernel_3_fn(
out: wp.array(dtype=float),
):
out[0] = wp.lower_bound(arr, 2.0)
out = wp.empty_like(arr)
kernel = wp.Kernel(func=kernel_1_fn)
with test.assertRaisesRegex(
RuntimeError,
r"Cannot reference a global variable from a kernel unless `wp.constant\(\)` is being used",
):
wp.launch(kernel, dim=out.shape, inputs=(), outputs=(out,))
kernel = wp.Kernel(func=kernel_2_fn)
with test.assertRaisesRegex(
RuntimeError,
r"Cannot reference a global variable from a kernel unless `wp.constant\(\)` is being used",
):
wp.launch(kernel, dim=out.shape, inputs=(), outputs=(out,))
kernel = wp.Kernel(func=kernel_3_fn)
with test.assertRaisesRegex(
RuntimeError,
r"Cannot reference a global variable from a kernel unless `wp.constant\(\)` is being used",
):
wp.launch(kernel, dim=out.shape, inputs=(), outputs=(out,))
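# For reference, the supported alternatives are passing the array in as a kernel
# argument (as the other kernels in this file do) or capturing a plain value with
# wp.constant() at module scope, e.g. C = wp.constant(1.0).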
class TestCodeGen(unittest.TestCase):
pass
devices = get_test_devices()
add_kernel_test(TestCodeGen, name="test_inplace", kernel=test_inplace, dim=1, devices=devices)
add_kernel_test(TestCodeGen, name="test_rename", kernel=test_rename, dim=1, devices=devices)
add_kernel_test(TestCodeGen, name="test_constant", kernel=test_constant, inputs=[1.0], dim=1, devices=devices)
add_kernel_test(
TestCodeGen, name="test_dynamic_for_rename", kernel=test_dynamic_for_rename, inputs=[10], dim=1, devices=devices
)
add_kernel_test(
TestCodeGen,
name="test_dynamic_for_inplace",
kernel=test_dynamic_for_inplace,
inputs=[10],
dim=1,
devices=devices,
)
add_kernel_test(TestCodeGen, name="test_reassign", kernel=test_reassign, dim=1, devices=devices)
add_kernel_test(
TestCodeGen, name="test_dynamic_reassign", kernel=test_dynamic_reassign, inputs=[2], dim=1, devices=devices
)
add_kernel_test(
TestCodeGen,
name="test_range_dynamic_forward",
kernel=test_range_dynamic,
dim=1,
inputs=[0, 4, 1],
expect=[0, 1, 2, 3],
devices=devices,
)
add_kernel_test(
TestCodeGen,
name="test_range_dynamic_reverse",
kernel=test_range_dynamic,
dim=1,
inputs=[4, 0, -1],
expect=[4, 3, 2, 1],
devices=devices,
)
add_kernel_test(
TestCodeGen,
name="test_range_dynamic_forward_step",
kernel=test_range_dynamic,
dim=1,
inputs=[0, 8, 2],
expect=[0, 2, 4, 6],
devices=devices,
)
add_kernel_test(
TestCodeGen,
name="test_range_dynamic_reverse_step",
kernel=test_range_dynamic,
dim=1,
inputs=[8, 0, -2],
expect=[8, 6, 4, 2],
devices=devices,
)
add_kernel_test(
TestCodeGen,
name="test_range_static_sum",
kernel=test_range_static_sum,
dim=1,
expect=[10, 10, 10],
devices=devices,
)
add_kernel_test(
TestCodeGen,
name="test_range_dynamic_sum",
kernel=test_range_dynamic_sum,
dim=1,
inputs=[0, 10, 2],
expect=[10, 10, 10, 10],
devices=devices,
)
add_kernel_test(
TestCodeGen,
name="test_range_dynamic_sum_zero",
kernel=test_range_dynamic_sum,
dim=1,
inputs=[0, 0, 1],
expect=[0, 0, 0, 0],
devices=devices,
)
add_kernel_test(TestCodeGen, name="test_range_constant", kernel=test_range_constant, dim=1, devices=devices)
add_kernel_test(
TestCodeGen,
name="test_range_constant_dynamic_nested",
kernel=test_range_constant_dynamic_nested,
dim=1,
inputs=[10],
devices=devices,
)
add_kernel_test(
TestCodeGen,
name="test_range_dynamic_nested",
kernel=test_range_dynamic_nested,
dim=1,
inputs=[4],
devices=devices,
)
add_kernel_test(
TestCodeGen,
name="test_range_expression",
kernel=test_range_expression,
dim=1,
devices=devices,
)
add_kernel_test(TestCodeGen, name="test_while_zero", kernel=test_while, dim=1, inputs=[0], devices=devices)
add_kernel_test(TestCodeGen, name="test_while_positive", kernel=test_while, dim=1, inputs=[16], devices=devices)
add_kernel_test(TestCodeGen, name="test_pass", kernel=test_pass, dim=1, inputs=[16], devices=devices)
add_kernel_test(TestCodeGen, name="test_break", kernel=test_break, dim=1, inputs=[10], devices=devices)
add_kernel_test(TestCodeGen, name="test_break_early", kernel=test_break_early, dim=1, inputs=[10], devices=devices)
add_kernel_test(TestCodeGen, name="test_break_unroll", kernel=test_break_unroll, dim=1, devices=devices)
add_kernel_test(TestCodeGen, name="test_break_while", kernel=test_break_while, dim=1, devices=devices)
add_kernel_test(
TestCodeGen, name="test_break_multiple", kernel=test_break_multiple, dim=1, inputs=[10], devices=devices
)
add_kernel_test(TestCodeGen, name="test_continue", kernel=test_continue, dim=1, inputs=[10], devices=devices)
add_kernel_test(TestCodeGen, name="test_continue_unroll", kernel=test_continue_unroll, dim=1, devices=devices)
add_function_test(TestCodeGen, func=test_unresolved_func, name="test_unresolved_func", devices=devices)
add_function_test(TestCodeGen, func=test_unresolved_symbol, name="test_unresolved_symbol", devices=devices)
add_function_test(TestCodeGen, func=test_error_global_var, name="test_error_global_var", devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=True)
| 12,967 | Python | 22.033748 | 116 | 0.608391 |
NVIDIA/warp/warp/tests/test_fabricarray.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import unittest
from typing import Any
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
# types to test fabric arrays
_fabric_types = [
*wp.types.scalar_types,
*[wp.types.vector(2, T) for T in wp.types.scalar_types],
*[wp.types.vector(3, T) for T in wp.types.scalar_types],
*[wp.types.vector(4, T) for T in wp.types.scalar_types],
*[wp.types.matrix((2, 2), T) for T in wp.types.scalar_types],
*[wp.types.matrix((3, 3), T) for T in wp.types.scalar_types],
*[wp.types.matrix((4, 4), T) for T in wp.types.scalar_types],
*[wp.types.quaternion(T) for T in wp.types.float_types],
]
def _warp_type_to_fabric(dtype, is_array=False):
scalar_map = {
wp.bool: "b",
wp.int8: "i1",
wp.int16: "i2",
wp.int32: "i4",
wp.int64: "i8",
wp.uint8: "u1",
wp.uint16: "u2",
wp.uint32: "u4",
wp.uint64: "u8",
wp.float16: "f2",
wp.float32: "f4",
wp.float64: "f8",
}
if hasattr(dtype, "_wp_scalar_type_"):
type_str = scalar_map[dtype._wp_scalar_type_]
if len(dtype._shape_) == 1:
role = "vector"
else:
role = "matrix"
else:
type_str = scalar_map[dtype]
role = ""
if is_array:
array_depth = 1
else:
array_depth = 0
return (True, type_str, dtype._length_, array_depth, role)
# returns a fabric array interface constructed from a regular array
def _create_fabric_array_interface(data: wp.array, attrib: str, bucket_sizes: list = None, copy=False):
assert isinstance(data, wp.array)
assert data.ndim == 1
assert isinstance(attrib, str)
if copy:
data = wp.clone(data)
if bucket_sizes is not None:
assert hasattr(bucket_sizes, "__len__")
# verify total size
total_size = 0
for bucket_size in bucket_sizes:
total_size += bucket_size
if total_size != data.size:
raise RuntimeError("Bucket sizes don't add up to the size of data array")
elif data.size > 0:
rng = np.random.default_rng(123)
# generate random bucket sizes
bucket_min = 1
bucket_max = math.ceil(0.5 * data.size)
total_size = data.size
size_remaining = total_size
bucket_sizes = []
while size_remaining >= bucket_max:
bucket_size = rng.integers(bucket_min, high=bucket_max, dtype=int)
bucket_sizes.append(bucket_size)
size_remaining -= bucket_size
if size_remaining > 0:
bucket_sizes.append(size_remaining)
else:
# empty data array
bucket_sizes = []
dtype_size = wp.types.type_size_in_bytes(data.dtype)
p = int(data.ptr) if data.ptr else 0
pointers = []
counts = []
for bucket_size in bucket_sizes:
pointers.append(p)
counts.append(bucket_size)
p += bucket_size * dtype_size
attrib_info = {}
attrib_info["type"] = _warp_type_to_fabric(data.dtype)
attrib_info["access"] = 2 # ReadWrite
attrib_info["pointers"] = pointers
attrib_info["counts"] = counts
iface = {}
iface["version"] = 1
iface["device"] = str(data.device)
iface["attribs"] = {attrib: attrib_info}
iface["_ref"] = data # backref to keep the array alive
return iface
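# Minimal usage sketch (assuming a valid device): wrap a plain Warp array as a
# single-attribute Fabric interface and view it through wp.fabricarray, e.g.
#   data = wp.zeros(16, dtype=wp.float32)
#   iface = _create_fabric_array_interface(data, "foo")
#   fa = wp.fabricarray(data=iface, attrib="foo")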
# returns a fabric array array interface constructed from a list of regular arrays
def _create_fabric_array_array_interface(data: list, attrib: str, bucket_sizes: list = None):
# data should be a list of arrays
assert isinstance(data, list)
num_arrays = len(data)
assert num_arrays > 0
device = data[0].device
dtype = data[0].dtype
assert isinstance(attrib, str)
if bucket_sizes is not None:
assert hasattr(bucket_sizes, "__len__")
# verify total size
total_size = 0
for bucket_size in bucket_sizes:
total_size += bucket_size
if total_size != num_arrays:
raise RuntimeError("Bucket sizes don't add up to the number of given arrays")
else:
rng = np.random.default_rng(123)
# generate random bucket sizes
bucket_min = 1
bucket_max = math.ceil(0.5 * num_arrays)
total_size = num_arrays
size_remaining = total_size
bucket_sizes = []
while size_remaining >= bucket_max:
bucket_size = rng.integers(bucket_min, high=bucket_max, dtype=int)
bucket_sizes.append(bucket_size)
size_remaining -= bucket_size
if size_remaining > 0:
bucket_sizes.append(size_remaining)
# initialize array of pointers to arrays and their lengths
_array_pointers = []
_array_lengths = []
for i in range(num_arrays):
_array_pointers.append(data[i].ptr)
_array_lengths.append(data[i].size)
array_pointers = wp.array(_array_pointers, dtype=wp.uint64, device=device)
pointer_size = wp.types.type_size_in_bytes(array_pointers.dtype)
lengths = wp.array(_array_lengths, dtype=wp.uint64, device=device)
length_size = wp.types.type_size_in_bytes(lengths.dtype)
p_pointers = int(array_pointers.ptr)
p_lengths = int(lengths.ptr)
pointers = []
counts = []
array_lengths = []
for bucket_size in bucket_sizes:
pointers.append(p_pointers)
counts.append(bucket_size)
array_lengths.append(p_lengths)
p_pointers += bucket_size * pointer_size
p_lengths += bucket_size * length_size
attrib_info = {}
attrib_info["type"] = _warp_type_to_fabric(dtype, is_array=True)
attrib_info["access"] = 2 # ReadWrite
attrib_info["pointers"] = pointers
attrib_info["counts"] = counts
attrib_info["array_lengths"] = array_lengths
iface = {}
iface["version"] = 1
iface["device"] = str(device)
iface["attribs"] = {attrib: attrib_info}
iface["_ref"] = data # backref to keep the data arrays alive
iface["_ref_pointers"] = array_pointers # backref to keep the array pointers alive
iface["_ref_lengths"] = lengths # backref to keep the lengths array alive
return iface
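# Compared to _create_fabric_array_interface above, the array-of-arrays variant
# adds an "array_lengths" entry: "pointers" index into a packed device buffer of
# per-array pointers, "array_lengths" index into a parallel buffer of per-array
# element counts, and "counts" still give the number of arrays per bucket. The
# "_ref*" entries only exist to keep the backing buffers alive.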
@wp.kernel
def fa_kernel(a: wp.fabricarray(dtype=float), expected: wp.array(dtype=float)):
i = wp.tid()
wp.expect_eq(a[i], expected[i])
a[i] = 2.0 * a[i]
wp.atomic_add(a, i, 1.0)
wp.expect_eq(a[i], 2.0 * expected[i] + 1.0)
@wp.kernel
def fa_kernel_indexed(a: wp.indexedfabricarray(dtype=float), expected: wp.indexedarray(dtype=float)):
i = wp.tid()
wp.expect_eq(a[i], expected[i])
a[i] = 2.0 * a[i]
wp.atomic_add(a, i, 1.0)
wp.expect_eq(a[i], 2.0 * expected[i] + 1.0)
def test_fabricarray_kernel(test, device):
data = wp.array(data=np.arange(100, dtype=np.float32), device=device)
iface = _create_fabric_array_interface(data, "foo", copy=True)
fa = wp.fabricarray(data=iface, attrib="foo")
test.assertEqual(fa.dtype, data.dtype)
test.assertEqual(fa.ndim, 1)
test.assertEqual(fa.shape, data.shape)
test.assertEqual(fa.size, data.size)
wp.launch(fa_kernel, dim=fa.size, inputs=[fa, data], device=device)
# reset data
wp.copy(fa, data)
# test indexed
indices = wp.array(data=np.arange(1, data.size, 2, dtype=np.int32), device=device)
ifa = fa[indices]
idata = data[indices]
test.assertEqual(ifa.dtype, idata.dtype)
test.assertEqual(ifa.ndim, 1)
test.assertEqual(ifa.shape, idata.shape)
test.assertEqual(ifa.size, idata.size)
wp.launch(fa_kernel_indexed, dim=ifa.size, inputs=[ifa, idata], device=device)
wp.synchronize_device(device)
@wp.kernel
def fa_generic_dtype_kernel(a: wp.fabricarray(dtype=Any), b: wp.fabricarray(dtype=Any)):
i = wp.tid()
b[i] = a[i] + a[i]
@wp.kernel
def fa_generic_dtype_kernel_indexed(a: wp.indexedfabricarray(dtype=Any), b: wp.indexedfabricarray(dtype=Any)):
i = wp.tid()
b[i] = a[i] + a[i]
def test_fabricarray_generic_dtype(test, device):
for T in _fabric_types:
if hasattr(T, "_wp_scalar_type_"):
nptype = wp.types.warp_type_to_np_dtype[T._wp_scalar_type_]
else:
nptype = wp.types.warp_type_to_np_dtype[T]
data = wp.array(data=np.arange(10, dtype=nptype), device=device)
data_iface = _create_fabric_array_interface(data, "foo", copy=True)
fa = wp.fabricarray(data=data_iface, attrib="foo")
result = wp.zeros_like(data)
result_iface = _create_fabric_array_interface(result, "foo", copy=True)
fb = wp.fabricarray(data=result_iface, attrib="foo")
test.assertEqual(fa.dtype, fb.dtype)
test.assertEqual(fa.ndim, fb.ndim)
test.assertEqual(fa.shape, fb.shape)
test.assertEqual(fa.size, fb.size)
wp.launch(fa_generic_dtype_kernel, dim=fa.size, inputs=[fa, fb], device=device)
assert_np_equal(fb.numpy(), 2 * fa.numpy())
# reset data
wp.copy(fa, data)
wp.copy(fb, result)
# test indexed
indices = wp.array(data=np.arange(1, data.size, 2, dtype=np.int32), device=device)
ifa = fa[indices]
ifb = fb[indices]
test.assertEqual(ifa.dtype, ifb.dtype)
test.assertEqual(ifa.ndim, ifb.ndim)
test.assertEqual(ifa.shape, ifb.shape)
test.assertEqual(ifa.size, ifb.size)
wp.launch(fa_generic_dtype_kernel_indexed, dim=ifa.size, inputs=[ifa, ifb], device=device)
assert_np_equal(ifb.numpy(), 2 * ifa.numpy())
@wp.kernel
def fa_generic_array_kernel(a: Any, b: Any):
i = wp.tid()
b[i] = a[i] + a[i]
def test_fabricarray_generic_array(test, device):
for T in _fabric_types:
if hasattr(T, "_wp_scalar_type_"):
nptype = wp.types.warp_type_to_np_dtype[T._wp_scalar_type_]
else:
nptype = wp.types.warp_type_to_np_dtype[T]
data = wp.array(data=np.arange(100, dtype=nptype), device=device)
data_iface = _create_fabric_array_interface(data, "foo", copy=True)
fa = wp.fabricarray(data=data_iface, attrib="foo")
result = wp.zeros_like(data)
result_iface = _create_fabric_array_interface(result, "foo", copy=True)
fb = wp.fabricarray(data=result_iface, attrib="foo")
test.assertEqual(fa.dtype, fb.dtype)
test.assertEqual(fa.ndim, fb.ndim)
test.assertEqual(fa.shape, fb.shape)
test.assertEqual(fa.size, fb.size)
wp.launch(fa_generic_array_kernel, dim=fa.size, inputs=[fa, fb], device=device)
assert_np_equal(fb.numpy(), 2 * fa.numpy())
# reset data
wp.copy(fa, data)
wp.copy(fb, result)
# test indexed
indices = wp.array(data=np.arange(1, data.size, 2, dtype=np.int32), device=device)
ifa = fa[indices]
ifb = fb[indices]
test.assertEqual(ifa.dtype, ifb.dtype)
test.assertEqual(ifa.ndim, ifb.ndim)
test.assertEqual(ifa.shape, ifb.shape)
test.assertEqual(ifa.size, ifb.size)
wp.launch(fa_generic_array_kernel, dim=ifa.size, inputs=[ifa, ifb], device=device)
assert_np_equal(ifb.numpy(), 2 * ifa.numpy())
def test_fabricarray_empty(test, device):
    # Test whether common operations work with empty (zero-sized) fabric arrays
    # and indexed fabric arrays without throwing exceptions.
def test_empty_ops(nrows, ncols, wptype, nptype):
# scalar, vector, or matrix
if ncols > 0:
if nrows > 0:
wptype = wp.types.matrix((nrows, ncols), wptype)
else:
wptype = wp.types.vector(ncols, wptype)
dtype_shape = wptype._shape_
else:
dtype_shape = ()
fill_value = wptype(42)
# create an empty data array
data = wp.empty(0, dtype=wptype, device=device)
iface = _create_fabric_array_interface(data, "foo", copy=True)
fa = wp.fabricarray(data=iface, attrib="foo")
test.assertEqual(fa.size, 0)
test.assertEqual(fa.shape, (0,))
# all of these methods should succeed with zero-sized arrays
fa.zero_()
fa.fill_(fill_value)
fb = fa.contiguous()
fb = wp.empty_like(fa)
fb = wp.zeros_like(fa)
fb = wp.full_like(fa, fill_value)
fb = wp.clone(fa)
wp.copy(fa, fb)
fa.assign(fb)
na = fa.numpy()
test.assertEqual(na.size, 0)
test.assertEqual(na.shape, (0, *dtype_shape))
test.assertEqual(na.dtype, nptype)
test.assertEqual(fa.list(), [])
# test indexed
# create a zero-sized array of indices
indices = wp.empty(0, dtype=int, device=device)
ifa = fa[indices]
test.assertEqual(ifa.size, 0)
test.assertEqual(ifa.shape, (0,))
# all of these methods should succeed with zero-sized arrays
ifa.zero_()
ifa.fill_(fill_value)
ifb = ifa.contiguous()
ifb = wp.empty_like(ifa)
ifb = wp.zeros_like(ifa)
ifb = wp.full_like(ifa, fill_value)
ifb = wp.clone(ifa)
wp.copy(ifa, ifb)
ifa.assign(ifb)
na = ifa.numpy()
test.assertEqual(na.size, 0)
test.assertEqual(na.shape, (0, *dtype_shape))
test.assertEqual(na.dtype, nptype)
test.assertEqual(ifa.list(), [])
# test with scalars, vectors, and matrices
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
# scalars
test_empty_ops(0, 0, wptype, nptype)
for ncols in [2, 3, 4, 5]:
# vectors
test_empty_ops(0, ncols, wptype, nptype)
# square matrices (the Fabric interface only supports square matrices right now)
test_empty_ops(ncols, ncols, wptype, nptype)
def test_fabricarray_fill_scalar(test, device):
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
# create a data array
data = wp.zeros(100, dtype=wptype, device=device)
iface = _create_fabric_array_interface(data, "foo", copy=True)
fa = wp.fabricarray(data=iface, attrib="foo")
assert_np_equal(fa.numpy(), np.zeros(fa.shape, dtype=nptype))
# fill with int value
fill_value = 42
fa.fill_(fill_value)
assert_np_equal(fa.numpy(), np.full(fa.shape, fill_value, dtype=nptype))
fa.zero_()
assert_np_equal(fa.numpy(), np.zeros(fa.shape, dtype=nptype))
if wptype in wp.types.float_types:
# fill with float value
fill_value = 13.37
fa.fill_(fill_value)
assert_np_equal(fa.numpy(), np.full(fa.shape, fill_value, dtype=nptype))
# fill with Warp scalar value
fill_value = wptype(17)
fa.fill_(fill_value)
assert_np_equal(fa.numpy(), np.full(fa.shape, fill_value.value, dtype=nptype))
# reset data
wp.copy(fa, data)
# test indexed
indices1 = wp.array(data=np.arange(1, data.size, 2, dtype=np.int32), device=device)
ifa = fa[indices1]
# ensure that the other indices remain unchanged
indices2 = wp.array(data=np.arange(0, data.size, 2, dtype=np.int32), device=device)
ifb = fa[indices2]
assert_np_equal(ifa.numpy(), np.zeros(ifa.shape, dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros(ifb.shape, dtype=nptype))
# fill with int value
fill_value = 42
ifa.fill_(fill_value)
assert_np_equal(ifa.numpy(), np.full(ifa.shape, fill_value, dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros(ifb.shape, dtype=nptype))
ifa.zero_()
assert_np_equal(ifa.numpy(), np.zeros(ifa.shape, dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros(ifb.shape, dtype=nptype))
if wptype in wp.types.float_types:
# fill with float value
fill_value = 13.37
ifa.fill_(fill_value)
assert_np_equal(ifa.numpy(), np.full(ifa.shape, fill_value, dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros(ifb.shape, dtype=nptype))
# fill with Warp scalar value
fill_value = wptype(17)
ifa.fill_(fill_value)
assert_np_equal(ifa.numpy(), np.full(ifa.shape, fill_value.value, dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros(ifb.shape, dtype=nptype))
def test_fabricarray_fill_vector(test, device):
# test filling a vector array with scalar or vector values (vec_type, list, or numpy array)
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
# vector types
vector_types = [
wp.types.vector(2, wptype),
wp.types.vector(3, wptype),
wp.types.vector(4, wptype),
wp.types.vector(5, wptype),
]
for vec_type in vector_types:
vec_len = vec_type._length_
data = wp.zeros(100, dtype=vec_type, device=device)
iface = _create_fabric_array_interface(data, "foo", copy=True)
fa = wp.fabricarray(data=iface, attrib="foo")
assert_np_equal(fa.numpy(), np.zeros((*fa.shape, vec_len), dtype=nptype))
# fill with int scalar
fill_value = 42
fa.fill_(fill_value)
assert_np_equal(fa.numpy(), np.full((*fa.shape, vec_len), fill_value, dtype=nptype))
# test zeroing
fa.zero_()
assert_np_equal(fa.numpy(), np.zeros((*fa.shape, vec_len), dtype=nptype))
# vector values can be passed as a list, numpy array, or Warp vector instance
fill_list = [17, 42, 99, 101, 127][:vec_len]
fill_arr = np.array(fill_list, dtype=nptype)
fill_vec = vec_type(fill_list)
expected = np.tile(fill_arr, fa.size).reshape((*fa.shape, vec_len))
# fill with list of vector length
fa.fill_(fill_list)
assert_np_equal(fa.numpy(), expected)
# clear
fa.zero_()
# fill with numpy array of vector length
fa.fill_(fill_arr)
assert_np_equal(fa.numpy(), expected)
# clear
fa.zero_()
# fill with vec instance
fa.fill_(fill_vec)
assert_np_equal(fa.numpy(), expected)
if wptype in wp.types.float_types:
# fill with float scalar
fill_value = 13.37
fa.fill_(fill_value)
assert_np_equal(fa.numpy(), np.full((*fa.shape, vec_len), fill_value, dtype=nptype))
# fill with float list of vector length
fill_list = [-2.5, -1.25, 1.25, 2.5, 5.0][:vec_len]
fa.fill_(fill_list)
expected = np.tile(np.array(fill_list, dtype=nptype), fa.size).reshape((*fa.shape, vec_len))
assert_np_equal(fa.numpy(), expected)
# reset data
wp.copy(fa, data)
# test indexed
indices1 = wp.array(data=np.arange(1, data.size, 2, dtype=np.int32), device=device)
ifa = fa[indices1]
# ensure that the other indices remain unchanged
indices2 = wp.array(data=np.arange(0, data.size, 2, dtype=np.int32), device=device)
ifb = fa[indices2]
assert_np_equal(ifa.numpy(), np.zeros((*ifa.shape, vec_len), dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, vec_len), dtype=nptype))
# fill with int scalar
fill_value = 42
ifa.fill_(fill_value)
assert_np_equal(ifa.numpy(), np.full((*ifa.shape, vec_len), fill_value, dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, vec_len), dtype=nptype))
# test zeroing
ifa.zero_()
assert_np_equal(ifa.numpy(), np.zeros((*ifa.shape, vec_len), dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, vec_len), dtype=nptype))
# vector values can be passed as a list, numpy array, or Warp vector instance
fill_list = [17, 42, 99, 101, 127][:vec_len]
fill_arr = np.array(fill_list, dtype=nptype)
fill_vec = vec_type(fill_list)
expected = np.tile(fill_arr, ifa.size).reshape((*ifa.shape, vec_len))
# fill with list of vector length
ifa.fill_(fill_list)
assert_np_equal(ifa.numpy(), expected)
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, vec_len), dtype=nptype))
# clear
ifa.zero_()
# fill with numpy array of vector length
ifa.fill_(fill_arr)
assert_np_equal(ifa.numpy(), expected)
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, vec_len), dtype=nptype))
# clear
ifa.zero_()
# fill with vec instance
ifa.fill_(fill_vec)
assert_np_equal(ifa.numpy(), expected)
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, vec_len), dtype=nptype))
if wptype in wp.types.float_types:
# fill with float scalar
fill_value = 13.37
ifa.fill_(fill_value)
assert_np_equal(ifa.numpy(), np.full((*ifa.shape, vec_len), fill_value, dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, vec_len), dtype=nptype))
# fill with float list of vector length
fill_list = [-2.5, -1.25, 1.25, 2.5, 5.0][:vec_len]
ifa.fill_(fill_list)
expected = np.tile(np.array(fill_list, dtype=nptype), ifa.size).reshape((*ifa.shape, vec_len))
assert_np_equal(ifa.numpy(), expected)
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, vec_len), dtype=nptype))
def test_fabricarray_fill_matrix(test, device):
# test filling a matrix array with scalar or matrix values (mat_type, nested list, or 2d numpy array)
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
# matrix types
matrix_types = [
# square matrices only
wp.types.matrix((2, 2), wptype),
wp.types.matrix((3, 3), wptype),
wp.types.matrix((4, 4), wptype),
wp.types.matrix((5, 5), wptype),
]
for mat_type in matrix_types:
mat_len = mat_type._length_
mat_shape = mat_type._shape_
data = wp.zeros(100, dtype=mat_type, device=device)
iface = _create_fabric_array_interface(data, "foo", copy=True)
fa = wp.fabricarray(data=iface, attrib="foo")
assert_np_equal(fa.numpy(), np.zeros((*fa.shape, *mat_shape), dtype=nptype))
# fill with scalar
fill_value = 42
fa.fill_(fill_value)
assert_np_equal(fa.numpy(), np.full((*fa.shape, *mat_shape), fill_value, dtype=nptype))
# test zeroing
fa.zero_()
assert_np_equal(fa.numpy(), np.zeros((*fa.shape, *mat_shape), dtype=nptype))
# matrix values can be passed as a 1d numpy array, 2d numpy array, flat list, nested list, or Warp matrix instance
if wptype != wp.bool:
fill_arr1 = np.arange(mat_len, dtype=nptype)
else:
fill_arr1 = np.ones(mat_len, dtype=nptype)
fill_arr2 = fill_arr1.reshape(mat_shape)
fill_list1 = list(fill_arr1)
fill_list2 = [list(row) for row in fill_arr2]
fill_mat = mat_type(fill_arr1)
expected = np.tile(fill_arr1, fa.size).reshape((*fa.shape, *mat_shape))
# fill with 1d numpy array
fa.fill_(fill_arr1)
assert_np_equal(fa.numpy(), expected)
# clear
fa.zero_()
# fill with 2d numpy array
fa.fill_(fill_arr2)
assert_np_equal(fa.numpy(), expected)
# clear
fa.zero_()
# fill with flat list
fa.fill_(fill_list1)
assert_np_equal(fa.numpy(), expected)
# clear
fa.zero_()
# fill with nested list
fa.fill_(fill_list2)
assert_np_equal(fa.numpy(), expected)
# clear
fa.zero_()
# fill with mat instance
fa.fill_(fill_mat)
assert_np_equal(fa.numpy(), expected)
# reset data
wp.copy(fa, data)
# test indexed
indices1 = wp.array(data=np.arange(1, data.size, 2, dtype=np.int32), device=device)
ifa = fa[indices1]
# ensure that the other indices remain unchanged
indices2 = wp.array(data=np.arange(0, data.size, 2, dtype=np.int32), device=device)
ifb = fa[indices2]
assert_np_equal(ifa.numpy(), np.zeros((*ifa.shape, *mat_shape), dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, *mat_shape), dtype=nptype))
# fill with scalar
fill_value = 42
ifa.fill_(fill_value)
assert_np_equal(ifa.numpy(), np.full((*ifa.shape, *mat_shape), fill_value, dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, *mat_shape), dtype=nptype))
# test zeroing
ifa.zero_()
assert_np_equal(ifa.numpy(), np.zeros((*ifa.shape, *mat_shape), dtype=nptype))
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, *mat_shape), dtype=nptype))
# matrix values can be passed as a 1d numpy array, 2d numpy array, flat list, nested list, or Warp matrix instance
if wptype != wp.bool:
fill_arr1 = np.arange(mat_len, dtype=nptype)
else:
fill_arr1 = np.ones(mat_len, dtype=nptype)
fill_arr2 = fill_arr1.reshape(mat_shape)
fill_list1 = list(fill_arr1)
fill_list2 = [list(row) for row in fill_arr2]
fill_mat = mat_type(fill_arr1)
expected = np.tile(fill_arr1, ifa.size).reshape((*ifa.shape, *mat_shape))
# fill with 1d numpy array
ifa.fill_(fill_arr1)
assert_np_equal(ifa.numpy(), expected)
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, *mat_shape), dtype=nptype))
# clear
ifa.zero_()
# fill with 2d numpy array
ifa.fill_(fill_arr2)
assert_np_equal(ifa.numpy(), expected)
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, *mat_shape), dtype=nptype))
# clear
ifa.zero_()
# fill with flat list
ifa.fill_(fill_list1)
assert_np_equal(ifa.numpy(), expected)
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, *mat_shape), dtype=nptype))
# clear
ifa.zero_()
# fill with nested list
ifa.fill_(fill_list2)
assert_np_equal(ifa.numpy(), expected)
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, *mat_shape), dtype=nptype))
# clear
ifa.zero_()
# fill with mat instance
ifa.fill_(fill_mat)
assert_np_equal(ifa.numpy(), expected)
assert_np_equal(ifb.numpy(), np.zeros((*ifb.shape, *mat_shape), dtype=nptype))
@wp.kernel
def fa_generic_sums_kernel(a: wp.fabricarrayarray(dtype=Any), sums: wp.array(dtype=Any)):
i = wp.tid()
# get sub-array using wp::view()
row = a[i]
# get sub-array length
count = row.shape[0]
# compute sub-array sum
for j in range(count):
sums[i] = sums[i] + row[j]
@wp.kernel
def fa_generic_sums_kernel_indexed(a: wp.indexedfabricarrayarray(dtype=Any), sums: wp.array(dtype=Any)):
i = wp.tid()
# get sub-array using wp::view()
row = a[i]
# get sub-array length
count = row.shape[0]
# compute sub-array sum
for j in range(count):
sums[i] = sums[i] + row[j]
def test_fabricarrayarray(test, device):
for T in _fabric_types:
if hasattr(T, "_wp_scalar_type_"):
nptype = wp.types.warp_type_to_np_dtype[T._wp_scalar_type_]
else:
nptype = wp.types.warp_type_to_np_dtype[T]
n = 100
min_length = 1
max_length = 10
arrays = []
expected_sums = []
expected_sums_indexed = []
# generate data arrays
length = min_length
for i in range(n):
if length > max_length:
length = min_length
na = np.arange(1, length + 1, dtype=nptype)
arrays.append(wp.array(data=na, device=device))
expected_sums.append(na.sum())
# every second index
if i % 2 == 0:
expected_sums_indexed.append(na.sum())
length += 1
data_iface = _create_fabric_array_array_interface(arrays, "foo")
fa = wp.fabricarrayarray(data=data_iface, attrib="foo")
sums = wp.zeros_like(fa)
test.assertEqual(fa.dtype, sums.dtype)
test.assertEqual(fa.ndim, 2)
test.assertEqual(sums.ndim, 1)
test.assertEqual(fa.shape, sums.shape)
test.assertEqual(fa.size, sums.size)
wp.launch(fa_generic_sums_kernel, dim=fa.size, inputs=[fa, sums], device=device)
assert_np_equal(sums.numpy(), np.array(expected_sums, dtype=nptype))
# test indexed
indices = wp.array(data=np.arange(0, n, 2, dtype=np.int32), device=device)
ifa = fa[indices]
sums = wp.zeros_like(ifa)
test.assertEqual(ifa.dtype, sums.dtype)
test.assertEqual(ifa.ndim, 2)
test.assertEqual(sums.ndim, 1)
test.assertEqual(ifa.shape, sums.shape)
test.assertEqual(ifa.size, sums.size)
wp.launch(fa_generic_sums_kernel_indexed, dim=ifa.size, inputs=[ifa, sums], device=device)
assert_np_equal(sums.numpy(), np.array(expected_sums_indexed, dtype=nptype))
# explicit kernel overloads
for T in _fabric_types:
wp.overload(fa_generic_dtype_kernel, [wp.fabricarray(dtype=T), wp.fabricarray(dtype=T)])
wp.overload(fa_generic_dtype_kernel_indexed, [wp.indexedfabricarray(dtype=T), wp.indexedfabricarray(dtype=T)])
wp.overload(fa_generic_array_kernel, [wp.fabricarray(dtype=T), wp.fabricarray(dtype=T)])
wp.overload(fa_generic_array_kernel, [wp.indexedfabricarray(dtype=T), wp.indexedfabricarray(dtype=T)])
wp.overload(fa_generic_sums_kernel, [wp.fabricarrayarray(dtype=T), wp.array(dtype=T)])
wp.overload(fa_generic_sums_kernel_indexed, [wp.indexedfabricarrayarray(dtype=T), wp.array(dtype=T)])
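# Note: the wp.overload calls above pre-register concrete instances of the
# generic kernels, so launching them in the tests does not force a module
# rebuild for each dtype encountered at runtime.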
devices = get_test_devices()
class TestFabricArray(unittest.TestCase):
pass
# fabric arrays
add_function_test(TestFabricArray, "test_fabricarray_kernel", test_fabricarray_kernel, devices=devices)
add_function_test(TestFabricArray, "test_fabricarray_empty", test_fabricarray_empty, devices=devices)
add_function_test(TestFabricArray, "test_fabricarray_generic_dtype", test_fabricarray_generic_dtype, devices=devices)
add_function_test(TestFabricArray, "test_fabricarray_generic_array", test_fabricarray_generic_array, devices=devices)
add_function_test(TestFabricArray, "test_fabricarray_fill_scalar", test_fabricarray_fill_scalar, devices=devices)
add_function_test(TestFabricArray, "test_fabricarray_fill_vector", test_fabricarray_fill_vector, devices=devices)
add_function_test(TestFabricArray, "test_fabricarray_fill_matrix", test_fabricarray_fill_matrix, devices=devices)
# fabric arrays of arrays
add_function_test(TestFabricArray, "test_fabricarrayarray", test_fabricarrayarray, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 32,138 | Python | 32.724029 | 126 | 0.591792 |
NVIDIA/warp/warp/tests/test_jax.py | # Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import unittest
from typing import Any
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
# basic kernel with one input and output
@wp.kernel
def triple_kernel(input: wp.array(dtype=float), output: wp.array(dtype=float)):
tid = wp.tid()
output[tid] = 3.0 * input[tid]
# generic kernel with one scalar input and output
@wp.kernel
def triple_kernel_scalar(input: wp.array(dtype=Any), output: wp.array(dtype=Any)):
tid = wp.tid()
output[tid] = input.dtype(3) * input[tid]
# generic kernel with one vector/matrix input and output
@wp.kernel
def triple_kernel_vecmat(input: wp.array(dtype=Any), output: wp.array(dtype=Any)):
tid = wp.tid()
output[tid] = input.dtype.dtype(3) * input[tid]
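# Note on the generic kernels above: `input.dtype` is resolved at code
# generation time to the array's element type, so `input.dtype(3)` constructs a
# scalar 3 of the matching type, and `input.dtype.dtype(3)` reaches the scalar
# type underlying a vector/matrix element type.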
# kernel with multiple inputs and outputs
@wp.kernel
def multiarg_kernel(
# inputs
a: wp.array(dtype=float),
b: wp.array(dtype=float),
c: wp.array(dtype=float),
# outputs
ab: wp.array(dtype=float),
bc: wp.array(dtype=float),
):
tid = wp.tid()
ab[tid] = a[tid] + b[tid]
bc[tid] = b[tid] + c[tid]
# various types for testing
scalar_types = wp.types.scalar_types
vector_types = []
matrix_types = []
for dim in [2, 3, 4]:
for T in scalar_types:
vector_types.append(wp.vec(dim, T))
matrix_types.append(wp.mat((dim, dim), T))
# explicitly overload generic kernels to avoid module reloading during tests
for T in scalar_types:
wp.overload(triple_kernel_scalar, [wp.array(dtype=T), wp.array(dtype=T)])
for T in [*vector_types, *matrix_types]:
wp.overload(triple_kernel_vecmat, [wp.array(dtype=T), wp.array(dtype=T)])
def _jax_version():
try:
import jax
return jax.__version_info__
except ImportError:
return (0, 0, 0)
def test_dtype_from_jax(test, device):
import jax.numpy as jp
def test_conversions(jax_type, warp_type):
test.assertEqual(wp.dtype_from_jax(jax_type), warp_type)
test.assertEqual(wp.dtype_from_jax(jp.dtype(jax_type)), warp_type)
test_conversions(jp.float16, wp.float16)
test_conversions(jp.float32, wp.float32)
test_conversions(jp.float64, wp.float64)
test_conversions(jp.int8, wp.int8)
test_conversions(jp.int16, wp.int16)
test_conversions(jp.int32, wp.int32)
test_conversions(jp.int64, wp.int64)
test_conversions(jp.uint8, wp.uint8)
test_conversions(jp.uint16, wp.uint16)
test_conversions(jp.uint32, wp.uint32)
test_conversions(jp.uint64, wp.uint64)
test_conversions(jp.bool_, wp.bool)
def test_dtype_to_jax(test, device):
import jax.numpy as jp
def test_conversions(warp_type, jax_type):
test.assertEqual(wp.dtype_to_jax(warp_type), jax_type)
test_conversions(wp.float16, jp.float16)
test_conversions(wp.float32, jp.float32)
test_conversions(wp.float64, jp.float64)
test_conversions(wp.int8, jp.int8)
test_conversions(wp.int16, jp.int16)
test_conversions(wp.int32, jp.int32)
test_conversions(wp.int64, jp.int64)
test_conversions(wp.uint8, jp.uint8)
test_conversions(wp.uint16, jp.uint16)
test_conversions(wp.uint32, jp.uint32)
test_conversions(wp.uint64, jp.uint64)
test_conversions(wp.bool, jp.bool_)
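# Sanity sketch (hypothetical helper, not registered with the test suite): the
# two conversion helpers should round-trip a Warp dtype back to itself.
def _dtype_round_trip_sketch():
    for T in [wp.float16, wp.float32, wp.float64, wp.int32, wp.uint64, wp.bool]:
        # dtype_to_jax maps a Warp scalar type to the matching jax.numpy dtype,
        # and dtype_from_jax maps it back
        assert wp.dtype_from_jax(wp.dtype_to_jax(T)) == T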
def test_device_conversion(test, device):
jax_device = wp.device_to_jax(device)
warp_device = wp.device_from_jax(jax_device)
test.assertEqual(warp_device, device)
@unittest.skipUnless(_jax_version() >= (0, 4, 25), "Jax version too old")
def test_jax_kernel_basic(test, device):
import jax.numpy as jp
from warp.jax_experimental import jax_kernel
n = 64
jax_triple = jax_kernel(triple_kernel)
@jax.jit
def f():
x = jp.arange(n, dtype=jp.float32)
return jax_triple(x)
# run on the given device
with jax.default_device(wp.device_to_jax(device)):
y = f()
result = np.asarray(y).reshape((n,))
expected = 3 * np.arange(n, dtype=np.float32)
assert_np_equal(result, expected)
@unittest.skipUnless(_jax_version() >= (0, 4, 25), "Jax version too old")
def test_jax_kernel_scalar(test, device):
import jax.numpy as jp
from warp.jax_experimental import jax_kernel
n = 64
for T in scalar_types:
jp_dtype = wp.dtype_to_jax(T)
np_dtype = wp.dtype_to_numpy(T)
with test.subTest(msg=T.__name__):
# get the concrete overload
kernel_instance = triple_kernel_scalar.add_overload([wp.array(dtype=T), wp.array(dtype=T)])
jax_triple = jax_kernel(kernel_instance)
@jax.jit
def f(jax_triple=jax_triple, jp_dtype=jp_dtype):
x = jp.arange(n, dtype=jp_dtype)
return jax_triple(x)
# run on the given device
with jax.default_device(wp.device_to_jax(device)):
y = f()
result = np.asarray(y).reshape((n,))
expected = 3 * np.arange(n, dtype=np_dtype)
assert_np_equal(result, expected)
@unittest.skipUnless(_jax_version() >= (0, 4, 25), "Jax version too old")
def test_jax_kernel_vecmat(test, device):
import jax.numpy as jp
from warp.jax_experimental import jax_kernel
for T in [*vector_types, *matrix_types]:
jp_dtype = wp.dtype_to_jax(T._wp_scalar_type_)
np_dtype = wp.dtype_to_numpy(T._wp_scalar_type_)
n = 64 // T._length_
scalar_shape = (n, *T._shape_)
scalar_len = n * T._length_
with test.subTest(msg=T.__name__):
# get the concrete overload
kernel_instance = triple_kernel_vecmat.add_overload([wp.array(dtype=T), wp.array(dtype=T)])
jax_triple = jax_kernel(kernel_instance)
@jax.jit
def f(jax_triple=jax_triple, jp_dtype=jp_dtype, scalar_len=scalar_len, scalar_shape=scalar_shape):
x = jp.arange(scalar_len, dtype=jp_dtype).reshape(scalar_shape)
return jax_triple(x)
# run on the given device
with jax.default_device(wp.device_to_jax(device)):
y = f()
result = np.asarray(y).reshape(scalar_shape)
expected = 3 * np.arange(scalar_len, dtype=np_dtype).reshape(scalar_shape)
assert_np_equal(result, expected)
@unittest.skipUnless(_jax_version() >= (0, 4, 25), "Jax version too old")
def test_jax_kernel_multiarg(test, device):
import jax.numpy as jp
from warp.jax_experimental import jax_kernel
n = 64
jax_multiarg = jax_kernel(multiarg_kernel)
@jax.jit
def f():
a = jp.full(n, 1, dtype=jp.float32)
b = jp.full(n, 2, dtype=jp.float32)
c = jp.full(n, 3, dtype=jp.float32)
return jax_multiarg(a, b, c)
# run on the given device
with jax.default_device(wp.device_to_jax(device)):
x, y = f()
result_x, result_y = np.asarray(x), np.asarray(y)
expected_x = np.full(n, 3, dtype=np.float32)
expected_y = np.full(n, 5, dtype=np.float32)
assert_np_equal(result_x, expected_x)
assert_np_equal(result_y, expected_y)
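# Note: jax_kernel infers outputs from the kernel signature; multiarg_kernel
# declares two output arrays, so the wrapped callable returns an (ab, bc)
# tuple, unpacked above as x, y.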
class TestJax(unittest.TestCase):
pass
# try adding Jax tests if Jax is installed correctly
try:
# prevent Jax from gobbling up GPU memory
os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "false"
os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.dlpack
# NOTE: we must enable 64-bit types in Jax to test the full gamut of types
jax.config.update("jax_enable_x64", True)
# check which Warp devices work with Jax
# CUDA devices may fail if Jax cannot find a CUDA Toolkit
test_devices = get_test_devices()
jax_compatible_devices = []
jax_compatible_cuda_devices = []
for d in test_devices:
try:
with jax.default_device(wp.device_to_jax(d)):
j = jax.numpy.arange(10, dtype=jax.numpy.float32)
j += 1
jax_compatible_devices.append(d)
if d.is_cuda:
jax_compatible_cuda_devices.append(d)
except Exception as e:
print(f"Skipping Jax DLPack tests on device '{d}' due to exception: {e}")
add_function_test(TestJax, "test_dtype_from_jax", test_dtype_from_jax, devices=None)
add_function_test(TestJax, "test_dtype_to_jax", test_dtype_to_jax, devices=None)
if jax_compatible_devices:
add_function_test(TestJax, "test_device_conversion", test_device_conversion, devices=jax_compatible_devices)
if jax_compatible_cuda_devices:
add_function_test(TestJax, "test_jax_kernel_basic", test_jax_kernel_basic, devices=jax_compatible_cuda_devices)
add_function_test(
TestJax, "test_jax_kernel_scalar", test_jax_kernel_scalar, devices=jax_compatible_cuda_devices
)
add_function_test(
TestJax, "test_jax_kernel_vecmat", test_jax_kernel_vecmat, devices=jax_compatible_cuda_devices
)
add_function_test(
TestJax, "test_jax_kernel_multiarg", test_jax_kernel_multiarg, devices=jax_compatible_cuda_devices
)
except Exception as e:
print(f"Skipping Jax tests due to exception: {e}")
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 9,634 | Python | 30.486928 | 119 | 0.647914 |
NVIDIA/warp/warp/tests/test_pinned.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
def test_pinned(test: unittest.TestCase, device):
assert wp.get_device(device).is_cuda, "Test device must be a CUDA device"
n = 1024 * 1024
ones = np.ones(n, dtype=np.float32)
# pageable host arrays for synchronous transfers
a_pageable1 = wp.array(ones, dtype=float, device="cpu")
a_pageable2 = wp.zeros_like(a_pageable1)
test.assertFalse(a_pageable1.pinned)
test.assertFalse(a_pageable2.pinned)
# pinned host arrays for asynchronous transfers
a_pinned1 = wp.array(ones, dtype=float, device="cpu", pinned=True)
a_pinned2 = wp.zeros_like(a_pinned1)
test.assertTrue(a_pinned1.pinned)
test.assertTrue(a_pinned2.pinned)
# device array
a_device = wp.zeros(n, dtype=float, device=device)
test.assertFalse(a_device.pinned)
wp.synchronize_device(device)
with wp.ScopedTimer("Synchronous copy", print=False) as pageable_timer:
wp.copy(a_device, a_pageable1)
wp.copy(a_pageable2, a_device)
wp.synchronize_device(device)
with wp.ScopedTimer("Asynchronous copy", print=False) as pinned_timer:
wp.copy(a_device, a_pinned1)
wp.copy(a_pinned2, a_device)
wp.synchronize_device(device)
# ensure correct results
assert_np_equal(a_pageable2.numpy(), ones)
assert_np_equal(a_pinned2.numpy(), ones)
# ensure that launching asynchronous transfers took less CPU time
test.assertTrue(pinned_timer.elapsed < pageable_timer.elapsed, "Pinned transfers did not take less CPU time")
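# Minimal sketch of the pattern this test motivates (hypothetical helper, not
# exercised by the suite): with a pinned source, a copy can be enqueued on a
# CUDA stream and overlap other GPU work instead of blocking the CPU.
def _upload_async_sketch(dst_device_arr: wp.array, src_pinned_arr: wp.array, stream: wp.Stream):
    with wp.ScopedStream(stream):
        # with pinned host memory this returns as soon as the copy is enqueued
        wp.copy(dst_device_arr, src_pinned_arr)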
devices = get_selected_cuda_test_devices()
class TestPinned(unittest.TestCase):
pass
add_function_test(TestPinned, "test_pinned", test_pinned, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 2,290 | Python | 28.753246 | 113 | 0.722271 |
NVIDIA/warp/warp/tests/test_adam.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
import warp.optim
import warp.sim
from warp.tests.unittest_utils import *
@wp.kernel
def objective(params: wp.array(dtype=float), score: wp.array(dtype=float)):
tid = wp.tid()
U = params[tid] * params[tid]
wp.atomic_add(score, 0, U)
# This test is inspired by https://machinelearningmastery.com/adam-optimization-from-scratch/
def test_adam_solve_float(test, device):
with wp.ScopedDevice(device):
params_start = np.array([0.1, 0.2], dtype=float)
score = wp.zeros(1, dtype=float, requires_grad=True)
params = wp.array(params_start, dtype=float, requires_grad=True)
tape = wp.Tape()
opt = warp.optim.Adam([params], lr=0.02, betas=(0.8, 0.999))
def gradient_func():
tape.reset()
score.zero_()
with tape:
wp.launch(kernel=objective, dim=len(params), inputs=[params, score])
tape.backward(score)
return [tape.gradients[params]]
niters = 100
opt.reset_internal_state()
for _ in range(niters):
opt.step(gradient_func())
result = params.numpy()
# optimum is at the origin, so the result should be close to it in all N dimensions.
tol = 1e-5
for r in result:
test.assertLessEqual(r, tol)
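# The pattern above generalizes: record the objective under a wp.Tape, call
# tape.backward(loss), and hand tape.gradients[param] to Adam.step(). The vec3
# and two-input tests below reuse it unchanged apart from the objective kernel.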
@wp.kernel
def objective_vec3(params: wp.array(dtype=wp.vec3), score: wp.array(dtype=float)):
tid = wp.tid()
U = wp.dot(params[tid], params[tid])
wp.atomic_add(score, 0, U)
# This test is inspired by https://machinelearningmastery.com/adam-optimization-from-scratch/
def test_adam_solve_vec3(test, device):
with wp.ScopedDevice(device):
params_start = np.array([[0.1, 0.2, -0.1]], dtype=float)
score = wp.zeros(1, dtype=float, requires_grad=True)
params = wp.array(params_start, dtype=wp.vec3, requires_grad=True)
tape = wp.Tape()
opt = warp.optim.Adam([params], lr=0.02, betas=(0.8, 0.999))
def gradient_func():
tape.reset()
score.zero_()
with tape:
wp.launch(kernel=objective_vec3, dim=len(params), inputs=[params, score])
tape.backward(score)
return [tape.gradients[params]]
niters = 100
opt.reset_internal_state()
for _ in range(niters):
opt.step(gradient_func())
result = params.numpy()
tol = 1e-5
# optimum is at the origin, so the result should be close to it in all N dimensions.
for r in result:
for v in r:
test.assertLessEqual(v, tol)
@wp.kernel
def objective_two_inputs_vec3(
params1: wp.array(dtype=wp.vec3), params2: wp.array(dtype=wp.vec3), score: wp.array(dtype=float)
):
tid = wp.tid()
U = wp.dot(params1[tid], params1[tid])
V = wp.dot(params2[tid], params2[tid])
wp.atomic_add(score, 0, U + V)
# This test is inspired by https://machinelearningmastery.com/adam-optimization-from-scratch/
def test_adam_solve_two_inputs(test, device):
with wp.ScopedDevice(device):
params_start1 = np.array([[0.1, 0.2, -0.1]], dtype=float)
params_start2 = np.array([[0.2, 0.1, 0.1]], dtype=float)
score = wp.zeros(1, dtype=float, requires_grad=True)
params1 = wp.array(params_start1, dtype=wp.vec3, requires_grad=True)
params2 = wp.array(params_start2, dtype=wp.vec3, requires_grad=True)
tape = wp.Tape()
opt = warp.optim.Adam([params1, params2], lr=0.02, betas=(0.8, 0.999))
def gradient_func():
tape.reset()
score.zero_()
with tape:
wp.launch(kernel=objective_two_inputs_vec3, dim=len(params1), inputs=[params1, params2, score])
tape.backward(score)
return [tape.gradients[params1], tape.gradients[params2]]
niters = 100
opt.reset_internal_state()
for _ in range(niters):
opt.step(gradient_func())
result = params1.numpy()
tol = 1e-5
# optimum is at the origin, so the result should be close to it in all N dimensions.
for r in result:
for v in r:
test.assertLessEqual(v, tol)
result = params2.numpy()
tol = 1e-5
# optimum is at the origin, so the result should be close to it in all N dimensions.
for r in result:
for v in r:
test.assertLessEqual(v, tol)
devices = get_test_devices()
class TestAdam(unittest.TestCase):
pass
add_function_test(TestAdam, "test_adam_solve_float", test_adam_solve_float, devices=devices)
add_function_test(TestAdam, "test_adam_solve_vec3", test_adam_solve_vec3, devices=devices)
add_function_test(TestAdam, "test_adam_solve_two_inputs", test_adam_solve_two_inputs, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 5,367 | Python | 33.410256 | 111 | 0.626048 |
NVIDIA/warp/warp/tests/test_mat_lite.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import warp as wp
from warp.tests.unittest_utils import *
mat32d = wp.mat(shape=(3, 2), dtype=wp.float64)
@wp.kernel
def test_matrix_constructor_value_func():
a = wp.mat22()
b = wp.matrix(a, shape=(2, 2))
c = mat32d()
d = mat32d(c, shape=(3, 2))
e = mat32d(wp.float64(1.0), wp.float64(2.0), wp.float64(1.0), wp.float64(2.0), wp.float64(1.0), wp.float64(2.0))
f = mat32d(
wp.vec3d(wp.float64(1.0), wp.float64(2.0), wp.float64(3.0)),
wp.vec3d(wp.float64(1.0), wp.float64(2.0), wp.float64(3.0)),
)
# Test matrix constructors using explicit type (float16)
# note that these tests specifically avoid creating kernels dynamically
# via generics / closure args (as other matrix tests do), since those
# use different code paths to resolve argument types, which has led
# to regressions.
@wp.kernel
def test_constructors_explicit_precision():
# construction for custom matrix types
eye = wp.identity(dtype=wp.float16, n=2)
zeros = wp.matrix(shape=(2, 2), dtype=wp.float16)
custom = wp.matrix(wp.float16(0.0), wp.float16(1.0), wp.float16(2.0), wp.float16(3.0), shape=(2, 2))
for i in range(2):
for j in range(2):
if i == j:
wp.expect_eq(eye[i, j], wp.float16(1.0))
else:
wp.expect_eq(eye[i, j], wp.float16(0.0))
wp.expect_eq(zeros[i, j], wp.float16(0.0))
wp.expect_eq(custom[i, j], wp.float16(i) * wp.float16(2.0) + wp.float16(j))
# Same as above but with a default (float/int) type
# which tests some different code paths that
# need to ensure types are correctly canonicalized
# during codegen
@wp.kernel
def test_constructors_default_precision():
# construction for default (float) matrix types
eye = wp.identity(dtype=float, n=2)
zeros = wp.matrix(shape=(2, 2), dtype=float)
custom = wp.matrix(0.0, 1.0, 2.0, 3.0, shape=(2, 2))
for i in range(2):
for j in range(2):
if i == j:
wp.expect_eq(eye[i, j], 1.0)
else:
wp.expect_eq(eye[i, j], 0.0)
wp.expect_eq(zeros[i, j], 0.0)
wp.expect_eq(custom[i, j], float(i) * 2.0 + float(j))
@wp.kernel
def test_matrix_mutation(expected: wp.types.matrix(shape=(10, 3), dtype=float)):
m = wp.matrix(shape=(10, 3), dtype=float)
# test direct element indexing
m[0, 0] = 1.0
m[0, 1] = 2.0
m[0, 2] = 3.0
    # The nested indexing (matrix->vector->scalar) below does not
    # currently modify m because m[0] returns the row vector by
    # value rather than by reference; this differs from NumPy,
    # which always returns by reference. It is not clear how we
    # could support the reference semantics together with auto-diff.
# m[0][1] = 2.0
# m[0][2] = 3.0
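    # A supported alternative (sketch): copy the row out, modify the copy, and
    # write the whole row back, as the loop below also does:
    #   row = m[0]
    #   row[1] = 2.0
    #   m[0] = row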
# test setting rows
for i in range(1, 10):
m[i] = m[i - 1] + wp.vec3(1.0, 2.0, 3.0)
wp.expect_eq(m, expected)
devices = get_test_devices()
class TestMatLite(unittest.TestCase):
pass
add_kernel_test(TestMatLite, test_matrix_constructor_value_func, dim=1, devices=devices)
add_kernel_test(TestMatLite, test_constructors_explicit_precision, dim=1, devices=devices)
add_kernel_test(TestMatLite, test_constructors_default_precision, dim=1, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=True)
| 3,783 | Python | 32.192982 | 116 | 0.646048 |
NVIDIA/warp/warp/tests/test_quat.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
import warp.sim
from warp.tests.unittest_utils import *
np_float_types = [np.float32, np.float64, np.float16]
kernel_cache = {}
def getkernel(func, suffix=""):
key = func.__name__ + "_" + suffix
if key not in kernel_cache:
kernel_cache[key] = wp.Kernel(func=func, key=key)
return kernel_cache[key]
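# Usage sketch: getkernel turns a plain annotated function into a cached
# wp.Kernel, so the per-dtype kernel variants built inside the test closures
# below are compiled once and reused across tests, e.g.
#   kernel = getkernel(check_quat_dot, suffix=dtype.__name__)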
def get_select_kernel(dtype):
def output_select_kernel_fn(
input: wp.array(dtype=dtype),
index: int,
out: wp.array(dtype=dtype),
):
out[0] = input[index]
return getkernel(output_select_kernel_fn, suffix=dtype.__name__)
############################################################
def test_constructors(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec3 = wp.types.vector(length=3, dtype=wptype)
quat = wp.types.quaternion(dtype=wptype)
def check_component_constructor(
input: wp.array(dtype=wptype),
q: wp.array(dtype=wptype),
):
qresult = quat(input[0], input[1], input[2], input[3])
# multiply the output by 2 so we've got something to backpropagate:
q[0] = wptype(2) * qresult[0]
q[1] = wptype(2) * qresult[1]
q[2] = wptype(2) * qresult[2]
q[3] = wptype(2) * qresult[3]
def check_vector_constructor(
input: wp.array(dtype=wptype),
q: wp.array(dtype=wptype),
):
qresult = quat(vec3(input[0], input[1], input[2]), input[3])
# multiply the output by 2 so we've got something to backpropagate:
q[0] = wptype(2) * qresult[0]
q[1] = wptype(2) * qresult[1]
q[2] = wptype(2) * qresult[2]
q[3] = wptype(2) * qresult[3]
kernel = getkernel(check_component_constructor, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
vec_kernel = getkernel(check_vector_constructor, suffix=dtype.__name__)
if register_kernels:
return
input = wp.array(rng.standard_normal(size=4).astype(dtype), requires_grad=True, device=device)
output = wp.zeros_like(input)
wp.launch(kernel, dim=1, inputs=[input], outputs=[output], device=device)
assert_np_equal(output.numpy(), 2 * input.numpy(), tol=tol)
for i in range(4):
cmp = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[input], outputs=[output], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[output, i], outputs=[cmp], device=device)
tape.backward(loss=cmp)
expectedgrads = np.zeros(len(input))
expectedgrads[i] = 2
assert_np_equal(tape.gradients[input].numpy(), expectedgrads)
tape.zero()
input = wp.array(rng.standard_normal(size=4).astype(dtype), requires_grad=True, device=device)
output = wp.zeros_like(input)
wp.launch(vec_kernel, dim=1, inputs=[input], outputs=[output], device=device)
assert_np_equal(output.numpy(), 2 * input.numpy(), tol=tol)
for i in range(4):
cmp = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(vec_kernel, dim=1, inputs=[input], outputs=[output], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[output, i], outputs=[cmp], device=device)
tape.backward(loss=cmp)
expectedgrads = np.zeros(len(input))
expectedgrads[i] = 2
assert_np_equal(tape.gradients[input].numpy(), expectedgrads)
tape.zero()
def test_casting_constructors(test, device, dtype, register_kernels=False):
np_type = np.dtype(dtype)
wp_type = wp.types.np_dtype_to_warp_type[np_type]
quat = wp.types.quaternion(dtype=wp_type)
np16 = np.dtype(np.float16)
wp16 = wp.types.np_dtype_to_warp_type[np16]
np32 = np.dtype(np.float32)
wp32 = wp.types.np_dtype_to_warp_type[np32]
np64 = np.dtype(np.float64)
wp64 = wp.types.np_dtype_to_warp_type[np64]
def cast_float16(a: wp.array(dtype=wp_type, ndim=2), b: wp.array(dtype=wp16, ndim=2)):
tid = wp.tid()
q1 = quat(a[tid, 0], a[tid, 1], a[tid, 2], a[tid, 3])
q2 = wp.quaternion(q1, dtype=wp16)
b[tid, 0] = q2[0]
b[tid, 1] = q2[1]
b[tid, 2] = q2[2]
b[tid, 3] = q2[3]
def cast_float32(a: wp.array(dtype=wp_type, ndim=2), b: wp.array(dtype=wp32, ndim=2)):
tid = wp.tid()
q1 = quat(a[tid, 0], a[tid, 1], a[tid, 2], a[tid, 3])
q2 = wp.quaternion(q1, dtype=wp32)
b[tid, 0] = q2[0]
b[tid, 1] = q2[1]
b[tid, 2] = q2[2]
b[tid, 3] = q2[3]
def cast_float64(a: wp.array(dtype=wp_type, ndim=2), b: wp.array(dtype=wp64, ndim=2)):
tid = wp.tid()
q1 = quat(a[tid, 0], a[tid, 1], a[tid, 2], a[tid, 3])
q2 = wp.quaternion(q1, dtype=wp64)
b[tid, 0] = q2[0]
b[tid, 1] = q2[1]
b[tid, 2] = q2[2]
b[tid, 3] = q2[3]
kernel_16 = getkernel(cast_float16, suffix=dtype.__name__)
kernel_32 = getkernel(cast_float32, suffix=dtype.__name__)
kernel_64 = getkernel(cast_float64, suffix=dtype.__name__)
if register_kernels:
return
# check casting to float 16
a = wp.array(np.ones((1, 4), dtype=np_type), dtype=wp_type, requires_grad=True, device=device)
b = wp.array(np.zeros((1, 4), dtype=np16), dtype=wp16, requires_grad=True, device=device)
b_result = np.ones((1, 4), dtype=np16)
b_grad = wp.array(np.ones((1, 4), dtype=np16), dtype=wp16, device=device)
a_grad = wp.array(np.ones((1, 4), dtype=np_type), dtype=wp_type, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel=kernel_16, dim=1, inputs=[a, b], device=device)
tape.backward(grads={b: b_grad})
out = tape.gradients[a].numpy()
assert_np_equal(b.numpy(), b_result)
assert_np_equal(out, a_grad.numpy())
# check casting to float 32
a = wp.array(np.ones((1, 4), dtype=np_type), dtype=wp_type, requires_grad=True, device=device)
b = wp.array(np.zeros((1, 4), dtype=np32), dtype=wp32, requires_grad=True, device=device)
b_result = np.ones((1, 4), dtype=np32)
b_grad = wp.array(np.ones((1, 4), dtype=np32), dtype=wp32, device=device)
a_grad = wp.array(np.ones((1, 4), dtype=np_type), dtype=wp_type, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel=kernel_32, dim=1, inputs=[a, b], device=device)
tape.backward(grads={b: b_grad})
out = tape.gradients[a].numpy()
assert_np_equal(b.numpy(), b_result)
assert_np_equal(out, a_grad.numpy())
# check casting to float 64
a = wp.array(np.ones((1, 4), dtype=np_type), dtype=wp_type, requires_grad=True, device=device)
b = wp.array(np.zeros((1, 4), dtype=np64), dtype=wp64, requires_grad=True, device=device)
b_result = np.ones((1, 4), dtype=np64)
b_grad = wp.array(np.ones((1, 4), dtype=np64), dtype=wp64, device=device)
a_grad = wp.array(np.ones((1, 4), dtype=np_type), dtype=wp_type, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel=kernel_64, dim=1, inputs=[a, b], device=device)
tape.backward(grads={b: b_grad})
out = tape.gradients[a].numpy()
assert_np_equal(b.numpy(), b_result)
assert_np_equal(out, a_grad.numpy())
def test_inverse(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 2.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_quat_inverse(
input: wp.array(dtype=wptype),
shouldbeidentity: wp.array(dtype=quat),
q: wp.array(dtype=wptype),
):
qread = quat(input[0], input[1], input[2], input[3])
qresult = wp.quat_inverse(qread)
# this inverse should work for normalized quaternions:
shouldbeidentity[0] = wp.normalize(qread) * wp.quat_inverse(wp.normalize(qread))
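        # (for a unit quaternion the inverse equals the conjugate: the x, y, z
        # components flip sign while w is preserved, which the -2/+2 gradient
        # pattern checked below confirms)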
# multiply the output by 2 so we've got something to backpropagate:
q[0] = wptype(2) * qresult[0]
q[1] = wptype(2) * qresult[1]
q[2] = wptype(2) * qresult[2]
q[3] = wptype(2) * qresult[3]
kernel = getkernel(check_quat_inverse, suffix=dtype.__name__)
if register_kernels:
return
input = wp.array(rng.standard_normal(size=4).astype(dtype), requires_grad=True, device=device)
shouldbeidentity = wp.array(np.zeros((1, 4)), dtype=quat, requires_grad=True, device=device)
output = wp.zeros_like(input)
wp.launch(kernel, dim=1, inputs=[input], outputs=[shouldbeidentity, output], device=device)
assert_np_equal(shouldbeidentity.numpy(), np.array([0, 0, 0, 1]), tol=tol)
for i in range(4):
cmp = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[input], outputs=[shouldbeidentity, output], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[output, i], outputs=[cmp], device=device)
tape.backward(loss=cmp)
expectedgrads = np.zeros(len(input))
expectedgrads[i] = -2 if i != 3 else 2
assert_np_equal(tape.gradients[input].numpy(), expectedgrads)
tape.zero()
def test_dotproduct(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
def check_quat_dot(
s: wp.array(dtype=quat),
v: wp.array(dtype=quat),
dot: wp.array(dtype=wptype),
):
dot[0] = wptype(2) * wp.dot(v[0], s[0])
dotkernel = getkernel(check_quat_dot, suffix=dtype.__name__)
if register_kernels:
return
s = wp.array(rng.standard_normal(size=4).astype(dtype), dtype=quat, requires_grad=True, device=device)
v = wp.array(rng.standard_normal(size=4).astype(dtype), dtype=quat, requires_grad=True, device=device)
dot = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
dotkernel,
dim=1,
inputs=[
s,
v,
],
outputs=[dot],
device=device,
)
assert_np_equal(dot.numpy()[0], 2.0 * (v.numpy() * s.numpy()).sum(), tol=tol)
tape.backward(loss=dot)
sgrads = tape.gradients[s].numpy()[0]
expected_grads = 2.0 * v.numpy()[0]
assert_np_equal(sgrads, expected_grads, tol=10 * tol)
vgrads = tape.gradients[v].numpy()[0]
expected_grads = 2.0 * s.numpy()[0]
assert_np_equal(vgrads, expected_grads, tol=tol)
def test_length(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-7,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
def check_quat_length(
q: wp.array(dtype=quat),
l: wp.array(dtype=wptype),
l2: wp.array(dtype=wptype),
):
l[0] = wptype(2) * wp.length(q[0])
l2[0] = wptype(2) * wp.length_sq(q[0])
kernel = getkernel(check_quat_length, suffix=dtype.__name__)
if register_kernels:
return
q = wp.array(rng.standard_normal(size=4).astype(dtype), dtype=quat, requires_grad=True, device=device)
l = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
l2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
q,
],
outputs=[l, l2],
device=device,
)
assert_np_equal(l.numpy()[0], 2 * np.linalg.norm(q.numpy()), tol=10 * tol)
assert_np_equal(l2.numpy()[0], 2 * np.linalg.norm(q.numpy()) ** 2, tol=10 * tol)
tape.backward(loss=l)
grad = tape.gradients[q].numpy()[0]
expected_grad = 2 * q.numpy()[0] / np.linalg.norm(q.numpy())
assert_np_equal(grad, expected_grad, tol=10 * tol)
tape.zero()
tape.backward(loss=l2)
grad = tape.gradients[q].numpy()[0]
expected_grad = 4 * q.numpy()[0]
assert_np_equal(grad, expected_grad, tol=10 * tol)
tape.zero()
def test_normalize(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
def check_normalize(
q: wp.array(dtype=quat),
n0: wp.array(dtype=wptype),
n1: wp.array(dtype=wptype),
n2: wp.array(dtype=wptype),
n3: wp.array(dtype=wptype),
):
n = wptype(2) * (wp.normalize(q[0]))
n0[0] = n[0]
n1[0] = n[1]
n2[0] = n[2]
n3[0] = n[3]
def check_normalize_alt(
q: wp.array(dtype=quat),
n0: wp.array(dtype=wptype),
n1: wp.array(dtype=wptype),
n2: wp.array(dtype=wptype),
n3: wp.array(dtype=wptype),
):
n = wptype(2) * (q[0] / wp.length(q[0]))
n0[0] = n[0]
n1[0] = n[1]
n2[0] = n[2]
n3[0] = n[3]
normalize_kernel = getkernel(check_normalize, suffix=dtype.__name__)
normalize_alt_kernel = getkernel(check_normalize_alt, suffix=dtype.__name__)
if register_kernels:
return
# I've already tested the things I'm using in check_normalize_alt, so I'll just
# make sure the two are giving the same results/gradients
q = wp.array(rng.standard_normal(size=4).astype(dtype), dtype=quat, requires_grad=True, device=device)
n0 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n1 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n0_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n1_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n2_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n3_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
outputs0 = [
n0,
n1,
n2,
n3,
]
tape0 = wp.Tape()
with tape0:
wp.launch(normalize_kernel, dim=1, inputs=[q], outputs=outputs0, device=device)
outputs1 = [
n0_alt,
n1_alt,
n2_alt,
n3_alt,
]
tape1 = wp.Tape()
with tape1:
wp.launch(
normalize_alt_kernel,
dim=1,
inputs=[
q,
],
outputs=outputs1,
device=device,
)
assert_np_equal(n0.numpy()[0], n0_alt.numpy()[0], tol=tol)
assert_np_equal(n1.numpy()[0], n1_alt.numpy()[0], tol=tol)
assert_np_equal(n2.numpy()[0], n2_alt.numpy()[0], tol=tol)
assert_np_equal(n3.numpy()[0], n3_alt.numpy()[0], tol=tol)
for ncmp, ncmpalt in zip(outputs0, outputs1):
tape0.backward(loss=ncmp)
tape1.backward(loss=ncmpalt)
assert_np_equal(tape0.gradients[q].numpy()[0], tape1.gradients[q].numpy()[0], tol=tol)
tape0.zero()
tape1.zero()
def test_addition(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
def check_quat_add(
q: wp.array(dtype=quat),
v: wp.array(dtype=quat),
r0: wp.array(dtype=wptype),
r1: wp.array(dtype=wptype),
r2: wp.array(dtype=wptype),
r3: wp.array(dtype=wptype),
):
result = q[0] + v[0]
r0[0] = wptype(2) * result[0]
r1[0] = wptype(2) * result[1]
r2[0] = wptype(2) * result[2]
r3[0] = wptype(2) * result[3]
kernel = getkernel(check_quat_add, suffix=dtype.__name__)
if register_kernels:
return
q = wp.array(rng.standard_normal(size=4).astype(dtype), dtype=quat, requires_grad=True, device=device)
v = wp.array(rng.standard_normal(size=4).astype(dtype), dtype=quat, requires_grad=True, device=device)
r0 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r1 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
q,
v,
],
outputs=[r0, r1, r2, r3],
device=device,
)
assert_np_equal(r0.numpy()[0], 2 * (v.numpy()[0, 0] + q.numpy()[0, 0]), tol=tol)
assert_np_equal(r1.numpy()[0], 2 * (v.numpy()[0, 1] + q.numpy()[0, 1]), tol=tol)
assert_np_equal(r2.numpy()[0], 2 * (v.numpy()[0, 2] + q.numpy()[0, 2]), tol=tol)
assert_np_equal(r3.numpy()[0], 2 * (v.numpy()[0, 3] + q.numpy()[0, 3]), tol=tol)
for i, l in enumerate([r0, r1, r2, r3]):
tape.backward(loss=l)
qgrads = tape.gradients[q].numpy()[0]
expected_grads = np.zeros_like(qgrads)
expected_grads[i] = 2
assert_np_equal(qgrads, expected_grads, tol=10 * tol)
vgrads = tape.gradients[v].numpy()[0]
assert_np_equal(vgrads, expected_grads, tol=tol)
tape.zero()
def test_subtraction(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
def check_quat_sub(
q: wp.array(dtype=quat),
v: wp.array(dtype=quat),
r0: wp.array(dtype=wptype),
r1: wp.array(dtype=wptype),
r2: wp.array(dtype=wptype),
r3: wp.array(dtype=wptype),
):
result = v[0] - q[0]
r0[0] = wptype(2) * result[0]
r1[0] = wptype(2) * result[1]
r2[0] = wptype(2) * result[2]
r3[0] = wptype(2) * result[3]
kernel = getkernel(check_quat_sub, suffix=dtype.__name__)
if register_kernels:
return
q = wp.array(rng.standard_normal(size=4).astype(dtype), dtype=quat, requires_grad=True, device=device)
v = wp.array(rng.standard_normal(size=4).astype(dtype), dtype=quat, requires_grad=True, device=device)
r0 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r1 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
q,
v,
],
outputs=[r0, r1, r2, r3],
device=device,
)
assert_np_equal(r0.numpy()[0], 2 * (v.numpy()[0, 0] - q.numpy()[0, 0]), tol=tol)
assert_np_equal(r1.numpy()[0], 2 * (v.numpy()[0, 1] - q.numpy()[0, 1]), tol=tol)
assert_np_equal(r2.numpy()[0], 2 * (v.numpy()[0, 2] - q.numpy()[0, 2]), tol=tol)
assert_np_equal(r3.numpy()[0], 2 * (v.numpy()[0, 3] - q.numpy()[0, 3]), tol=tol)
for i, l in enumerate([r0, r1, r2, r3]):
tape.backward(loss=l)
qgrads = tape.gradients[q].numpy()[0]
expected_grads = np.zeros_like(qgrads)
expected_grads[i] = -2
assert_np_equal(qgrads, expected_grads, tol=10 * tol)
vgrads = tape.gradients[v].numpy()[0]
expected_grads[i] = 2
assert_np_equal(vgrads, expected_grads, tol=tol)
tape.zero()
def test_scalar_multiplication(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
def check_quat_scalar_mul(
s: wp.array(dtype=wptype),
q: wp.array(dtype=quat),
l0: wp.array(dtype=wptype),
l1: wp.array(dtype=wptype),
l2: wp.array(dtype=wptype),
l3: wp.array(dtype=wptype),
r0: wp.array(dtype=wptype),
r1: wp.array(dtype=wptype),
r2: wp.array(dtype=wptype),
r3: wp.array(dtype=wptype),
):
lresult = s[0] * q[0]
rresult = q[0] * s[0]
# multiply outputs by 2 so we've got something to backpropagate:
l0[0] = wptype(2) * lresult[0]
l1[0] = wptype(2) * lresult[1]
l2[0] = wptype(2) * lresult[2]
l3[0] = wptype(2) * lresult[3]
r0[0] = wptype(2) * rresult[0]
r1[0] = wptype(2) * rresult[1]
r2[0] = wptype(2) * rresult[2]
r3[0] = wptype(2) * rresult[3]
kernel = getkernel(check_quat_scalar_mul, suffix=dtype.__name__)
if register_kernels:
return
s = wp.array(rng.standard_normal(size=1).astype(dtype), requires_grad=True, device=device)
q = wp.array(rng.standard_normal(size=(1, 4)).astype(dtype), dtype=quat, requires_grad=True, device=device)
l0 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
l1 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
l2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
l3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r0 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r1 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[s, q],
outputs=[
l0,
l1,
l2,
l3,
r0,
r1,
r2,
r3,
],
device=device,
)
assert_np_equal(l0.numpy()[0], 2 * s.numpy()[0] * q.numpy()[0, 0], tol=tol)
assert_np_equal(l1.numpy()[0], 2 * s.numpy()[0] * q.numpy()[0, 1], tol=tol)
assert_np_equal(l2.numpy()[0], 2 * s.numpy()[0] * q.numpy()[0, 2], tol=tol)
assert_np_equal(l3.numpy()[0], 2 * s.numpy()[0] * q.numpy()[0, 3], tol=tol)
assert_np_equal(r0.numpy()[0], 2 * s.numpy()[0] * q.numpy()[0, 0], tol=tol)
assert_np_equal(r1.numpy()[0], 2 * s.numpy()[0] * q.numpy()[0, 1], tol=tol)
assert_np_equal(r2.numpy()[0], 2 * s.numpy()[0] * q.numpy()[0, 2], tol=tol)
assert_np_equal(r3.numpy()[0], 2 * s.numpy()[0] * q.numpy()[0, 3], tol=tol)
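    # gradient check: outputs are 2 * s * q_i, so d/ds = 2 * q_i and d/dq_j = 2 * s * delta_ij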
if dtype in np_float_types:
for i, outputs in enumerate([(l0, r0), (l1, r1), (l2, r2), (l3, r3)]):
for l in outputs:
tape.backward(loss=l)
sgrad = tape.gradients[s].numpy()[0]
assert_np_equal(sgrad, 2 * q.numpy()[0, i], tol=tol)
allgrads = tape.gradients[q].numpy()[0]
expected_grads = np.zeros_like(allgrads)
expected_grads[i] = s.numpy()[0] * 2
assert_np_equal(allgrads, expected_grads, tol=10 * tol)
tape.zero()
def test_scalar_division(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
def check_quat_scalar_div(
s: wp.array(dtype=wptype),
q: wp.array(dtype=quat),
r0: wp.array(dtype=wptype),
r1: wp.array(dtype=wptype),
r2: wp.array(dtype=wptype),
r3: wp.array(dtype=wptype),
):
result = q[0] / s[0]
# multiply outputs by 2 so we've got something to backpropagate:
r0[0] = wptype(2) * result[0]
r1[0] = wptype(2) * result[1]
r2[0] = wptype(2) * result[2]
r3[0] = wptype(2) * result[3]
kernel = getkernel(check_quat_scalar_div, suffix=dtype.__name__)
if register_kernels:
return
s = wp.array(rng.standard_normal(size=1).astype(dtype), requires_grad=True, device=device)
q = wp.array(rng.standard_normal(size=(1, 4)).astype(dtype), dtype=quat, requires_grad=True, device=device)
r0 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r1 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[s, q],
outputs=[
r0,
r1,
r2,
r3,
],
device=device,
)
assert_np_equal(r0.numpy()[0], 2 * q.numpy()[0, 0] / s.numpy()[0], tol=tol)
assert_np_equal(r1.numpy()[0], 2 * q.numpy()[0, 1] / s.numpy()[0], tol=tol)
assert_np_equal(r2.numpy()[0], 2 * q.numpy()[0, 2] / s.numpy()[0], tol=tol)
assert_np_equal(r3.numpy()[0], 2 * q.numpy()[0, 3] / s.numpy()[0], tol=tol)
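    # gradient check: r_i = 2 * q_i / s, so dr_i/ds = -2 * q_i / s^2 and dr_i/dq_j = (2 / s) * delta_ij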
if dtype in np_float_types:
for i, r in enumerate([r0, r1, r2, r3]):
tape.backward(loss=r)
sgrad = tape.gradients[s].numpy()[0]
assert_np_equal(sgrad, -2 * q.numpy()[0, i] / (s.numpy()[0] * s.numpy()[0]), tol=tol)
allgrads = tape.gradients[q].numpy()[0]
expected_grads = np.zeros_like(allgrads)
expected_grads[i] = 2 / s.numpy()[0]
assert_np_equal(allgrads, expected_grads, tol=10 * tol)
tape.zero()
def test_quat_multiplication(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
def check_quat_mul(
s: wp.array(dtype=quat),
q: wp.array(dtype=quat),
r0: wp.array(dtype=wptype),
r1: wp.array(dtype=wptype),
r2: wp.array(dtype=wptype),
r3: wp.array(dtype=wptype),
):
result = s[0] * q[0]
# multiply outputs by 2 so we've got something to backpropagate:
r0[0] = wptype(2) * result[0]
r1[0] = wptype(2) * result[1]
r2[0] = wptype(2) * result[2]
r3[0] = wptype(2) * result[3]
kernel = getkernel(check_quat_mul, suffix=dtype.__name__)
if register_kernels:
return
s = wp.array(rng.standard_normal(size=(1, 4)).astype(dtype), dtype=quat, requires_grad=True, device=device)
q = wp.array(rng.standard_normal(size=(1, 4)).astype(dtype), dtype=quat, requires_grad=True, device=device)
r0 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r1 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[s, q],
outputs=[
r0,
r1,
r2,
r3,
],
device=device,
)
a = s.numpy()
b = q.numpy()
assert_np_equal(
r0.numpy()[0], 2 * (a[0, 3] * b[0, 0] + b[0, 3] * a[0, 0] + a[0, 1] * b[0, 2] - b[0, 1] * a[0, 2]), tol=tol
)
assert_np_equal(
r1.numpy()[0], 2 * (a[0, 3] * b[0, 1] + b[0, 3] * a[0, 1] + a[0, 2] * b[0, 0] - b[0, 2] * a[0, 0]), tol=tol
)
assert_np_equal(
r2.numpy()[0], 2 * (a[0, 3] * b[0, 2] + b[0, 3] * a[0, 2] + a[0, 0] * b[0, 1] - b[0, 0] * a[0, 1]), tol=tol
)
assert_np_equal(
r3.numpy()[0], 2 * (a[0, 3] * b[0, 3] - a[0, 0] * b[0, 0] - a[0, 1] * b[0, 1] - a[0, 2] * b[0, 2]), tol=tol
)
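    # gradient check: with Warp's (x, y, z, w) layout each component of the Hamilton
    # product s * q is bilinear in the inputs, so the expected gradients are the
    # constant coefficient vectors below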
tape.backward(loss=r0)
agrad = tape.gradients[s].numpy()[0]
assert_np_equal(agrad, 2 * np.array([b[0, 3], b[0, 2], -b[0, 1], b[0, 0]]), tol=tol)
bgrad = tape.gradients[q].numpy()[0]
assert_np_equal(bgrad, 2 * np.array([a[0, 3], -a[0, 2], a[0, 1], a[0, 0]]), tol=tol)
tape.zero()
tape.backward(loss=r1)
agrad = tape.gradients[s].numpy()[0]
assert_np_equal(agrad, 2 * np.array([-b[0, 2], b[0, 3], b[0, 0], b[0, 1]]), tol=tol)
bgrad = tape.gradients[q].numpy()[0]
assert_np_equal(bgrad, 2 * np.array([a[0, 2], a[0, 3], -a[0, 0], a[0, 1]]), tol=tol)
tape.zero()
tape.backward(loss=r2)
agrad = tape.gradients[s].numpy()[0]
assert_np_equal(agrad, 2 * np.array([b[0, 1], -b[0, 0], b[0, 3], b[0, 2]]), tol=tol)
bgrad = tape.gradients[q].numpy()[0]
assert_np_equal(bgrad, 2 * np.array([-a[0, 1], a[0, 0], a[0, 3], a[0, 2]]), tol=tol)
tape.zero()
tape.backward(loss=r3)
agrad = tape.gradients[s].numpy()[0]
assert_np_equal(agrad, 2 * np.array([-b[0, 0], -b[0, 1], -b[0, 2], b[0, 3]]), tol=tol)
bgrad = tape.gradients[q].numpy()[0]
assert_np_equal(bgrad, 2 * np.array([-a[0, 0], -a[0, 1], -a[0, 2], a[0, 3]]), tol=tol)
tape.zero()
def test_indexing(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
def check_quat_indexing(
q: wp.array(dtype=quat),
r0: wp.array(dtype=wptype),
r1: wp.array(dtype=wptype),
r2: wp.array(dtype=wptype),
r3: wp.array(dtype=wptype),
):
# multiply outputs by 2 so we've got something to backpropagate:
r0[0] = wptype(2) * q[0][0]
r1[0] = wptype(2) * q[0][1]
r2[0] = wptype(2) * q[0][2]
r3[0] = wptype(2) * q[0][3]
kernel = getkernel(check_quat_indexing, suffix=dtype.__name__)
if register_kernels:
return
q = wp.array(rng.standard_normal(size=(1, 4)).astype(dtype), dtype=quat, requires_grad=True, device=device)
r0 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r1 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[q], outputs=[r0, r1, r2, r3], device=device)
for i, l in enumerate([r0, r1, r2, r3]):
tape.backward(loss=l)
allgrads = tape.gradients[q].numpy()[0]
expected_grads = np.zeros_like(allgrads)
expected_grads[i] = 2
assert_np_equal(allgrads, expected_grads, tol=tol)
tape.zero()
assert_np_equal(r0.numpy()[0], 2.0 * q.numpy()[0, 0], tol=tol)
assert_np_equal(r1.numpy()[0], 2.0 * q.numpy()[0, 1], tol=tol)
assert_np_equal(r2.numpy()[0], 2.0 * q.numpy()[0, 2], tol=tol)
assert_np_equal(r3.numpy()[0], 2.0 * q.numpy()[0, 3], tol=tol)
def test_quat_lerp(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
def check_quat_lerp(
s: wp.array(dtype=quat),
q: wp.array(dtype=quat),
t: wp.array(dtype=wptype),
r0: wp.array(dtype=wptype),
r1: wp.array(dtype=wptype),
r2: wp.array(dtype=wptype),
r3: wp.array(dtype=wptype),
):
result = wp.lerp(s[0], q[0], t[0])
# multiply outputs by 2 so we've got something to backpropagate:
r0[0] = wptype(2) * result[0]
r1[0] = wptype(2) * result[1]
r2[0] = wptype(2) * result[2]
r3[0] = wptype(2) * result[3]
kernel = getkernel(check_quat_lerp, suffix=dtype.__name__)
if register_kernels:
return
s = wp.array(rng.standard_normal(size=(1, 4)).astype(dtype), dtype=quat, requires_grad=True, device=device)
q = wp.array(rng.standard_normal(size=(1, 4)).astype(dtype), dtype=quat, requires_grad=True, device=device)
t = wp.array(rng.uniform(size=1).astype(dtype), dtype=wptype, requires_grad=True, device=device)
r0 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r1 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
r3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[s, q, t],
outputs=[
r0,
r1,
r2,
r3,
],
device=device,
)
a = s.numpy()
b = q.numpy()
tt = t.numpy()
assert_np_equal(r0.numpy()[0], 2 * ((1 - tt) * a[0, 0] + tt * b[0, 0]), tol=tol)
assert_np_equal(r1.numpy()[0], 2 * ((1 - tt) * a[0, 1] + tt * b[0, 1]), tol=tol)
assert_np_equal(r2.numpy()[0], 2 * ((1 - tt) * a[0, 2] + tt * b[0, 2]), tol=tol)
assert_np_equal(r3.numpy()[0], 2 * ((1 - tt) * a[0, 3] + tt * b[0, 3]), tol=tol)
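    # gradient check: r_i = 2 * ((1 - t) * a_i + t * b_i), so d/da_i = 2 * (1 - t),
    # d/db_i = 2 * t and d/dt = 2 * (b_i - a_i)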
for i, l in enumerate([r0, r1, r2, r3]):
tape.backward(loss=l)
agrad = tape.gradients[s].numpy()[0]
bgrad = tape.gradients[q].numpy()[0]
tgrad = tape.gradients[t].numpy()[0]
expected_grads = np.zeros_like(agrad)
expected_grads[i] = 2 * (1 - tt)
assert_np_equal(agrad, expected_grads, tol=tol)
expected_grads[i] = 2 * tt
assert_np_equal(bgrad, expected_grads, tol=tol)
assert_np_equal(tgrad, 2 * (b[0, i] - a[0, i]), tol=tol)
tape.zero()
def test_quat_rotate(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
def check_quat_rotate(
q: wp.array(dtype=quat),
v: wp.array(dtype=vec3),
outputs: wp.array(dtype=wptype),
outputs_inv: wp.array(dtype=wptype),
outputs_manual: wp.array(dtype=wptype),
outputs_inv_manual: wp.array(dtype=wptype),
):
result = wp.quat_rotate(q[0], v[0])
result_inv = wp.quat_rotate_inv(q[0], v[0])
qv = vec3(q[0][0], q[0][1], q[0][2])
qw = q[0][3]
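        # for a unit quaternion, q v q* expands to v * (2w^2 - 1) + 2w * (qv x v) + 2 * qv * (qv . v)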
result_manual = v[0] * (wptype(2) * qw * qw - wptype(1))
result_manual += wp.cross(qv, v[0]) * qw * wptype(2)
result_manual += qv * wp.dot(qv, v[0]) * wptype(2)
result_inv_manual = v[0] * (wptype(2) * qw * qw - wptype(1))
result_inv_manual -= wp.cross(qv, v[0]) * qw * wptype(2)
result_inv_manual += qv * wp.dot(qv, v[0]) * wptype(2)
for i in range(3):
# multiply outputs by 2 so we've got something to backpropagate:
outputs[i] = wptype(2) * result[i]
outputs_inv[i] = wptype(2) * result_inv[i]
outputs_manual[i] = wptype(2) * result_manual[i]
outputs_inv_manual[i] = wptype(2) * result_inv_manual[i]
kernel = getkernel(check_quat_rotate, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
q = rng.standard_normal(size=(1, 4))
q /= np.linalg.norm(q)
q = wp.array(q.astype(dtype), dtype=quat, requires_grad=True, device=device)
v = wp.array(0.5 * rng.standard_normal(size=(1, 3)).astype(dtype), dtype=vec3, requires_grad=True, device=device)
# test values against the manually computed result:
outputs = wp.zeros(3, dtype=wptype, requires_grad=True, device=device)
outputs_inv = wp.zeros(3, dtype=wptype, requires_grad=True, device=device)
outputs_manual = wp.zeros(3, dtype=wptype, requires_grad=True, device=device)
outputs_inv_manual = wp.zeros(3, dtype=wptype, requires_grad=True, device=device)
wp.launch(
kernel,
dim=1,
inputs=[q, v],
outputs=[
outputs,
outputs_inv,
outputs_manual,
outputs_inv_manual,
],
device=device,
)
assert_np_equal(outputs.numpy(), outputs_manual.numpy(), tol=tol)
assert_np_equal(outputs_inv.numpy(), outputs_inv_manual.numpy(), tol=tol)
# test gradients against the manually computed result:
for i in range(3):
cmp = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
cmp_inv = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
cmp_manual = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
cmp_inv_manual = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[q, v],
outputs=[
outputs,
outputs_inv,
outputs_manual,
outputs_inv_manual,
],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, i], outputs=[cmp], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs_inv, i], outputs=[cmp_inv], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outputs_manual, i], outputs=[cmp_manual], device=device)
wp.launch(
output_select_kernel, dim=1, inputs=[outputs_inv_manual, i], outputs=[cmp_inv_manual], device=device
)
tape.backward(loss=cmp)
qgrads = 1.0 * tape.gradients[q].numpy()
vgrads = 1.0 * tape.gradients[v].numpy()
tape.zero()
tape.backward(loss=cmp_inv)
qgrads_inv = 1.0 * tape.gradients[q].numpy()
vgrads_inv = 1.0 * tape.gradients[v].numpy()
tape.zero()
tape.backward(loss=cmp_manual)
qgrads_manual = 1.0 * tape.gradients[q].numpy()
vgrads_manual = 1.0 * tape.gradients[v].numpy()
tape.zero()
tape.backward(loss=cmp_inv_manual)
qgrads_inv_manual = 1.0 * tape.gradients[q].numpy()
vgrads_inv_manual = 1.0 * tape.gradients[v].numpy()
tape.zero()
assert_np_equal(qgrads, qgrads_manual, tol=tol)
assert_np_equal(vgrads, vgrads_manual, tol=tol)
assert_np_equal(qgrads_inv, qgrads_inv_manual, tol=tol)
assert_np_equal(vgrads_inv, vgrads_inv_manual, tol=tol)
def test_quat_to_matrix(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
quat = wp.types.quaternion(dtype=wptype)
mat3 = wp.types.matrix(shape=(3, 3), dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
def check_quat_to_matrix(
q: wp.array(dtype=quat),
outputs: wp.array(dtype=wptype),
outputs_manual: wp.array(dtype=wptype),
):
result = wp.quat_to_matrix(q[0])
xaxis = wp.quat_rotate(
q[0],
vec3(
wptype(1),
wptype(0),
wptype(0),
),
)
yaxis = wp.quat_rotate(
q[0],
vec3(
wptype(0),
wptype(1),
wptype(0),
),
)
zaxis = wp.quat_rotate(
q[0],
vec3(
wptype(0),
wptype(0),
wptype(1),
),
)
result_manual = mat3(xaxis, yaxis, zaxis)
idx = 0
for i in range(3):
for j in range(3):
# multiply outputs by 2 so we've got something to backpropagate:
outputs[idx] = wptype(2) * result[i, j]
outputs_manual[idx] = wptype(2) * result_manual[i, j]
idx = idx + 1
kernel = getkernel(check_quat_to_matrix, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
q = rng.standard_normal(size=(1, 4))
q /= np.linalg.norm(q)
q = wp.array(q.astype(dtype), dtype=quat, requires_grad=True, device=device)
# test values against the manually computed result:
outputs = wp.zeros(3 * 3, dtype=wptype, requires_grad=True, device=device)
outputs_manual = wp.zeros(3 * 3, dtype=wptype, requires_grad=True, device=device)
wp.launch(
kernel,
dim=1,
inputs=[q],
outputs=[
outputs,
outputs_manual,
],
device=device,
)
assert_np_equal(outputs.numpy(), outputs_manual.numpy(), tol=tol)
# sanity check: divide by 2 to remove that scale factor we put in there, and
# it should be a rotation matrix
R = 0.5 * outputs.numpy().reshape(3, 3)
assert_np_equal(np.matmul(R, R.T), np.eye(3), tol=tol)
# test gradients against the manually computed result:
idx = 0
for _i in range(3):
for _j in range(3):
cmp = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
cmp_manual = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[q],
outputs=[
outputs,
outputs_manual,
],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outputs, idx], outputs=[cmp], device=device)
wp.launch(
output_select_kernel, dim=1, inputs=[outputs_manual, idx], outputs=[cmp_manual], device=device
)
tape.backward(loss=cmp)
qgrads = 1.0 * tape.gradients[q].numpy()
tape.zero()
tape.backward(loss=cmp_manual)
qgrads_manual = 1.0 * tape.gradients[q].numpy()
tape.zero()
assert_np_equal(qgrads, qgrads_manual, tol=tol)
idx = idx + 1
############################################################
def test_slerp_grad(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
seed = 42
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec3 = wp.types.vector(3, wptype)
quat = wp.types.quaternion(wptype)
def slerp_kernel(
q0: wp.array(dtype=quat),
q1: wp.array(dtype=quat),
t: wp.array(dtype=wptype),
loss: wp.array(dtype=wptype),
index: int,
):
tid = wp.tid()
q = wp.quat_slerp(q0[tid], q1[tid], t[tid])
wp.atomic_add(loss, 0, q[index])
slerp_kernel = getkernel(slerp_kernel, suffix=dtype.__name__)
def slerp_kernel_forward(
q0: wp.array(dtype=quat),
q1: wp.array(dtype=quat),
t: wp.array(dtype=wptype),
loss: wp.array(dtype=wptype),
index: int,
):
tid = wp.tid()
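        # manual slerp: q(t) = q0 * exp(t * log(q0^-1 * q1)), implemented via axis-angle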
axis = vec3()
angle = wptype(0.0)
wp.quat_to_axis_angle(wp.mul(wp.quat_inverse(q0[tid]), q1[tid]), axis, angle)
q = wp.mul(q0[tid], wp.quat_from_axis_angle(axis, t[tid] * angle))
wp.atomic_add(loss, 0, q[index])
slerp_kernel_forward = getkernel(slerp_kernel_forward, suffix=dtype.__name__)
def quat_sampler_slerp(kernel_seed: int, quats: wp.array(dtype=quat)):
tid = wp.tid()
state = wp.rand_init(kernel_seed, tid)
angle = wp.randf(state, 0.0, 2.0 * 3.1415926535)
dir = wp.sample_unit_sphere_surface(state) * wp.sin(angle * 0.5)
q = quat(wptype(dir[0]), wptype(dir[1]), wptype(dir[2]), wptype(wp.cos(angle * 0.5)))
qn = wp.normalize(q)
quats[tid] = qn
quat_sampler = getkernel(quat_sampler_slerp, suffix=dtype.__name__)
if register_kernels:
return
N = 50
q0 = wp.zeros(N, dtype=quat, device=device, requires_grad=True)
q1 = wp.zeros(N, dtype=quat, device=device, requires_grad=True)
wp.launch(kernel=quat_sampler, dim=N, inputs=[seed, q0], device=device)
wp.launch(kernel=quat_sampler, dim=N, inputs=[seed + 1, q1], device=device)
t = rng.uniform(low=0.0, high=1.0, size=N)
t = wp.array(t, dtype=wptype, device=device, requires_grad=True)
def compute_gradients(kernel, wrt, index):
loss = wp.zeros(1, dtype=wptype, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(kernel=kernel, dim=N, inputs=[q0, q1, t, loss, index], device=device)
tape.backward(loss)
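        # multiplying by 1.0 forces a copy, since .numpy() may return a view that tape.zero() would clear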
gradients = 1.0 * tape.gradients[wrt].numpy()
tape.zero()
return loss.numpy()[0], gradients
eps = {
np.float16: 2.0e-2,
np.float32: 1.0e-5,
np.float64: 1.0e-8,
}.get(dtype, 0)
# wrt t
# gather gradients from builtin adjoints
xcmp, gradients_x = compute_gradients(slerp_kernel, t, 0)
ycmp, gradients_y = compute_gradients(slerp_kernel, t, 1)
zcmp, gradients_z = compute_gradients(slerp_kernel, t, 2)
wcmp, gradients_w = compute_gradients(slerp_kernel, t, 3)
# gather gradients from autodiff
xcmp_auto, gradients_x_auto = compute_gradients(slerp_kernel_forward, t, 0)
ycmp_auto, gradients_y_auto = compute_gradients(slerp_kernel_forward, t, 1)
zcmp_auto, gradients_z_auto = compute_gradients(slerp_kernel_forward, t, 2)
wcmp_auto, gradients_w_auto = compute_gradients(slerp_kernel_forward, t, 3)
assert_np_equal(gradients_x, gradients_x_auto, tol=eps)
assert_np_equal(gradients_y, gradients_y_auto, tol=eps)
assert_np_equal(gradients_z, gradients_z_auto, tol=eps)
assert_np_equal(gradients_w, gradients_w_auto, tol=eps)
assert_np_equal(xcmp, xcmp_auto, tol=eps)
assert_np_equal(ycmp, ycmp_auto, tol=eps)
assert_np_equal(zcmp, zcmp_auto, tol=eps)
assert_np_equal(wcmp, wcmp_auto, tol=eps)
# wrt q0
# gather gradients from builtin adjoints
xcmp, gradients_x = compute_gradients(slerp_kernel, q0, 0)
ycmp, gradients_y = compute_gradients(slerp_kernel, q0, 1)
zcmp, gradients_z = compute_gradients(slerp_kernel, q0, 2)
wcmp, gradients_w = compute_gradients(slerp_kernel, q0, 3)
# gather gradients from autodiff
xcmp_auto, gradients_x_auto = compute_gradients(slerp_kernel_forward, q0, 0)
ycmp_auto, gradients_y_auto = compute_gradients(slerp_kernel_forward, q0, 1)
zcmp_auto, gradients_z_auto = compute_gradients(slerp_kernel_forward, q0, 2)
wcmp_auto, gradients_w_auto = compute_gradients(slerp_kernel_forward, q0, 3)
assert_np_equal(gradients_x, gradients_x_auto, tol=eps)
assert_np_equal(gradients_y, gradients_y_auto, tol=eps)
assert_np_equal(gradients_z, gradients_z_auto, tol=eps)
assert_np_equal(gradients_w, gradients_w_auto, tol=eps)
assert_np_equal(xcmp, xcmp_auto, tol=eps)
assert_np_equal(ycmp, ycmp_auto, tol=eps)
assert_np_equal(zcmp, zcmp_auto, tol=eps)
assert_np_equal(wcmp, wcmp_auto, tol=eps)
# wrt q1
# gather gradients from builtin adjoints
xcmp, gradients_x = compute_gradients(slerp_kernel, q1, 0)
ycmp, gradients_y = compute_gradients(slerp_kernel, q1, 1)
zcmp, gradients_z = compute_gradients(slerp_kernel, q1, 2)
wcmp, gradients_w = compute_gradients(slerp_kernel, q1, 3)
# gather gradients from autodiff
xcmp_auto, gradients_x_auto = compute_gradients(slerp_kernel_forward, q1, 0)
ycmp_auto, gradients_y_auto = compute_gradients(slerp_kernel_forward, q1, 1)
zcmp_auto, gradients_z_auto = compute_gradients(slerp_kernel_forward, q1, 2)
wcmp_auto, gradients_w_auto = compute_gradients(slerp_kernel_forward, q1, 3)
assert_np_equal(gradients_x, gradients_x_auto, tol=eps)
assert_np_equal(gradients_y, gradients_y_auto, tol=eps)
assert_np_equal(gradients_z, gradients_z_auto, tol=eps)
assert_np_equal(gradients_w, gradients_w_auto, tol=eps)
assert_np_equal(xcmp, xcmp_auto, tol=eps)
assert_np_equal(ycmp, ycmp_auto, tol=eps)
assert_np_equal(zcmp, zcmp_auto, tol=eps)
assert_np_equal(wcmp, wcmp_auto, tol=eps)
############################################################
def test_quat_to_axis_angle_grad(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
seed = 42
num_rand = 50
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec3 = wp.types.vector(3, wptype)
vec4 = wp.types.vector(4, wptype)
quat = wp.types.quaternion(wptype)
def quat_to_axis_angle_kernel(quats: wp.array(dtype=quat), loss: wp.array(dtype=wptype), coord_idx: int):
tid = wp.tid()
axis = vec3()
angle = wptype(0.0)
wp.quat_to_axis_angle(quats[tid], axis, angle)
a = vec4(axis[0], axis[1], axis[2], angle)
wp.atomic_add(loss, 0, a[coord_idx])
quat_to_axis_angle_kernel = getkernel(quat_to_axis_angle_kernel, suffix=dtype.__name__)
def quat_to_axis_angle_kernel_forward(quats: wp.array(dtype=quat), loss: wp.array(dtype=wptype), coord_idx: int):
tid = wp.tid()
q = quats[tid]
axis = vec3()
angle = wptype(0.0)
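        # manual reference: axis = normalized vector part (negated when w < 0 so the
        # recovered angle stays in [0, pi]), angle = 2 * atan2(|v|, |w|)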
v = vec3(q[0], q[1], q[2])
if q[3] < wptype(0):
axis = -wp.normalize(v)
else:
axis = wp.normalize(v)
angle = wptype(2) * wp.atan2(wp.length(v), wp.abs(q[3]))
a = vec4(axis[0], axis[1], axis[2], angle)
wp.atomic_add(loss, 0, a[coord_idx])
quat_to_axis_angle_kernel_forward = getkernel(quat_to_axis_angle_kernel_forward, suffix=dtype.__name__)
def quat_sampler(kernel_seed: int, angles: wp.array(dtype=float), quats: wp.array(dtype=quat)):
tid = wp.tid()
state = wp.rand_init(kernel_seed, tid)
angle = angles[tid]
dir = wp.sample_unit_sphere_surface(state) * wp.sin(angle * 0.5)
q = quat(wptype(dir[0]), wptype(dir[1]), wptype(dir[2]), wptype(wp.cos(angle * 0.5)))
qn = wp.normalize(q)
quats[tid] = qn
quat_sampler = getkernel(quat_sampler, suffix=dtype.__name__)
if register_kernels:
return
quats = wp.zeros(num_rand, dtype=quat, device=device, requires_grad=True)
angles = wp.array(
np.linspace(0.0, 2.0 * np.pi, num_rand, endpoint=False, dtype=np.float32), dtype=float, device=device
)
wp.launch(kernel=quat_sampler, dim=num_rand, inputs=[seed, angles, quats], device=device)
edge_cases = np.array(
[(1.0, 0.0, 0.0, 0.0), (0.0, 1.0 / np.sqrt(3), 1.0 / np.sqrt(3), 1.0 / np.sqrt(3)), (0.0, 0.0, 0.0, 0.0)]
)
num_edge = len(edge_cases)
edge_cases = wp.array(edge_cases, dtype=quat, device=device, requires_grad=True)
def compute_gradients(arr, kernel, dim, index):
loss = wp.zeros(1, dtype=wptype, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(kernel=kernel, dim=dim, inputs=[arr, loss, index], device=device)
tape.backward(loss)
gradients = 1.0 * tape.gradients[arr].numpy()
tape.zero()
return loss.numpy()[0], gradients
# gather gradients from builtin adjoints
xcmp, gradients_x = compute_gradients(quats, quat_to_axis_angle_kernel, num_rand, 0)
ycmp, gradients_y = compute_gradients(quats, quat_to_axis_angle_kernel, num_rand, 1)
zcmp, gradients_z = compute_gradients(quats, quat_to_axis_angle_kernel, num_rand, 2)
wcmp, gradients_w = compute_gradients(quats, quat_to_axis_angle_kernel, num_rand, 3)
# gather gradients from autodiff
xcmp_auto, gradients_x_auto = compute_gradients(quats, quat_to_axis_angle_kernel_forward, num_rand, 0)
ycmp_auto, gradients_y_auto = compute_gradients(quats, quat_to_axis_angle_kernel_forward, num_rand, 1)
zcmp_auto, gradients_z_auto = compute_gradients(quats, quat_to_axis_angle_kernel_forward, num_rand, 2)
wcmp_auto, gradients_w_auto = compute_gradients(quats, quat_to_axis_angle_kernel_forward, num_rand, 3)
# edge cases: gather gradients from builtin adjoints
_, edge_gradients_x = compute_gradients(edge_cases, quat_to_axis_angle_kernel, num_edge, 0)
_, edge_gradients_y = compute_gradients(edge_cases, quat_to_axis_angle_kernel, num_edge, 1)
_, edge_gradients_z = compute_gradients(edge_cases, quat_to_axis_angle_kernel, num_edge, 2)
_, edge_gradients_w = compute_gradients(edge_cases, quat_to_axis_angle_kernel, num_edge, 3)
# edge cases: gather gradients from autodiff
_, edge_gradients_x_auto = compute_gradients(edge_cases, quat_to_axis_angle_kernel_forward, num_edge, 0)
_, edge_gradients_y_auto = compute_gradients(edge_cases, quat_to_axis_angle_kernel_forward, num_edge, 1)
_, edge_gradients_z_auto = compute_gradients(edge_cases, quat_to_axis_angle_kernel_forward, num_edge, 2)
_, edge_gradients_w_auto = compute_gradients(edge_cases, quat_to_axis_angle_kernel_forward, num_edge, 3)
eps = {
np.float16: 2.0e-1,
np.float32: 2.0e-4,
np.float64: 2.0e-7,
}.get(dtype, 0)
assert_np_equal(xcmp, xcmp_auto, tol=eps)
assert_np_equal(ycmp, ycmp_auto, tol=eps)
assert_np_equal(zcmp, zcmp_auto, tol=eps)
assert_np_equal(wcmp, wcmp_auto, tol=eps)
assert_np_equal(gradients_x, gradients_x_auto, tol=eps)
assert_np_equal(gradients_y, gradients_y_auto, tol=eps)
assert_np_equal(gradients_z, gradients_z_auto, tol=eps)
assert_np_equal(gradients_w, gradients_w_auto, tol=eps)
assert_np_equal(edge_gradients_x, edge_gradients_x_auto, tol=eps)
assert_np_equal(edge_gradients_y, edge_gradients_y_auto, tol=eps)
assert_np_equal(edge_gradients_z, edge_gradients_z_auto, tol=eps)
assert_np_equal(edge_gradients_w, edge_gradients_w_auto, tol=eps)
############################################################
def test_quat_rpy_grad(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
N = 3
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec3 = wp.types.vector(3, wptype)
quat = wp.types.quaternion(wptype)
def rpy_to_quat_kernel(rpy_arr: wp.array(dtype=vec3), loss: wp.array(dtype=wptype), coord_idx: int):
tid = wp.tid()
rpy = rpy_arr[tid]
roll = rpy[0]
pitch = rpy[1]
yaw = rpy[2]
q = wp.quat_rpy(roll, pitch, yaw)
wp.atomic_add(loss, 0, q[coord_idx])
rpy_to_quat_kernel = getkernel(rpy_to_quat_kernel, suffix=dtype.__name__)
def rpy_to_quat_kernel_forward(rpy_arr: wp.array(dtype=vec3), loss: wp.array(dtype=wptype), coord_idx: int):
tid = wp.tid()
rpy = rpy_arr[tid]
roll = rpy[0]
pitch = rpy[1]
yaw = rpy[2]
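        # manual reference for wp.quat_rpy: compose the roll, pitch and yaw half-angle
        # terms into a single (x, y, z, w) quaternion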
cy = wp.cos(yaw * wptype(0.5))
sy = wp.sin(yaw * wptype(0.5))
cr = wp.cos(roll * wptype(0.5))
sr = wp.sin(roll * wptype(0.5))
cp = wp.cos(pitch * wptype(0.5))
sp = wp.sin(pitch * wptype(0.5))
w = cy * cr * cp + sy * sr * sp
x = cy * sr * cp - sy * cr * sp
y = cy * cr * sp + sy * sr * cp
z = sy * cr * cp - cy * sr * sp
q = quat(x, y, z, w)
wp.atomic_add(loss, 0, q[coord_idx])
rpy_to_quat_kernel_forward = getkernel(rpy_to_quat_kernel_forward, suffix=dtype.__name__)
if register_kernels:
return
rpy_arr = rng.uniform(low=-np.pi, high=np.pi, size=(N, 3))
rpy_arr = wp.array(rpy_arr, dtype=vec3, device=device, requires_grad=True)
def compute_gradients(kernel, wrt, index):
loss = wp.zeros(1, dtype=wptype, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(kernel=kernel, dim=N, inputs=[wrt, loss, index], device=device)
tape.backward(loss)
gradients = 1.0 * tape.gradients[wrt].numpy()
tape.zero()
return loss.numpy()[0], gradients
# wrt rpy
# gather gradients from builtin adjoints
rcmp, gradients_r = compute_gradients(rpy_to_quat_kernel, rpy_arr, 0)
pcmp, gradients_p = compute_gradients(rpy_to_quat_kernel, rpy_arr, 1)
ycmp, gradients_y = compute_gradients(rpy_to_quat_kernel, rpy_arr, 2)
# gather gradients from autodiff
rcmp_auto, gradients_r_auto = compute_gradients(rpy_to_quat_kernel_forward, rpy_arr, 0)
pcmp_auto, gradients_p_auto = compute_gradients(rpy_to_quat_kernel_forward, rpy_arr, 1)
ycmp_auto, gradients_y_auto = compute_gradients(rpy_to_quat_kernel_forward, rpy_arr, 2)
eps = {
np.float16: 2.0e-2,
np.float32: 1.0e-5,
np.float64: 1.0e-8,
}.get(dtype, 0)
assert_np_equal(rcmp, rcmp_auto, tol=eps)
assert_np_equal(pcmp, pcmp_auto, tol=eps)
assert_np_equal(ycmp, ycmp_auto, tol=eps)
assert_np_equal(gradients_r, gradients_r_auto, tol=eps)
assert_np_equal(gradients_p, gradients_p_auto, tol=eps)
assert_np_equal(gradients_y, gradients_y_auto, tol=eps)
############################################################
def test_quat_from_matrix(test, device, dtype, register_kernels=False):
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat33 = wp.types.matrix((3, 3), wptype)
quat = wp.types.quaternion(wptype)
def quat_from_matrix(m: wp.array2d(dtype=wptype), loss: wp.array(dtype=wptype), idx: int):
tid = wp.tid()
matrix = mat33(
m[tid, 0], m[tid, 1], m[tid, 2], m[tid, 3], m[tid, 4], m[tid, 5], m[tid, 6], m[tid, 7], m[tid, 8]
)
q = wp.quat_from_matrix(matrix)
wp.atomic_add(loss, 0, q[idx])
def quat_from_matrix_forward(mats: wp.array2d(dtype=wptype), loss: wp.array(dtype=wptype), idx: int):
tid = wp.tid()
m = mat33(
mats[tid, 0],
mats[tid, 1],
mats[tid, 2],
mats[tid, 3],
mats[tid, 4],
mats[tid, 5],
mats[tid, 6],
mats[tid, 7],
mats[tid, 8],
)
tr = m[0][0] + m[1][1] + m[2][2]
x = wptype(0)
y = wptype(0)
z = wptype(0)
w = wptype(0)
h = wptype(0)
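        # branch on the trace and the largest diagonal entry so the sqrt argument
        # stays well away from zero (the standard matrix-to-quaternion recipe)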
if tr >= wptype(0):
h = wp.sqrt(tr + wptype(1))
w = wptype(0.5) * h
h = wptype(0.5) / h
x = (m[2][1] - m[1][2]) * h
y = (m[0][2] - m[2][0]) * h
z = (m[1][0] - m[0][1]) * h
else:
max_diag = 0
if m[1][1] > m[0][0]:
max_diag = 1
if m[2][2] > m[max_diag][max_diag]:
max_diag = 2
if max_diag == 0:
h = wp.sqrt((m[0][0] - (m[1][1] + m[2][2])) + wptype(1))
x = wptype(0.5) * h
h = wptype(0.5) / h
y = (m[0][1] + m[1][0]) * h
z = (m[2][0] + m[0][2]) * h
w = (m[2][1] - m[1][2]) * h
elif max_diag == 1:
h = wp.sqrt((m[1][1] - (m[2][2] + m[0][0])) + wptype(1))
y = wptype(0.5) * h
h = wptype(0.5) / h
z = (m[1][2] + m[2][1]) * h
x = (m[0][1] + m[1][0]) * h
w = (m[0][2] - m[2][0]) * h
            elif max_diag == 2:
h = wp.sqrt((m[2][2] - (m[0][0] + m[1][1])) + wptype(1))
z = wptype(0.5) * h
h = wptype(0.5) / h
x = (m[2][0] + m[0][2]) * h
y = (m[1][2] + m[2][1]) * h
w = (m[1][0] - m[0][1]) * h
q = wp.normalize(quat(x, y, z, w))
wp.atomic_add(loss, 0, q[idx])
quat_from_matrix = getkernel(quat_from_matrix, suffix=dtype.__name__)
quat_from_matrix_forward = getkernel(quat_from_matrix_forward, suffix=dtype.__name__)
if register_kernels:
return
m = np.array(
[
[1.0, 0.0, 0.0, 0.0, 0.5, 0.866, 0.0, -0.866, 0.5],
[0.866, 0.0, 0.25, -0.433, 0.5, 0.75, -0.25, -0.866, 0.433],
[0.866, -0.433, 0.25, 0.0, 0.5, 0.866, -0.5, -0.75, 0.433],
[-1.2, -1.6, -2.3, 0.25, -0.6, -0.33, 3.2, -1.0, -2.2],
]
)
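    # the last matrix has a negative trace, which exercises the tr < 0 branches of the conversion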
m = wp.array2d(m, dtype=wptype, device=device, requires_grad=True)
N = m.shape[0]
def compute_gradients(kernel, wrt, index):
loss = wp.zeros(1, dtype=wptype, device=device, requires_grad=True)
tape = wp.Tape()
with tape:
wp.launch(kernel=kernel, dim=N, inputs=[m, loss, index], device=device)
tape.backward(loss)
gradients = 1.0 * tape.gradients[wrt].numpy()
tape.zero()
return loss.numpy()[0], gradients
# gather gradients from builtin adjoints
cmpx, gradients_x = compute_gradients(quat_from_matrix, m, 0)
cmpy, gradients_y = compute_gradients(quat_from_matrix, m, 1)
cmpz, gradients_z = compute_gradients(quat_from_matrix, m, 2)
cmpw, gradients_w = compute_gradients(quat_from_matrix, m, 3)
# gather gradients from autodiff
cmpx_auto, gradients_x_auto = compute_gradients(quat_from_matrix_forward, m, 0)
cmpy_auto, gradients_y_auto = compute_gradients(quat_from_matrix_forward, m, 1)
cmpz_auto, gradients_z_auto = compute_gradients(quat_from_matrix_forward, m, 2)
cmpw_auto, gradients_w_auto = compute_gradients(quat_from_matrix_forward, m, 3)
# compare
eps = {
np.float16: 2.0e-2,
np.float32: 1.0e-5,
np.float64: 1.0e-8,
}.get(dtype, 0)
assert_np_equal(cmpx, cmpx_auto, tol=eps)
assert_np_equal(cmpy, cmpy_auto, tol=eps)
assert_np_equal(cmpz, cmpz_auto, tol=eps)
assert_np_equal(cmpw, cmpw_auto, tol=eps)
assert_np_equal(gradients_x, gradients_x_auto, tol=eps)
assert_np_equal(gradients_y, gradients_y_auto, tol=eps)
assert_np_equal(gradients_z, gradients_z_auto, tol=eps)
assert_np_equal(gradients_w, gradients_w_auto, tol=eps)
def test_quat_identity(test, device, dtype, register_kernels=False):
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
def quat_identity_test(output: wp.array(dtype=wptype)):
q = wp.quat_identity(dtype=wptype)
output[0] = q[0]
output[1] = q[1]
output[2] = q[2]
output[3] = q[3]
def quat_identity_test_default(output: wp.array(dtype=wp.float32)):
q = wp.quat_identity()
output[0] = q[0]
output[1] = q[1]
output[2] = q[2]
output[3] = q[3]
quat_identity_kernel = getkernel(quat_identity_test, suffix=dtype.__name__)
quat_identity_default_kernel = getkernel(quat_identity_test_default, suffix=np.float32.__name__)
if register_kernels:
return
output = wp.zeros(4, dtype=wptype, device=device)
wp.launch(quat_identity_kernel, dim=1, inputs=[], outputs=[output], device=device)
expected = np.zeros_like(output.numpy())
expected[3] = 1
assert_np_equal(output.numpy(), expected)
# let's just test that it defaults to float32:
output = wp.zeros(4, dtype=wp.float32, device=device)
wp.launch(quat_identity_default_kernel, dim=1, inputs=[], outputs=[output], device=device)
expected = np.zeros_like(output.numpy())
expected[3] = 1
assert_np_equal(output.numpy(), expected)
############################################################
def test_quat_euler_conversion(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
N = 3
rpy_arr = rng.uniform(low=-np.pi, high=np.pi, size=(N, 3))
quats_from_euler = [list(wp.sim.quat_from_euler(wp.vec3(*rpy), 0, 1, 2)) for rpy in rpy_arr]
quats_from_rpy = [list(wp.quat_rpy(rpy[0], rpy[1], rpy[2])) for rpy in rpy_arr]
assert_np_equal(np.array(quats_from_euler), np.array(quats_from_rpy), tol=1e-4)
def test_anon_type_instance(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
def quat_create_test(input: wp.array(dtype=wptype), output: wp.array(dtype=wptype)):
# component constructor:
q = wp.quaternion(input[0], input[1], input[2], input[3])
output[0] = wptype(2) * q[0]
output[1] = wptype(2) * q[1]
output[2] = wptype(2) * q[2]
output[3] = wptype(2) * q[3]
# vector / scalar constructor:
q2 = wp.quaternion(wp.vector(input[4], input[5], input[6]), input[7])
output[4] = wptype(2) * q2[0]
output[5] = wptype(2) * q2[1]
output[6] = wptype(2) * q2[2]
output[7] = wptype(2) * q2[3]
quat_create_kernel = getkernel(quat_create_test, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
input = wp.array(rng.standard_normal(size=8).astype(dtype), requires_grad=True, device=device)
output = wp.zeros(8, dtype=wptype, requires_grad=True, device=device)
wp.launch(quat_create_kernel, dim=1, inputs=[input], outputs=[output], device=device)
assert_np_equal(output.numpy(), 2 * input.numpy())
for i in range(len(input)):
cmp = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(quat_create_kernel, dim=1, inputs=[input], outputs=[output], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[output, i], outputs=[cmp], device=device)
tape.backward(loss=cmp)
expectedgrads = np.zeros(len(input))
expectedgrads[i] = 2
assert_np_equal(tape.gradients[input].numpy(), expectedgrads)
tape.zero()
# Same as above but with a default (float) type
# which tests some different code paths that
# need to ensure types are correctly canonicalized
# during codegen
@wp.kernel
def test_constructor_default():
qzero = wp.quat()
wp.expect_eq(qzero[0], 0.0)
wp.expect_eq(qzero[1], 0.0)
wp.expect_eq(qzero[2], 0.0)
wp.expect_eq(qzero[3], 0.0)
qval = wp.quat(1.0, 2.0, 3.0, 4.0)
wp.expect_eq(qval[0], 1.0)
wp.expect_eq(qval[1], 2.0)
wp.expect_eq(qval[2], 3.0)
wp.expect_eq(qval[3], 4.0)
qeye = wp.quat_identity()
wp.expect_eq(qeye[0], 0.0)
wp.expect_eq(qeye[1], 0.0)
wp.expect_eq(qeye[2], 0.0)
wp.expect_eq(qeye[3], 1.0)
def test_py_arithmetic_ops(test, device, dtype):
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
def make_quat(*args):
if wptype in wp.types.int_types:
# Cast to the correct integer type to simulate wrapping.
return tuple(wptype._type_(x).value for x in args)
return args
quat_cls = wp.types.quaternion(wptype)
v = quat_cls(1, -2, 3, -4)
test.assertSequenceEqual(+v, make_quat(1, -2, 3, -4))
test.assertSequenceEqual(-v, make_quat(-1, 2, -3, 4))
test.assertSequenceEqual(v + quat_cls(5, 5, 5, 5), make_quat(6, 3, 8, 1))
test.assertSequenceEqual(v - quat_cls(5, 5, 5, 5), make_quat(-4, -7, -2, -9))
v = quat_cls(2, 4, 6, 8)
test.assertSequenceEqual(v * wptype(2), make_quat(4, 8, 12, 16))
test.assertSequenceEqual(wptype(2) * v, make_quat(4, 8, 12, 16))
test.assertSequenceEqual(v / wptype(2), make_quat(1, 2, 3, 4))
test.assertSequenceEqual(wptype(24) / v, make_quat(12, 6, 4, 3))
devices = get_test_devices()
class TestQuat(unittest.TestCase):
pass
add_kernel_test(TestQuat, test_constructor_default, dim=1, devices=devices)
for dtype in np_float_types:
add_function_test_register_kernel(
TestQuat, f"test_constructors_{dtype.__name__}", test_constructors, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat,
f"test_casting_constructors_{dtype.__name__}",
test_casting_constructors,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestQuat, f"test_anon_type_instance_{dtype.__name__}", test_anon_type_instance, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_inverse_{dtype.__name__}", test_inverse, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_quat_identity_{dtype.__name__}", test_quat_identity, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_dotproduct_{dtype.__name__}", test_dotproduct, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_length_{dtype.__name__}", test_length, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_normalize_{dtype.__name__}", test_normalize, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_addition_{dtype.__name__}", test_addition, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_subtraction_{dtype.__name__}", test_subtraction, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat,
f"test_scalar_multiplication_{dtype.__name__}",
test_scalar_multiplication,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestQuat, f"test_scalar_division_{dtype.__name__}", test_scalar_division, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat,
f"test_quat_multiplication_{dtype.__name__}",
test_quat_multiplication,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestQuat, f"test_indexing_{dtype.__name__}", test_indexing, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_quat_lerp_{dtype.__name__}", test_quat_lerp, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat,
f"test_quat_to_axis_angle_grad_{dtype.__name__}",
test_quat_to_axis_angle_grad,
devices=devices,
dtype=dtype,
)
add_function_test_register_kernel(
TestQuat, f"test_slerp_grad_{dtype.__name__}", test_slerp_grad, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_quat_rpy_grad_{dtype.__name__}", test_quat_rpy_grad, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_quat_from_matrix_{dtype.__name__}", test_quat_from_matrix, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_quat_rotate_{dtype.__name__}", test_quat_rotate, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat, f"test_quat_to_matrix_{dtype.__name__}", test_quat_to_matrix, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestQuat,
f"test_quat_euler_conversion_{dtype.__name__}",
test_quat_euler_conversion,
devices=devices,
dtype=dtype,
)
add_function_test(
TestQuat, f"test_py_arithmetic_ops_{dtype.__name__}", test_py_arithmetic_ops, devices=None, dtype=dtype
)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 73,068 | Python | 34.045084 | 117 | 0.58061 |
NVIDIA/warp/warp/tests/test_async.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
from warp.utils import check_iommu
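# Helper context manager: when use_graph is True, operations inside the block are
# recorded into a CUDA graph on the given stream and the graph is launched once on exit.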
class Capturable:
def __init__(self, use_graph=True, stream=None):
self.use_graph = use_graph
self.stream = stream
def __enter__(self):
if self.use_graph:
wp.capture_begin(stream=self.stream)
def __exit__(self, exc_type, exc_value, traceback):
if self.use_graph:
try:
# need to call capture_end() to terminate the CUDA stream capture
graph = wp.capture_end(stream=self.stream)
except Exception:
# capture_end() will raise if there was an error during capture, but we squash it here
# if we already had an exception so that the original exception percolates to the caller
if exc_type is None:
raise
else:
# capture can succeed despite some errors during capture (e.g. cudaInvalidValue during copy)
# but if we had an exception during capture, don't launch the graph
if exc_type is None:
wp.capture_launch(graph, stream=self.stream)
@wp.kernel
def inc(a: wp.array(dtype=float)):
tid = wp.tid()
a[tid] = a[tid] + 1.0
def test_async_empty(test, device, use_mempools, use_graph):
with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
n = 100
with Capturable(use_graph):
a = wp.empty(n, dtype=float)
test.assertIsInstance(a, wp.array)
test.assertIsNotNone(a.ptr)
test.assertEqual(a.size, n)
test.assertEqual(a.dtype, wp.float32)
test.assertEqual(a.device, device)
def test_async_zeros(test, device, use_mempools, use_graph):
with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
n = 100
with Capturable(use_graph):
a = wp.zeros(n, dtype=float)
assert_np_equal(a.numpy(), np.zeros(n, dtype=np.float32))
def test_async_zero_v1(test, device, use_mempools, use_graph):
with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
n = 100
with Capturable(use_graph):
a = wp.empty(n, dtype=float)
a.zero_()
assert_np_equal(a.numpy(), np.zeros(n, dtype=np.float32))
def test_async_zero_v2(test, device, use_mempools, use_graph):
with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
n = 100
a = wp.empty(n, dtype=float)
with Capturable(use_graph):
a.zero_()
assert_np_equal(a.numpy(), np.zeros(n, dtype=np.float32))
def test_async_full(test, device, use_mempools, use_graph):
with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
n = 100
value = 42
with Capturable(use_graph):
a = wp.full(n, value, dtype=float)
assert_np_equal(a.numpy(), np.full(n, value, dtype=np.float32))
def test_async_fill_v1(test, device, use_mempools, use_graph):
with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
n = 100
value = 17
with Capturable(use_graph):
a = wp.empty(n, dtype=float)
a.fill_(value)
assert_np_equal(a.numpy(), np.full(n, value, dtype=np.float32))
def test_async_fill_v2(test, device, use_mempools, use_graph):
with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
n = 100
value = 17
a = wp.empty(n, dtype=float)
with Capturable(use_graph):
a.fill_(value)
assert_np_equal(a.numpy(), np.full(n, value, dtype=np.float32))
def test_async_kernels_v1(test, device, use_mempools, use_graph):
with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
n = 100
num_iters = 10
with Capturable(use_graph):
a = wp.zeros(n, dtype=float)
for _i in range(num_iters):
wp.launch(inc, dim=a.size, inputs=[a])
assert_np_equal(a.numpy(), np.full(n, num_iters, dtype=np.float32))
def test_async_kernels_v2(test, device, use_mempools, use_graph):
with wp.ScopedDevice(device), wp.ScopedMempool(device, use_mempools):
n = 100
num_iters = 10
a = wp.zeros(n, dtype=float)
with Capturable(use_graph):
for _i in range(num_iters):
wp.launch(inc, dim=a.size, inputs=[a])
assert_np_equal(a.numpy(), np.full(n, num_iters, dtype=np.float32))
class TestAsync(unittest.TestCase):
pass
# get all CUDA devices
cuda_devices = wp.get_cuda_devices()
# get CUDA devices that support mempools
cuda_devices_with_mempools = []
for d in cuda_devices:
if d.is_mempool_supported:
cuda_devices_with_mempools.append(d)
# get a pair of CUDA devices that support mempool access
cuda_devices_with_mempool_access = []
for target_device in cuda_devices_with_mempools:
for peer_device in cuda_devices_with_mempools:
if peer_device != target_device:
if wp.is_mempool_access_supported(target_device, peer_device):
cuda_devices_with_mempool_access = [target_device, peer_device]
break
if cuda_devices_with_mempool_access:
break
def add_test_variants(
func,
device_count=1,
graph_allocs=False,
requires_mempool_access_with_graph=False,
):
# test that works with default allocators
if not graph_allocs and device_count <= len(cuda_devices):
devices = cuda_devices[:device_count]
def func1(t, d):
return func(t, *devices, False, False)
def func2(t, d):
return func(t, *devices, False, True)
name1 = f"{func.__name__}_DefaultAlloc_NoGraph"
name2 = f"{func.__name__}_DefaultAlloc_WithGraph"
if device_count == 1:
add_function_test(TestAsync, name1, func1, devices=devices)
add_function_test(TestAsync, name2, func2, devices=devices)
else:
add_function_test(TestAsync, name1, func1)
add_function_test(TestAsync, name2, func2)
# test that works with mempool allocators
if device_count <= len(cuda_devices_with_mempools):
devices = cuda_devices_with_mempools[:device_count]
def func3(t, d):
return func(t, *devices, True, False)
name3 = f"{func.__name__}_MempoolAlloc_NoGraph"
if device_count == 1:
add_function_test(TestAsync, name3, func3, devices=devices)
else:
add_function_test(TestAsync, name3, func3)
# test that requires devices with mutual mempool access during graph capture (e.g., p2p memcpy limitation)
if requires_mempool_access_with_graph:
suitable_devices = cuda_devices_with_mempool_access
else:
suitable_devices = cuda_devices_with_mempools
if device_count <= len(suitable_devices):
devices = suitable_devices[:device_count]
def func4(t, d):
return func(t, *devices, True, True)
name4 = f"{func.__name__}_MempoolAlloc_WithGraph"
if device_count == 1:
add_function_test(TestAsync, name4, func4, devices=devices)
else:
add_function_test(TestAsync, name4, func4)
add_test_variants(test_async_empty, graph_allocs=True)
add_test_variants(test_async_zeros, graph_allocs=True)
add_test_variants(test_async_zero_v1, graph_allocs=True)
add_test_variants(test_async_zero_v2, graph_allocs=False)
add_test_variants(test_async_full, graph_allocs=True)
add_test_variants(test_async_fill_v1, graph_allocs=True)
add_test_variants(test_async_fill_v2, graph_allocs=False)
add_test_variants(test_async_kernels_v1, graph_allocs=True)
add_test_variants(test_async_kernels_v2, graph_allocs=False)
# =================================================================================
# wp.copy() tests
# =================================================================================
def as_contiguous_array(data, device=None, grad_data=None):
a = wp.array(data=data, device=device, copy=True)
if grad_data is not None:
a.grad = as_contiguous_array(grad_data, device=device)
return a
def as_strided_array(data, device=None, grad_data=None):
a = wp.array(data=data, device=device)
# make a copy with non-contiguous strides
strides = (*a.strides[:-1], 2 * a.strides[-1])
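    # doubling the innermost stride leaves a one-element gap after every element, making the array non-contiguous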
strided_a = wp.zeros(shape=a.shape, strides=strides, dtype=a.dtype, device=device)
wp.copy(strided_a, a)
if grad_data is not None:
strided_a.grad = as_strided_array(grad_data, device=device)
return strided_a
def as_indexed_array(data, device=None, **kwargs):
a = wp.array(data=data, device=device)
# allocate double the elements so we can index half of them
shape = (*a.shape[:-1], 2 * a.shape[-1])
big_a = wp.zeros(shape=shape, dtype=a.dtype, device=device)
indices = wp.array(data=np.arange(0, shape[-1], 2, dtype=np.int32), device=device)
indexed_a = big_a[indices]
wp.copy(indexed_a, a)
return indexed_a
def as_fabric_array(data, device=None, **kwargs):
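    # wrap the data in a Fabric array interface so copies exercise the fabricarray code path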
from warp.tests.test_fabricarray import _create_fabric_array_interface
a = wp.array(data=data, device=device)
iface = _create_fabric_array_interface(a, "foo")
fa = wp.fabricarray(data=iface, attrib="foo")
fa._iface = iface # save data reference
return fa
def as_indexed_fabric_array(data, device=None, **kwargs):
from warp.tests.test_fabricarray import _create_fabric_array_interface
a = wp.array(data=data, device=device)
shape = (*a.shape[:-1], 2 * a.shape[-1])
# allocate double the elements so we can index half of them
big_a = wp.zeros(shape=shape, dtype=a.dtype, device=device)
indices = wp.array(data=np.arange(0, shape[-1], 2, dtype=np.int32), device=device)
iface = _create_fabric_array_interface(big_a, "foo", copy=True)
fa = wp.fabricarray(data=iface, attrib="foo")
fa._iface = iface # save data reference
indexed_fa = fa[indices]
wp.copy(indexed_fa, a)
return indexed_fa
class CopyParams:
def __init__(
self,
with_grad=False, # whether to use arrays with gradients (contiguous and strided only)
src_use_mempool=False, # whether to enable memory pool on source device
dst_use_mempool=False, # whether to enable memory pool on destination device
access_dst_src=False, # whether destination device has access to the source mempool
access_src_dst=False, # whether source device has access to the destination mempool
stream_device=None, # the device for the stream (None for default behaviour)
use_graph=False, # whether to use a graph
value_offset=0, # unique offset for generated data values per test
):
self.with_grad = with_grad
self.src_use_mempool = src_use_mempool
self.dst_use_mempool = dst_use_mempool
self.access_dst_src = access_dst_src
self.access_src_dst = access_src_dst
self.stream_device = stream_device
self.use_graph = use_graph
self.value_offset = value_offset
def copy_template(test, src_ctor, dst_ctor, src_device, dst_device, n, params: CopyParams):
# activate the given memory pool configuration
with wp.ScopedMempool(src_device, params.src_use_mempool), wp.ScopedMempool(
dst_device, params.dst_use_mempool
), wp.ScopedMempoolAccess(dst_device, src_device, params.access_dst_src), wp.ScopedMempoolAccess(
src_device, dst_device, params.access_src_dst
):
# make sure the data are different between tests by adding a unique offset
# this avoids aliasing issues with older memory
src_data = np.arange(params.value_offset, params.value_offset + n, dtype=np.float32)
dst_data = np.zeros(n, dtype=np.float32)
if params.with_grad:
src_grad_data = -np.arange(params.value_offset, params.value_offset + n, dtype=np.float32)
dst_grad_data = np.zeros(n, dtype=np.float32)
else:
src_grad_data = None
dst_grad_data = None
# create Warp arrays for the copy
src = src_ctor(src_data, device=src_device, grad_data=src_grad_data)
dst = dst_ctor(dst_data, device=dst_device, grad_data=dst_grad_data)
# determine the stream argument to pass to wp.copy()
if params.stream_device is not None:
stream_arg = wp.Stream(params.stream_device)
else:
stream_arg = None
# determine the actual stream used for the copy
if stream_arg is not None:
stream = stream_arg
else:
if dst_device.is_cuda:
stream = dst_device.stream
elif src_device.is_cuda:
stream = src_device.stream
else:
stream = None
# check if an exception is expected given the arguments and system configuration
expected_error_type = None
expected_error_regex = None
# restrictions on copying between different devices during graph capture
if params.use_graph and src_device != dst_device:
# errors with allocating staging buffer on source device
if not src.is_contiguous:
if src_device.is_cuda and not src_device.is_mempool_enabled:
# can't allocate staging buffer using default CUDA allocator during capture
expected_error_type, expected_error_regex = RuntimeError, r"^Failed to allocate"
elif src_device.is_cpu:
# can't allocate CPU staging buffer during capture
expected_error_type, expected_error_regex = RuntimeError, r"^Failed to allocate"
# errors with allocating staging buffer on destination device
if expected_error_type is None:
if not dst.is_contiguous:
if dst_device.is_cuda and not dst_device.is_mempool_enabled:
# can't allocate staging buffer using default CUDA allocator during capture
expected_error_type, expected_error_regex = RuntimeError, r"^Failed to allocate"
elif dst_device.is_cpu and src_device.is_cuda:
# can't allocate CPU staging buffer during capture
expected_error_type, expected_error_regex = RuntimeError, r"^Failed to allocate"
# p2p copies and mempool access
if expected_error_type is None and src_device.is_cuda and dst_device.is_cuda:
# If the source is a contiguous mempool allocation or a non-contiguous array
# AND the destination is a contiguous mempool allocation or a non-contiguous array,
# then memory pool access needs to be enabled EITHER from src_device to dst_device
# OR from dst_device to src_device.
if (
((src.is_contiguous and params.src_use_mempool) or not src.is_contiguous)
and ((dst.is_contiguous and params.dst_use_mempool) or not dst.is_contiguous)
and not wp.is_mempool_access_enabled(src_device, dst_device)
and not wp.is_mempool_access_enabled(dst_device, src_device)
):
expected_error_type, expected_error_regex = RuntimeError, r"^Warp copy error"
# synchronize before test
wp.synchronize()
if expected_error_type is not None:
# disable error output from Warp if we expect an exception
                saved_error_output_enabled = wp.context.runtime.core.is_error_output_enabled()
                try:
                    wp.context.runtime.core.set_error_output_enabled(False)
                    with test.assertRaisesRegex(expected_error_type, expected_error_regex):
                        with Capturable(use_graph=params.use_graph, stream=stream):
                            wp.copy(dst, src, stream=stream_arg)
                finally:
                    # restore the saved setting even if the expected exception was not raised
                    wp.context.runtime.core.set_error_output_enabled(saved_error_output_enabled)
wp.synchronize()
# print(f"SUCCESSFUL ERROR PREDICTION: {expected_error_regex}")
else:
with Capturable(use_graph=params.use_graph, stream=stream):
wp.copy(dst, src, stream=stream_arg)
# synchronize the stream where the copy was running (None for h2h copies)
if stream is not None:
wp.synchronize_stream(stream)
assert_np_equal(dst.numpy(), src.numpy())
if params.with_grad:
assert_np_equal(dst.grad.numpy(), src.grad.numpy())
# print("SUCCESSFUL COPY")
array_constructors = {
"contiguous": as_contiguous_array,
"strided": as_strided_array,
"indexed": as_indexed_array,
"fabric": as_fabric_array,
"indexedfabric": as_indexed_fabric_array,
}
array_type_codes = {
"contiguous": "c",
"strided": "s",
"indexed": "i",
"fabric": "f",
"indexedfabric": "fi",
}
device_pairs = {}
cpu = None
cuda0 = None
cuda1 = None
cuda2 = None
if wp.is_cpu_available():
cpu = wp.get_device("cpu")
device_pairs["h2h"] = (cpu, cpu)
if wp.is_cuda_available():
cuda0 = wp.get_device("cuda:0")
device_pairs["d2d"] = (cuda0, cuda0)
if wp.is_cpu_available():
device_pairs["h2d"] = (cpu, cuda0)
device_pairs["d2h"] = (cuda0, cpu)
if wp.get_cuda_device_count() > 1:
cuda1 = wp.get_device("cuda:1")
device_pairs["p2p"] = (cuda0, cuda1)
if wp.get_cuda_device_count() > 2:
cuda2 = wp.get_device("cuda:2")
num_copy_elems = 1000000
num_copy_tests = 0
def add_copy_test(test_name, src_ctor, dst_ctor, src_device, dst_device, n, params):
def test_func(
test,
device,
src_ctor=src_ctor,
dst_ctor=dst_ctor,
src_device=src_device,
dst_device=dst_device,
n=n,
params=params,
):
return copy_template(test, src_ctor, dst_ctor, src_device, dst_device, n, params)
add_function_test(TestAsync, test_name, test_func, check_output=False)
# Procedurally add tests with argument combinations supported by the system.
for src_type, src_ctor in array_constructors.items():
for dst_type, dst_ctor in array_constructors.items():
copy_type = f"{array_type_codes[src_type]}2{array_type_codes[dst_type]}"
for transfer_type, device_pair in device_pairs.items():
# skip p2p tests if IOMMU is enabled on Linux
if transfer_type == "p2p" and not check_iommu():
continue
src_device = device_pair[0]
dst_device = device_pair[1]
# basic copy arguments
copy_args = (src_ctor, dst_ctor, src_device, dst_device, num_copy_elems)
if src_device.is_cuda and src_device.is_mempool_supported:
src_mempool_flags = [False, True]
else:
src_mempool_flags = [False]
if dst_device.is_cuda and dst_device.is_mempool_supported:
dst_mempool_flags = [False, True]
else:
dst_mempool_flags = [False]
# stream options
if src_device.is_cuda:
if dst_device.is_cuda:
if src_device == dst_device:
# d2d
assert src_device == cuda0 and dst_device == cuda0
if cuda1 is not None:
stream_devices = [None, cuda0, cuda1]
else:
stream_devices = [None, cuda0]
else:
# p2p
assert src_device == cuda0 and dst_device == cuda1
if cuda2 is not None:
stream_devices = [None, cuda0, cuda1, cuda2]
else:
stream_devices = [None, cuda0, cuda1]
else:
# d2h
assert src_device == cuda0
if cuda1 is not None:
stream_devices = [None, cuda0, cuda1]
else:
stream_devices = [None, cuda0]
else:
if dst_device.is_cuda:
# h2d
assert dst_device == cuda0
if cuda1 is not None:
stream_devices = [None, cuda0, cuda1]
else:
stream_devices = [None, cuda0]
else:
# h2h
stream_devices = [None]
# gradient options (only supported with contiguous and strided arrays)
if src_type in ("contiguous", "strided") and dst_type in ("contiguous", "strided"):
grad_flags = [False, True]
else:
grad_flags = [False]
# graph capture options (only supported with CUDA devices)
if src_device.is_cuda or dst_device.is_cuda:
graph_flags = [False, True]
else:
graph_flags = [False]
# access from destination device to source mempool
if wp.is_mempool_access_supported(dst_device, src_device):
access_dst_src_flags = [False, True]
else:
access_dst_src_flags = [False]
# access from source device to destination mempool
if wp.is_mempool_access_supported(src_device, dst_device):
access_src_dst_flags = [False, True]
else:
access_src_dst_flags = [False]
for src_use_mempool in src_mempool_flags:
for dst_use_mempool in dst_mempool_flags:
for stream_device in stream_devices:
for access_dst_src in access_dst_src_flags:
for access_src_dst in access_src_dst_flags:
for with_grad in grad_flags:
for use_graph in graph_flags:
test_name = f"test_copy_{copy_type}_{transfer_type}"
if src_use_mempool:
test_name += "_SrcPoolOn"
else:
test_name += "_SrcPoolOff"
if dst_use_mempool:
test_name += "_DstPoolOn"
else:
test_name += "_DstPoolOff"
if stream_device is None:
test_name += "_NoStream"
elif stream_device == cuda0:
test_name += "_Stream0"
elif stream_device == cuda1:
test_name += "_Stream1"
elif stream_device == cuda2:
test_name += "_Stream2"
else:
raise AssertionError
if with_grad:
test_name += "_Grad"
else:
test_name += "_NoGrad"
if use_graph:
test_name += "_Graph"
else:
test_name += "_NoGraph"
if access_dst_src and access_src_dst:
test_name += "_AccessBoth"
elif access_dst_src and not access_src_dst:
test_name += "_AccessDstSrc"
elif not access_dst_src and access_src_dst:
test_name += "_AccessSrcDst"
else:
test_name += "_AccessNone"
copy_params = CopyParams(
src_use_mempool=src_use_mempool,
dst_use_mempool=dst_use_mempool,
access_dst_src=access_dst_src,
access_src_dst=access_src_dst,
stream_device=stream_device,
with_grad=with_grad,
use_graph=use_graph,
value_offset=num_copy_tests,
)
add_copy_test(test_name, *copy_args, copy_params)
num_copy_tests += 1
# Specify individual test(s) for debugging purposes
# add_copy_test("test_a", as_contiguous_array, as_strided_array, cuda0, cuda1, num_copy_elems,
# CopyParams(
# src_use_mempool=True,
# dst_use_mempool=True,
# access_dst_src=False,
# access_src_dst=False,
# stream_device=cuda0,
# with_grad=False,
# use_graph=True,
# value_offset=0))
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 26,378 | Python | 38.548726 | 110 | 0.556676 |
NVIDIA/warp/warp/tests/walkthrough_debug.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
####################################################################################################
#
# This file demonstrates step-through debugging support of the C++ code generated for a Warp kernel
# running on the CPU.
#
# This is not a unit test; it should be run interactively.
#
# For a fully integrated experience use Visual Studio Code and install the "Python C++ Debugger"
# and "CodeLLDB" extensions. Add the following configurations to your .vscode/launch.json file:
#
"""
{
"name": "Warp Debugger",
"type": "pythoncpp",
"request": "launch",
"pythonLaunchName": "Python: Current File",
"cppAttachName": "(lldb) Attach",
},
{
"name": "Python: Current File",
"type": "debugpy",
"request": "launch",
"program": "${file}",
"console": "integratedTerminal",
"stopOnEntry": false,
},
{
"name": "(lldb) Attach",
"type": "lldb",
"request": "attach",
},
"""
#
# Then run this .py file using the "Warp Debugger" configuration.
#
# Check out the following resources for more information about launch configurations and
# troubleshooting common VSCode debugger integration issues:
# • https://vscode-docs.readthedocs.io/en/stable/editor/debugging/#launch-configurations
# • https://code.visualstudio.com/docs/cpp/cpp-debug#_debugging
#
####################################################################################################
import warp as wp
# The init() function prints the directory of the kernel cache which contains the .cpp files
# generated from Warp kernels. You can put breakpoints in these C++ files through Visual Studio Code,
# but it's generally more convenient to use wp.breakpoint(). See the example below.
wp.init()
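# The cache location can also be queried programmatically via wp.config.kernel_cache_dir.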
# Enable kernels to be compiled with debug info and disable optimizations
wp.config.mode = "debug"
# Make sure Warp was built with `build_lib.py --mode=debug`
assert wp.context.runtime.core.is_debug_enabled(), "Warp must be built in debug mode to enable debugging kernels"
@wp.kernel
def example_breakpoint(n: int):
a = int(0)
for _i in range(0, n):
if a == 5:
# Your debugger should halt at the C++ code corresponding with the next line,
# namely a call to the __debugbreak() intrinsic function.
wp.breakpoint()
break
a += 1
wp.expect_eq(a, 5)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
wp.launch(example_breakpoint, dim=1, inputs=[10], device="cpu")
| 2,889 | Python | 32.604651 | 113 | 0.650052 |
NVIDIA/warp/warp/tests/unittest_serial.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import warp as wp
import warp.tests.unittest_suites
def run_suite() -> bool:
"""Run a test suite"""
# force rebuild of all kernels
wp.build.clear_kernel_cache()
print("Cleared Warp kernel cache")
runner = unittest.TextTestRunner(verbosity=2, failfast=True)
# Can swap out different suites
suite = warp.tests.unittest_suites.default_suite()
# suite = warp.tests.unittest_suites.auto_discover_suite()
# suite = warp.tests.unittest_suites.kit_suite()
print(f"Test suite has {suite.countTestCases()} tests")
    return not runner.run(suite).wasSuccessful()
if __name__ == "__main__":
ret = run_suite()
import sys
sys.exit(ret)
| 1,140 | Python | 28.25641 | 76 | 0.723684 |
NVIDIA/warp/warp/tests/test_snippet.py | import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
def test_basic(test, device):
snippet = """
out[tid] = a * x[tid] + y[tid];
"""
adj_snippet = """
adj_a += x[tid] * adj_out[tid];
adj_x[tid] += a * adj_out[tid];
adj_y[tid] += adj_out[tid];
"""
@wp.func_native(snippet, adj_snippet)
def saxpy(
a: wp.float32,
x: wp.array(dtype=wp.float32),
y: wp.array(dtype=wp.float32),
out: wp.array(dtype=wp.float32),
tid: int,
): # fmt: skip
...
@wp.kernel
def saxpy_cu(
a: wp.float32, x: wp.array(dtype=wp.float32), y: wp.array(dtype=wp.float32), out: wp.array(dtype=wp.float32)
):
tid = wp.tid()
saxpy(a, x, y, out, tid)
@wp.kernel
def saxpy_py(
a: wp.float32, x: wp.array(dtype=wp.float32), y: wp.array(dtype=wp.float32), out: wp.array(dtype=wp.float32)
):
tid = wp.tid()
out[tid] = a * x[tid] + y[tid]
N = 128
a1 = 2.0
x1 = wp.array(np.arange(N, dtype=np.float32), dtype=wp.float32, device=device, requires_grad=True)
y1 = wp.zeros_like(x1)
out1 = wp.array(np.arange(N, dtype=np.float32), dtype=wp.float32, device=device)
adj_out1 = wp.array(np.ones(N, dtype=np.float32), dtype=wp.float32, device=device)
a2 = 2.0
x2 = wp.array(np.arange(N, dtype=np.float32), dtype=wp.float32, device=device, requires_grad=True)
y2 = wp.zeros_like(x2)
out2 = wp.array(np.arange(N, dtype=np.float32), dtype=wp.float32, device=device)
adj_out2 = wp.array(np.ones(N, dtype=np.float32), dtype=wp.float32, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel=saxpy_cu, dim=N, inputs=[a1, x1, y1], outputs=[out1], device=device)
wp.launch(kernel=saxpy_py, dim=N, inputs=[a2, x2, y2], outputs=[out2], device=device)
tape.backward(grads={out1: adj_out1, out2: adj_out2})
# test forward snippet
assert_np_equal(out1.numpy(), out2.numpy())
# test backward snippet
assert_np_equal(x1.grad.numpy(), a1 * np.ones(N, dtype=np.float32))
assert_np_equal(x1.grad.numpy(), x2.grad.numpy())
assert_np_equal(y1.grad.numpy(), np.ones(N, dtype=np.float32))
assert_np_equal(y1.grad.numpy(), y2.grad.numpy())
def test_shared_memory(test, device):
snippet = """
__shared__ int s[128];
s[tid] = d[tid];
__syncthreads();
d[tid] = s[N - tid - 1];
"""
@wp.func_native(snippet)
def reverse(d: wp.array(dtype=int), N: int, tid: int):
"""Reverse the array d in place using shared memory."""
return
@wp.kernel
def reverse_kernel(d: wp.array(dtype=int), N: int):
tid = wp.tid()
reverse(d, N, tid)
N = 128
x = wp.array(np.arange(N, dtype=int), dtype=int, device=device)
y = np.arange(127, -1, -1, dtype=int)
wp.launch(kernel=reverse_kernel, dim=N, inputs=[x, N], device=device)
assert_np_equal(x.numpy(), y)
assert reverse.__doc__ == "Reverse the array d in place using shared memory."
def test_cpu_snippet(test, device):
snippet = """
int inc = 1;
out[tid] = x[tid] + inc;
"""
@wp.func_native(snippet)
def increment_snippet(
x: wp.array(dtype=wp.int32),
out: wp.array(dtype=wp.int32),
tid: int,
): # fmt: skip
...
@wp.kernel
def increment(x: wp.array(dtype=wp.int32), out: wp.array(dtype=wp.int32)):
tid = wp.tid()
increment_snippet(x, out, tid)
N = 128
x = wp.array(np.arange(N, dtype=np.int32), dtype=wp.int32, device=device)
out = wp.zeros(N, dtype=wp.int32, device=device)
wp.launch(kernel=increment, dim=N, inputs=[x], outputs=[out], device=device)
assert_np_equal(out.numpy(), np.arange(1, N + 1, 1, dtype=np.int32))
def test_custom_replay_grad(test, device):
num_threads = 16
counter = wp.zeros(1, dtype=wp.int32, device=device)
thread_ids = wp.zeros(num_threads, dtype=wp.int32, device=device)
inputs = wp.array(np.arange(num_threads, dtype=np.float32), device=device, requires_grad=True)
outputs = wp.zeros_like(inputs)
snippet = """
int next_index = atomicAdd(counter, 1);
thread_values[tid] = next_index;
"""
replay_snippet = ""
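    # The replay snippet is deliberately empty: when the tape re-runs the forward
    # pass during backward(), the counter must not be incremented a second time,
    # so the thread indices already recorded in thread_values are simply reused.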
@wp.func_native(snippet, replay_snippet=replay_snippet)
def reversible_increment(counter: wp.array(dtype=int), thread_values: wp.array(dtype=int), tid: int): # fmt: skip
...
@wp.kernel
def run_atomic_add(
input: wp.array(dtype=float),
counter: wp.array(dtype=int),
thread_values: wp.array(dtype=int),
output: wp.array(dtype=float),
):
tid = wp.tid()
reversible_increment(counter, thread_values, tid)
idx = thread_values[tid]
output[idx] = input[idx] ** 2.0
tape = wp.Tape()
with tape:
wp.launch(
run_atomic_add, dim=num_threads, inputs=[inputs, counter, thread_ids], outputs=[outputs], device=device
)
tape.backward(grads={outputs: wp.array(np.ones(num_threads, dtype=np.float32), device=device)})
assert_np_equal(inputs.grad.numpy(), 2.0 * inputs.numpy(), tol=1e-4)
def test_replay_simplification(test, device):
num_threads = 8
x = wp.array(1.0 + np.arange(num_threads, dtype=np.float32), device=device, requires_grad=True)
y = wp.zeros_like(x)
z = wp.zeros_like(x)
snippet = "y[tid] = powf(x[tid], 2.0);"
replay_snippet = "y[tid] = x[tid];"
adj_snippet = "adj_x[tid] += 2.0 * adj_y[tid];"
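    # Expected gradient by the chain rule, using the simplified replay (y := x):
    #   z = log(y)  =>  adj_y = adj_z / y = 1 / x,  adj_x = 2 * adj_y = 2 / x,
    # which is what the assert at the bottom of this test verifies.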
@wp.func_native(snippet, adj_snippet=adj_snippet, replay_snippet=replay_snippet)
def square(x: wp.array(dtype=float), y: wp.array(dtype=float), tid: int): # fmt: skip
...
@wp.kernel
def log_square_kernel(x: wp.array(dtype=float), y: wp.array(dtype=float), z: wp.array(dtype=float)):
tid = wp.tid()
square(x, y, tid)
z[tid] = wp.log(y[tid])
tape = wp.Tape()
with tape:
wp.launch(log_square_kernel, dim=num_threads, inputs=[x, y], outputs=[z], device=device)
tape.backward(grads={z: wp.array(np.ones(num_threads, dtype=np.float32), device=device)})
assert_np_equal(x.grad.numpy(), 2.0 / (1.0 + np.arange(num_threads)), tol=1e-6)
def test_recompile_snippet(test, device):
snippet = """
int inc = 1;
out[tid] = x[tid] + inc;
"""
@wp.func_native(snippet)
def increment_snippet(
x: wp.array(dtype=wp.int32),
out: wp.array(dtype=wp.int32),
tid: int,
): # fmt: skip
...
@wp.kernel
def increment(x: wp.array(dtype=wp.int32), out: wp.array(dtype=wp.int32)):
tid = wp.tid()
increment_snippet(x, out, tid)
N = 128
x = wp.array(np.arange(N, dtype=np.int32), dtype=wp.int32, device=device)
out = wp.zeros(N, dtype=wp.int32, device=device)
wp.launch(kernel=increment, dim=N, inputs=[x], outputs=[out], device=device)
assert_np_equal(out.numpy(), np.arange(1, N + 1, 1, dtype=np.int32))
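    # Redefining the native function with a different snippet below should force a
    # recompile, so the next launch of the unchanged kernel picks up the new snippet.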
snippet = """
int inc = 2;
out[tid] = x[tid] + inc;
"""
@wp.func_native(snippet)
def increment_snippet(
x: wp.array(dtype=wp.int32),
out: wp.array(dtype=wp.int32),
tid: int,
): # fmt: skip
...
wp.launch(kernel=increment, dim=N, inputs=[x], outputs=[out], device=device)
assert_np_equal(out.numpy(), 1 + np.arange(1, N + 1, 1, dtype=np.int32))
def test_return_type(test, device):
snippet = """
float sq = x * x;
return sq;
"""
adj_snippet = """
adj_x += 2 * x * adj_ret;
"""
# check python built-in return type compilation
@wp.func_native(snippet, adj_snippet)
def square(x: float) -> float: ...
# check warp built-in return type compilation
@wp.func_native(snippet, adj_snippet)
def square(x: wp.float32) -> wp.float32: ...
@wp.kernel
def square_kernel(i: wp.array(dtype=float), o: wp.array(dtype=float)):
tid = wp.tid()
x = i[tid]
o[tid] = square(x)
N = 5
x = wp.array(np.arange(N, dtype=float), dtype=float, requires_grad=True, device=device)
y = wp.zeros_like(x)
tape = wp.Tape()
with tape:
wp.launch(kernel=square_kernel, dim=N, inputs=[x, y], device=device)
y.grad = wp.ones(N, dtype=float, device=device)
tape.backward()
assert_np_equal(y.numpy(), np.array([0.0, 1.0, 4.0, 9.0, 16.0]))
assert_np_equal(x.grad.numpy(), np.array([0.0, 2.0, 4.0, 6.0, 8.0]))
class TestSnippets(unittest.TestCase):
pass
add_function_test(TestSnippets, "test_basic", test_basic, devices=get_selected_cuda_test_devices())
add_function_test(TestSnippets, "test_shared_memory", test_shared_memory, devices=get_selected_cuda_test_devices())
add_function_test(TestSnippets, "test_cpu_snippet", test_cpu_snippet, devices=["cpu"])
add_function_test(
TestSnippets, "test_custom_replay_grad", test_custom_replay_grad, devices=get_selected_cuda_test_devices()
)
add_function_test(
TestSnippets, "test_replay_simplification", test_replay_simplification, devices=get_selected_cuda_test_devices()
)
add_function_test(
TestSnippets, "test_recompile_snippet", test_recompile_snippet, devices=get_selected_cuda_test_devices()
)
add_function_test(TestSnippets, "test_return_type", test_return_type, devices=get_selected_cuda_test_devices())
if __name__ == "__main__":
unittest.main(verbosity=2)
| 9,443 | Python | 30.065789 | 118 | 0.607328 |
NVIDIA/warp/warp/tests/test_vec.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
np_signed_int_types = [
np.int8,
np.int16,
np.int32,
np.int64,
np.byte,
]
np_unsigned_int_types = [
np.uint8,
np.uint16,
np.uint32,
np.uint64,
np.ubyte,
]
np_float_types = [np.float16, np.float32, np.float64]
def randvals(rng, shape, dtype):
if dtype in np_float_types:
return rng.standard_normal(size=shape).astype(dtype)
elif dtype in [np.int8, np.uint8, np.byte, np.ubyte]:
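        # keep byte-sized integer values small so arithmetic in the tests cannot overflow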
return rng.integers(1, high=3, size=shape, dtype=dtype)
return rng.integers(1, high=5, size=shape, dtype=dtype)
kernel_cache = {}
def getkernel(func, suffix=""):
key = func.__name__ + "_" + suffix
if key not in kernel_cache:
kernel_cache[key] = wp.Kernel(func=func, key=key)
return kernel_cache[key]
def test_anon_constructor_error_dtype_keyword_missing(test, device):
@wp.kernel
def kernel():
wp.vector(length=123)
with test.assertRaisesRegex(
RuntimeError,
r"vec\(\) must have dtype as a keyword argument if it has no positional arguments, e.g.: wp.vector\(length=5, dtype=wp.float32\)$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_anon_constructor_error_length_mismatch(test, device):
@wp.kernel
def kernel():
wp.vector(
wp.vector(length=2, dtype=float),
length=3,
dtype=float,
)
with test.assertRaisesRegex(
RuntimeError,
r"Incompatible vector lengths for casting copy constructor, 3 vs 2$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_anon_constructor_error_numeric_arg_missing_1(test, device):
@wp.kernel
def kernel():
wp.vector(1.0, 2.0, length=12345)
with test.assertRaisesRegex(
RuntimeError,
r"vec\(\) must have one scalar argument or the dtype keyword argument if the length keyword argument is specified, e.g.: wp.vec\(1.0, length=5\)$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_anon_constructor_error_numeric_arg_missing_2(test, device):
@wp.kernel
def kernel():
wp.vector()
with test.assertRaisesRegex(
RuntimeError,
r"vec\(\) must have at least one numeric argument, if it's length, dtype is not specified$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_anon_constructor_error_dtype_keyword_extraneous(test, device):
@wp.kernel
def kernel():
wp.vector(1.0, 2.0, 3.0, dtype=float)
with test.assertRaisesRegex(
RuntimeError,
r"vec\(\) should not have dtype specified if numeric arguments are given, the dtype will be inferred from the argument types$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_anon_constructor_error_numeric_args_mismatch(test, device):
@wp.kernel
def kernel():
wp.vector(1.0, 2)
with test.assertRaisesRegex(
RuntimeError,
r"All numeric arguments to vec\(\) constructor should have the same "
r"type, expected 2 arg_types of type <class 'warp.types.float32'>, "
r"received <class 'warp.types.float32'>,<class 'warp.types.int32'>$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_tpl_constructor_error_incompatible_sizes(test, device):
@wp.kernel
def kernel():
wp.vec3(wp.vec2(1.0, 2.0))
with test.assertRaisesRegex(RuntimeError, r"Incompatible matrix sizes for casting copy constructor, 3 vs 2"):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_tpl_constructor_error_numeric_args_mismatch(test, device):
@wp.kernel
def kernel():
wp.vec2(1.0, 2)
with test.assertRaisesRegex(
RuntimeError,
r"All numeric arguments to vec\(\) constructor should have the same "
r"type, expected 2 arg_types of type <class 'warp.types.float32'>, "
r"received <class 'warp.types.float32'>,<class 'warp.types.int32'>$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_negation(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
def check_negation(
v2: wp.array(dtype=vec2),
v3: wp.array(dtype=vec3),
v4: wp.array(dtype=vec4),
v5: wp.array(dtype=vec5),
v2out: wp.array(dtype=vec2),
v3out: wp.array(dtype=vec3),
v4out: wp.array(dtype=vec4),
v5out: wp.array(dtype=vec5),
v20: wp.array(dtype=wptype),
v21: wp.array(dtype=wptype),
v30: wp.array(dtype=wptype),
v31: wp.array(dtype=wptype),
v32: wp.array(dtype=wptype),
v40: wp.array(dtype=wptype),
v41: wp.array(dtype=wptype),
v42: wp.array(dtype=wptype),
v43: wp.array(dtype=wptype),
v50: wp.array(dtype=wptype),
v51: wp.array(dtype=wptype),
v52: wp.array(dtype=wptype),
v53: wp.array(dtype=wptype),
v54: wp.array(dtype=wptype),
):
v2result = -v2[0]
v3result = -v3[0]
v4result = -v4[0]
v5result = -v5[0]
v2out[0] = v2result
v3out[0] = v3result
v4out[0] = v4result
v5out[0] = v5result
# multiply these outputs by 2 so we've got something to backpropagate:
v20[0] = wptype(2) * v2result[0]
v21[0] = wptype(2) * v2result[1]
v30[0] = wptype(2) * v3result[0]
v31[0] = wptype(2) * v3result[1]
v32[0] = wptype(2) * v3result[2]
v40[0] = wptype(2) * v4result[0]
v41[0] = wptype(2) * v4result[1]
v42[0] = wptype(2) * v4result[2]
v43[0] = wptype(2) * v4result[3]
v50[0] = wptype(2) * v5result[0]
v51[0] = wptype(2) * v5result[1]
v52[0] = wptype(2) * v5result[2]
v53[0] = wptype(2) * v5result[3]
v54[0] = wptype(2) * v5result[4]
kernel = getkernel(check_negation, suffix=dtype.__name__)
if register_kernels:
return
v2 = wp.array(randvals(rng, (1, 2), dtype), dtype=vec2, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, (1, 4), dtype), dtype=vec4, requires_grad=True, device=device)
v5_np = randvals(rng, (1, 5), dtype)
v5 = wp.array(v5_np, dtype=vec5, requires_grad=True, device=device)
v2out = wp.zeros(1, dtype=vec2, device=device)
v3out = wp.zeros(1, dtype=vec3, device=device)
v4out = wp.zeros(1, dtype=vec4, device=device)
v5out = wp.zeros(1, dtype=vec5, device=device)
v20 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v21 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v30 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v31 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v32 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v40 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v41 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v42 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v43 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v50 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v51 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v52 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v53 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v54 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[v2, v3, v4, v5],
outputs=[v2out, v3out, v4out, v5out, v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54],
device=device,
)
if dtype in np_float_types:
for i, l in enumerate([v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54]):
tape.backward(loss=l)
allgrads = np.concatenate([tape.gradients[v].numpy()[0] for v in [v2, v3, v4, v5]])
expected_grads = np.zeros_like(allgrads)
expected_grads[i] = -2
assert_np_equal(allgrads, expected_grads, tol=tol)
tape.zero()
assert_np_equal(v2out.numpy()[0], -v2.numpy()[0], tol=tol)
assert_np_equal(v3out.numpy()[0], -v3.numpy()[0], tol=tol)
assert_np_equal(v4out.numpy()[0], -v4.numpy()[0], tol=tol)
assert_np_equal(v5out.numpy()[0], -v5.numpy()[0], tol=tol)
def test_subtraction_unsigned(test, device, dtype, register_kernels=False):
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
def check_subtraction_unsigned():
wp.expect_eq(vec2(wptype(3), wptype(4)) - vec2(wptype(1), wptype(2)), vec2(wptype(2), wptype(2)))
wp.expect_eq(
vec3(
wptype(3),
wptype(4),
wptype(4),
)
- vec3(wptype(1), wptype(2), wptype(3)),
vec3(wptype(2), wptype(2), wptype(1)),
)
wp.expect_eq(
vec4(
wptype(3),
wptype(4),
wptype(4),
wptype(5),
)
- vec4(wptype(1), wptype(2), wptype(3), wptype(4)),
vec4(wptype(2), wptype(2), wptype(1), wptype(1)),
)
wp.expect_eq(
vec5(
wptype(3),
wptype(4),
wptype(4),
wptype(5),
wptype(4),
)
- vec5(wptype(1), wptype(2), wptype(3), wptype(4), wptype(4)),
vec5(wptype(2), wptype(2), wptype(1), wptype(1), wptype(0)),
)
kernel = getkernel(check_subtraction_unsigned, suffix=dtype.__name__)
if register_kernels:
return
wp.launch(kernel, dim=1, inputs=[], outputs=[], device=device)
def test_subtraction(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
def check_subtraction(
s2: wp.array(dtype=vec2),
s3: wp.array(dtype=vec3),
s4: wp.array(dtype=vec4),
s5: wp.array(dtype=vec5),
v2: wp.array(dtype=vec2),
v3: wp.array(dtype=vec3),
v4: wp.array(dtype=vec4),
v5: wp.array(dtype=vec5),
v20: wp.array(dtype=wptype),
v21: wp.array(dtype=wptype),
v30: wp.array(dtype=wptype),
v31: wp.array(dtype=wptype),
v32: wp.array(dtype=wptype),
v40: wp.array(dtype=wptype),
v41: wp.array(dtype=wptype),
v42: wp.array(dtype=wptype),
v43: wp.array(dtype=wptype),
v50: wp.array(dtype=wptype),
v51: wp.array(dtype=wptype),
v52: wp.array(dtype=wptype),
v53: wp.array(dtype=wptype),
v54: wp.array(dtype=wptype),
):
v2result = v2[0] - s2[0]
v3result = v3[0] - s3[0]
v4result = v4[0] - s4[0]
v5result = v5[0] - s5[0]
# multiply outputs by 2 so there's something to backpropagate:
v20[0] = wptype(2) * v2result[0]
v21[0] = wptype(2) * v2result[1]
v30[0] = wptype(2) * v3result[0]
v31[0] = wptype(2) * v3result[1]
v32[0] = wptype(2) * v3result[2]
v40[0] = wptype(2) * v4result[0]
v41[0] = wptype(2) * v4result[1]
v42[0] = wptype(2) * v4result[2]
v43[0] = wptype(2) * v4result[3]
v50[0] = wptype(2) * v5result[0]
v51[0] = wptype(2) * v5result[1]
v52[0] = wptype(2) * v5result[2]
v53[0] = wptype(2) * v5result[3]
v54[0] = wptype(2) * v5result[4]
kernel = getkernel(check_subtraction, suffix=dtype.__name__)
if register_kernels:
return
s2 = wp.array(randvals(rng, (1, 2), dtype), dtype=vec2, requires_grad=True, device=device)
s3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
s4 = wp.array(randvals(rng, (1, 4), dtype), dtype=vec4, requires_grad=True, device=device)
s5 = wp.array(randvals(rng, (1, 5), dtype), dtype=vec5, requires_grad=True, device=device)
v2 = wp.array(randvals(rng, (1, 2), dtype), dtype=vec2, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, (1, 4), dtype), dtype=vec4, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, (1, 5), dtype), dtype=vec5, requires_grad=True, device=device)
v20 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v21 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v30 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v31 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v32 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v40 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v41 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v42 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v43 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v50 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v51 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v52 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v53 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
v54 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
s2,
s3,
s4,
s5,
v2,
v3,
v4,
v5,
],
outputs=[v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54],
device=device,
)
assert_np_equal(v20.numpy()[0], 2 * (v2.numpy()[0, 0] - s2.numpy()[0, 0]), tol=tol)
assert_np_equal(v21.numpy()[0], 2 * (v2.numpy()[0, 1] - s2.numpy()[0, 1]), tol=tol)
assert_np_equal(v30.numpy()[0], 2 * (v3.numpy()[0, 0] - s3.numpy()[0, 0]), tol=tol)
assert_np_equal(v31.numpy()[0], 2 * (v3.numpy()[0, 1] - s3.numpy()[0, 1]), tol=tol)
assert_np_equal(v32.numpy()[0], 2 * (v3.numpy()[0, 2] - s3.numpy()[0, 2]), tol=tol)
assert_np_equal(v40.numpy()[0], 2 * (v4.numpy()[0, 0] - s4.numpy()[0, 0]), tol=2 * tol)
assert_np_equal(v41.numpy()[0], 2 * (v4.numpy()[0, 1] - s4.numpy()[0, 1]), tol=2 * tol)
assert_np_equal(v42.numpy()[0], 2 * (v4.numpy()[0, 2] - s4.numpy()[0, 2]), tol=2 * tol)
assert_np_equal(v43.numpy()[0], 2 * (v4.numpy()[0, 3] - s4.numpy()[0, 3]), tol=2 * tol)
assert_np_equal(v50.numpy()[0], 2 * (v5.numpy()[0, 0] - s5.numpy()[0, 0]), tol=tol)
assert_np_equal(v51.numpy()[0], 2 * (v5.numpy()[0, 1] - s5.numpy()[0, 1]), tol=tol)
assert_np_equal(v52.numpy()[0], 2 * (v5.numpy()[0, 2] - s5.numpy()[0, 2]), tol=tol)
assert_np_equal(v53.numpy()[0], 2 * (v5.numpy()[0, 3] - s5.numpy()[0, 3]), tol=tol)
assert_np_equal(v54.numpy()[0], 2 * (v5.numpy()[0, 4] - s5.numpy()[0, 4]), tol=tol)
if dtype in np_float_types:
for i, l in enumerate([v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54]):
tape.backward(loss=l)
sgrads = np.concatenate([tape.gradients[v].numpy()[0] for v in [s2, s3, s4, s5]])
expected_grads = np.zeros_like(sgrads)
expected_grads[i] = -2
assert_np_equal(sgrads, expected_grads, tol=10 * tol)
allgrads = np.concatenate([tape.gradients[v].numpy()[0] for v in [v2, v3, v4, v5]])
expected_grads = np.zeros_like(allgrads)
            # d/dv (v - s) = 1 (doubled, since the outputs are scaled by 2)
expected_grads[i] = 2
assert_np_equal(allgrads, expected_grads, tol=tol)
tape.zero()
def test_length(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-7,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
def check_length(
v2: wp.array(dtype=vec2),
v3: wp.array(dtype=vec3),
v4: wp.array(dtype=vec4),
v5: wp.array(dtype=vec5),
l2: wp.array(dtype=wptype),
l3: wp.array(dtype=wptype),
l4: wp.array(dtype=wptype),
l5: wp.array(dtype=wptype),
l22: wp.array(dtype=wptype),
l23: wp.array(dtype=wptype),
l24: wp.array(dtype=wptype),
l25: wp.array(dtype=wptype),
):
l2[0] = wptype(2) * wp.length(v2[0])
l3[0] = wptype(2) * wp.length(v3[0])
l4[0] = wptype(2) * wp.length(v4[0])
l5[0] = wptype(2) * wp.length(v5[0])
l22[0] = wptype(2) * wp.length_sq(v2[0])
l23[0] = wptype(2) * wp.length_sq(v3[0])
l24[0] = wptype(2) * wp.length_sq(v4[0])
l25[0] = wptype(2) * wp.length_sq(v5[0])
kernel = getkernel(check_length, suffix=dtype.__name__)
if register_kernels:
return
v2 = wp.array(randvals(rng, (1, 2), dtype), dtype=vec2, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, (1, 4), dtype), dtype=vec4, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, (1, 5), dtype), dtype=vec5, requires_grad=True, device=device)
l2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
l3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
l4 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
l5 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
l22 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
l23 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
l24 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
l25 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
v2,
v3,
v4,
v5,
],
outputs=[l2, l3, l4, l5, l22, l23, l24, l25],
device=device,
)
assert_np_equal(l2.numpy()[0], 2 * np.linalg.norm(v2.numpy()), tol=10 * tol)
assert_np_equal(l3.numpy()[0], 2 * np.linalg.norm(v3.numpy()), tol=10 * tol)
assert_np_equal(l4.numpy()[0], 2 * np.linalg.norm(v4.numpy()), tol=10 * tol)
assert_np_equal(l5.numpy()[0], 2 * np.linalg.norm(v5.numpy()), tol=10 * tol)
assert_np_equal(l22.numpy()[0], 2 * np.linalg.norm(v2.numpy()) ** 2, tol=10 * tol)
assert_np_equal(l23.numpy()[0], 2 * np.linalg.norm(v3.numpy()) ** 2, tol=10 * tol)
assert_np_equal(l24.numpy()[0], 2 * np.linalg.norm(v4.numpy()) ** 2, tol=10 * tol)
assert_np_equal(l25.numpy()[0], 2 * np.linalg.norm(v5.numpy()) ** 2, tol=10 * tol)
tape.backward(loss=l2)
grad = tape.gradients[v2].numpy()[0]
expected_grad = 2 * v2.numpy()[0] / np.linalg.norm(v2.numpy())
assert_np_equal(grad, expected_grad, tol=10 * tol)
tape.zero()
tape.backward(loss=l3)
grad = tape.gradients[v3].numpy()[0]
expected_grad = 2 * v3.numpy()[0] / np.linalg.norm(v3.numpy())
assert_np_equal(grad, expected_grad, tol=10 * tol)
tape.zero()
tape.backward(loss=l4)
grad = tape.gradients[v4].numpy()[0]
expected_grad = 2 * v4.numpy()[0] / np.linalg.norm(v4.numpy())
assert_np_equal(grad, expected_grad, tol=10 * tol)
tape.zero()
tape.backward(loss=l5)
grad = tape.gradients[v5].numpy()[0]
expected_grad = 2 * v5.numpy()[0] / np.linalg.norm(v5.numpy())
assert_np_equal(grad, expected_grad, tol=10 * tol)
tape.zero()
tape.backward(loss=l22)
grad = tape.gradients[v2].numpy()[0]
expected_grad = 4 * v2.numpy()[0]
assert_np_equal(grad, expected_grad, tol=10 * tol)
tape.zero()
tape.backward(loss=l23)
grad = tape.gradients[v3].numpy()[0]
expected_grad = 4 * v3.numpy()[0]
assert_np_equal(grad, expected_grad, tol=10 * tol)
tape.zero()
tape.backward(loss=l24)
grad = tape.gradients[v4].numpy()[0]
expected_grad = 4 * v4.numpy()[0]
assert_np_equal(grad, expected_grad, tol=10 * tol)
tape.zero()
tape.backward(loss=l25)
grad = tape.gradients[v5].numpy()[0]
expected_grad = 4 * v5.numpy()[0]
assert_np_equal(grad, expected_grad, tol=10 * tol)
tape.zero()
def test_normalize(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec2 = wp.types.vector(length=2, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)
def check_normalize(
v2: wp.array(dtype=vec2),
v3: wp.array(dtype=vec3),
v4: wp.array(dtype=vec4),
v5: wp.array(dtype=vec5),
n20: wp.array(dtype=wptype),
n21: wp.array(dtype=wptype),
n30: wp.array(dtype=wptype),
n31: wp.array(dtype=wptype),
n32: wp.array(dtype=wptype),
n40: wp.array(dtype=wptype),
n41: wp.array(dtype=wptype),
n42: wp.array(dtype=wptype),
n43: wp.array(dtype=wptype),
n50: wp.array(dtype=wptype),
n51: wp.array(dtype=wptype),
n52: wp.array(dtype=wptype),
n53: wp.array(dtype=wptype),
n54: wp.array(dtype=wptype),
):
n2 = wptype(2) * wp.normalize(v2[0])
n3 = wptype(2) * wp.normalize(v3[0])
n4 = wptype(2) * wp.normalize(v4[0])
n5 = wptype(2) * wp.normalize(v5[0])
n20[0] = n2[0]
n21[0] = n2[1]
n30[0] = n3[0]
n31[0] = n3[1]
n32[0] = n3[2]
n40[0] = n4[0]
n41[0] = n4[1]
n42[0] = n4[2]
n43[0] = n4[3]
n50[0] = n5[0]
n51[0] = n5[1]
n52[0] = n5[2]
n53[0] = n5[3]
n54[0] = n5[4]
def check_normalize_alt(
v2: wp.array(dtype=vec2),
v3: wp.array(dtype=vec3),
v4: wp.array(dtype=vec4),
v5: wp.array(dtype=vec5),
n20: wp.array(dtype=wptype),
n21: wp.array(dtype=wptype),
n30: wp.array(dtype=wptype),
n31: wp.array(dtype=wptype),
n32: wp.array(dtype=wptype),
n40: wp.array(dtype=wptype),
n41: wp.array(dtype=wptype),
n42: wp.array(dtype=wptype),
n43: wp.array(dtype=wptype),
n50: wp.array(dtype=wptype),
n51: wp.array(dtype=wptype),
n52: wp.array(dtype=wptype),
n53: wp.array(dtype=wptype),
n54: wp.array(dtype=wptype),
):
n2 = wptype(2) * v2[0] / wp.length(v2[0])
n3 = wptype(2) * v3[0] / wp.length(v3[0])
n4 = wptype(2) * v4[0] / wp.length(v4[0])
n5 = wptype(2) * v5[0] / wp.length(v5[0])
n20[0] = n2[0]
n21[0] = n2[1]
n30[0] = n3[0]
n31[0] = n3[1]
n32[0] = n3[2]
n40[0] = n4[0]
n41[0] = n4[1]
n42[0] = n4[2]
n43[0] = n4[3]
n50[0] = n5[0]
n51[0] = n5[1]
n52[0] = n5[2]
n53[0] = n5[3]
n54[0] = n5[4]
normalize_kernel = getkernel(check_normalize, suffix=dtype.__name__)
normalize_alt_kernel = getkernel(check_normalize_alt, suffix=dtype.__name__)
if register_kernels:
return
    # check_normalize_alt only uses operations that are tested elsewhere, so we
    # just verify that the two kernels produce the same results and gradients
v2 = wp.array(randvals(rng, (1, 2), dtype), dtype=vec2, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, (1, 4), dtype), dtype=vec4, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, (1, 5), dtype), dtype=vec5, requires_grad=True, device=device)
n20 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n21 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n30 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n31 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n32 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n40 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n41 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n42 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n43 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n50 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n51 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n52 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n53 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n54 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n20_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n21_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n30_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n31_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n32_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n40_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n41_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n42_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n43_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n50_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n51_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n52_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n53_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
n54_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
outputs0 = [
n20,
n21,
n30,
n31,
n32,
n40,
n41,
n42,
n43,
n50,
n51,
n52,
n53,
n54,
]
tape0 = wp.Tape()
with tape0:
wp.launch(
normalize_kernel,
dim=1,
inputs=[
v2,
v3,
v4,
v5,
],
outputs=outputs0,
device=device,
)
outputs1 = [
n20_alt,
n21_alt,
n30_alt,
n31_alt,
n32_alt,
n40_alt,
n41_alt,
n42_alt,
n43_alt,
n50_alt,
n51_alt,
n52_alt,
n53_alt,
n54_alt,
]
tape1 = wp.Tape()
with tape1:
wp.launch(
normalize_alt_kernel,
dim=1,
inputs=[
v2,
v3,
v4,
v5,
],
outputs=outputs1,
device=device,
)
for ncmp, ncmpalt in zip(outputs0, outputs1):
assert_np_equal(ncmp.numpy()[0], ncmpalt.numpy()[0], tol=10 * tol)
invecs = [
v2,
v2,
v3,
v3,
v3,
v4,
v4,
v4,
v4,
v5,
v5,
v5,
v5,
v5,
]
for ncmp, ncmpalt, v in zip(outputs0, outputs1, invecs):
tape0.backward(loss=ncmp)
tape1.backward(loss=ncmpalt)
assert_np_equal(tape0.gradients[v].numpy()[0], tape1.gradients[v].numpy()[0], tol=10 * tol)
tape0.zero()
tape1.zero()
def test_crossproduct(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec3 = wp.types.vector(length=3, dtype=wptype)
def check_cross(
s3: wp.array(dtype=vec3),
v3: wp.array(dtype=vec3),
c0: wp.array(dtype=wptype),
c1: wp.array(dtype=wptype),
c2: wp.array(dtype=wptype),
):
c = wp.cross(s3[0], v3[0])
# multiply outputs by 2 so we've got something to backpropagate:
c0[0] = wptype(2) * c[0]
c1[0] = wptype(2) * c[1]
c2[0] = wptype(2) * c[2]
kernel = getkernel(check_cross, suffix=dtype.__name__)
if register_kernels:
return
s3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, (1, 3), dtype), dtype=vec3, requires_grad=True, device=device)
c0 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
c1 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
c2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
s3,
v3,
],
outputs=[c0, c1, c2],
device=device,
)
result = 2 * np.cross(s3.numpy(), v3.numpy())[0]
assert_np_equal(c0.numpy()[0], result[0], tol=10 * tol)
assert_np_equal(c1.numpy()[0], result[1], tol=10 * tol)
assert_np_equal(c2.numpy()[0], result[2], tol=10 * tol)
if dtype in np_float_types:
# c.x = sy vz - sz vy
# c.y = sz vx - sx vz
# c.z = sx vy - sy vx
# ( d/dsx d/dsy d/dsz )c.x = ( 0 vz -vy )
# ( d/dsx d/dsy d/dsz )c.y = ( -vz 0 vx )
# ( d/dsx d/dsy d/dsz )c.z = ( vy -vx 0 )
# ( d/dvx d/dvy d/dvz )c.x = (0 -sz sy)
# ( d/dvx d/dvy d/dvz )c.y = (sz 0 -sx)
# ( d/dvx d/dvy d/dvz )c.z = (-sy sx 0)
tape.backward(loss=c0)
assert_np_equal(
tape.gradients[s3].numpy(), 2.0 * np.array([0, v3.numpy()[0, 2], -v3.numpy()[0, 1]]), tol=10 * tol
)
assert_np_equal(
tape.gradients[v3].numpy(), 2.0 * np.array([0, -s3.numpy()[0, 2], s3.numpy()[0, 1]]), tol=10 * tol
)
tape.zero()
tape.backward(loss=c1)
assert_np_equal(
tape.gradients[s3].numpy(), 2.0 * np.array([-v3.numpy()[0, 2], 0, v3.numpy()[0, 0]]), tol=10 * tol
)
assert_np_equal(
tape.gradients[v3].numpy(), 2.0 * np.array([s3.numpy()[0, 2], 0, -s3.numpy()[0, 0]]), tol=10 * tol
)
tape.zero()
tape.backward(loss=c2)
assert_np_equal(
tape.gradients[s3].numpy(), 2.0 * np.array([v3.numpy()[0, 1], -v3.numpy()[0, 0], 0]), tol=10 * tol
)
assert_np_equal(
tape.gradients[v3].numpy(), 2.0 * np.array([-s3.numpy()[0, 1], s3.numpy()[0, 0], 0]), tol=10 * tol
)
tape.zero()
def test_casting_constructors(test, device, dtype, register_kernels=False):
np_type = np.dtype(dtype)
wp_type = wp.types.np_dtype_to_warp_type[np_type]
vec3 = wp.types.vector(length=3, dtype=wp_type)
np16 = np.dtype(np.float16)
wp16 = wp.types.np_dtype_to_warp_type[np16]
np32 = np.dtype(np.float32)
wp32 = wp.types.np_dtype_to_warp_type[np32]
np64 = np.dtype(np.float64)
wp64 = wp.types.np_dtype_to_warp_type[np64]
def cast_float16(a: wp.array(dtype=wp_type, ndim=2), b: wp.array(dtype=wp16, ndim=2)):
tid = wp.tid()
v1 = vec3(a[tid, 0], a[tid, 1], a[tid, 2])
v2 = wp.vector(v1, dtype=wp16)
b[tid, 0] = v2[0]
b[tid, 1] = v2[1]
b[tid, 2] = v2[2]
def cast_float32(a: wp.array(dtype=wp_type, ndim=2), b: wp.array(dtype=wp32, ndim=2)):
tid = wp.tid()
v1 = vec3(a[tid, 0], a[tid, 1], a[tid, 2])
v2 = wp.vector(v1, dtype=wp32)
b[tid, 0] = v2[0]
b[tid, 1] = v2[1]
b[tid, 2] = v2[2]
def cast_float64(a: wp.array(dtype=wp_type, ndim=2), b: wp.array(dtype=wp64, ndim=2)):
tid = wp.tid()
v1 = vec3(a[tid, 0], a[tid, 1], a[tid, 2])
v2 = wp.vector(v1, dtype=wp64)
b[tid, 0] = v2[0]
b[tid, 1] = v2[1]
b[tid, 2] = v2[2]
kernel_16 = getkernel(cast_float16, suffix=dtype.__name__)
kernel_32 = getkernel(cast_float32, suffix=dtype.__name__)
kernel_64 = getkernel(cast_float64, suffix=dtype.__name__)
if register_kernels:
return
# check casting to float 16
a = wp.array(np.ones((1, 3), dtype=np_type), dtype=wp_type, requires_grad=True, device=device)
b = wp.array(np.zeros((1, 3), dtype=np16), dtype=wp16, requires_grad=True, device=device)
b_result = np.ones((1, 3), dtype=np16)
b_grad = wp.array(np.ones((1, 3), dtype=np16), dtype=wp16, device=device)
a_grad = wp.array(np.ones((1, 3), dtype=np_type), dtype=wp_type, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel=kernel_16, dim=1, inputs=[a, b], device=device)
tape.backward(grads={b: b_grad})
out = tape.gradients[a].numpy()
assert_np_equal(b.numpy(), b_result)
assert_np_equal(out, a_grad.numpy())
# check casting to float 32
a = wp.array(np.ones((1, 3), dtype=np_type), dtype=wp_type, requires_grad=True, device=device)
b = wp.array(np.zeros((1, 3), dtype=np32), dtype=wp32, requires_grad=True, device=device)
b_result = np.ones((1, 3), dtype=np32)
b_grad = wp.array(np.ones((1, 3), dtype=np32), dtype=wp32, device=device)
a_grad = wp.array(np.ones((1, 3), dtype=np_type), dtype=wp_type, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel=kernel_32, dim=1, inputs=[a, b], device=device)
tape.backward(grads={b: b_grad})
out = tape.gradients[a].numpy()
assert_np_equal(b.numpy(), b_result)
assert_np_equal(out, a_grad.numpy())
# check casting to float 64
a = wp.array(np.ones((1, 3), dtype=np_type), dtype=wp_type, requires_grad=True, device=device)
b = wp.array(np.zeros((1, 3), dtype=np64), dtype=wp64, requires_grad=True, device=device)
b_result = np.ones((1, 3), dtype=np64)
b_grad = wp.array(np.ones((1, 3), dtype=np64), dtype=wp64, device=device)
a_grad = wp.array(np.ones((1, 3), dtype=np_type), dtype=wp_type, device=device)
tape = wp.Tape()
with tape:
wp.launch(kernel=kernel_64, dim=1, inputs=[a, b], device=device)
tape.backward(grads={b: b_grad})
out = tape.gradients[a].numpy()
assert_np_equal(b.numpy(), b_result)
assert_np_equal(out, a_grad.numpy())
@wp.kernel
def test_vector_constructor_value_func():
a = wp.vec2()
b = wp.vector(a, dtype=wp.float16)
c = wp.vector(a)
d = wp.vector(a, length=2)
# Test vector constructors using an explicit type (float16).
# Note that these tests deliberately avoid using generics / closure
# args to create kernels dynamically (like the rest of this file),
# as those use different code paths to resolve arg types, which
# have led to regressions in the past.
@wp.kernel
def test_constructors_explicit_precision():
    # construction of custom vector types
ones = wp.vector(wp.float16(1.0), length=2)
zeros = wp.vector(length=2, dtype=wp.float16)
custom = wp.vector(wp.float16(0.0), wp.float16(1.0))
for i in range(2):
wp.expect_eq(ones[i], wp.float16(1.0))
wp.expect_eq(zeros[i], wp.float16(0.0))
wp.expect_eq(custom[i], wp.float16(i))
# Same as above, but with the default (float/int) types, which exercises
# different code paths that must canonicalize types correctly during codegen.
@wp.kernel
def test_constructors_default_precision():
    # construction of custom vector types
ones = wp.vector(1.0, length=2)
zeros = wp.vector(length=2, dtype=float)
custom = wp.vector(0.0, 1.0)
for i in range(2):
wp.expect_eq(ones[i], 1.0)
wp.expect_eq(zeros[i], 0.0)
wp.expect_eq(custom[i], float(i))
@wp.kernel
def test_vector_mutation(expected: wp.types.vector(length=10, dtype=float)):
v = wp.vector(length=10, dtype=float)
# test element indexing
v[0] = 1.0
for i in range(1, 10):
v[i] = float(i) + 1.0
wp.expect_eq(v, expected)
CONSTANT_LENGTH = wp.constant(10)
# tests that we can use a global constant in the length keyword argument
# of the vector constructor
@wp.kernel
def test_constructors_constant_length():
v = wp.vector(length=(CONSTANT_LENGTH), dtype=float)
for i in range(CONSTANT_LENGTH):
v[i] = float(i)
devices = get_test_devices()
class TestVec(unittest.TestCase):
def test_tpl_ops_with_anon(self):
vec3i = wp.vec(3, dtype=int)
v = wp.vec3i(1, 2, 3)
v += vec3i(2, 3, 4)
v -= vec3i(3, 4, 5)
self.assertSequenceEqual(v, (0, 1, 2))
v = vec3i(1, 2, 3)
v += wp.vec3i(2, 3, 4)
v -= wp.vec3i(3, 4, 5)
self.assertSequenceEqual(v, (0, 1, 2))
add_kernel_test(TestVec, test_vector_constructor_value_func, dim=1, devices=devices)
add_kernel_test(TestVec, test_constructors_explicit_precision, dim=1, devices=devices)
add_kernel_test(TestVec, test_constructors_default_precision, dim=1, devices=devices)
add_kernel_test(TestVec, test_constructors_constant_length, dim=1, devices=devices)
vec10 = wp.types.vector(length=10, dtype=float)
add_kernel_test(
TestVec,
test_vector_mutation,
dim=1,
inputs=[vec10(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0)],
devices=devices,
)
for dtype in np_unsigned_int_types:
add_function_test_register_kernel(
TestVec,
f"test_subtraction_unsigned_{dtype.__name__}",
test_subtraction_unsigned,
devices=devices,
dtype=dtype,
)
for dtype in np_signed_int_types + np_float_types:
add_function_test_register_kernel(
TestVec, f"test_negation_{dtype.__name__}", test_negation, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestVec, f"test_subtraction_{dtype.__name__}", test_subtraction, devices=devices, dtype=dtype
)
for dtype in np_float_types:
add_function_test_register_kernel(
TestVec, f"test_crossproduct_{dtype.__name__}", test_crossproduct, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestVec, f"test_length_{dtype.__name__}", test_length, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestVec, f"test_normalize_{dtype.__name__}", test_normalize, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestVec,
f"test_casting_constructors_{dtype.__name__}",
test_casting_constructors,
devices=devices,
dtype=dtype,
)
add_function_test(
TestVec,
"test_anon_constructor_error_dtype_keyword_missing",
test_anon_constructor_error_dtype_keyword_missing,
devices=devices,
)
add_function_test(
TestVec,
"test_anon_constructor_error_length_mismatch",
test_anon_constructor_error_length_mismatch,
devices=devices,
)
add_function_test(
TestVec,
"test_anon_constructor_error_numeric_arg_missing_1",
test_anon_constructor_error_numeric_arg_missing_1,
devices=devices,
)
add_function_test(
TestVec,
"test_anon_constructor_error_numeric_arg_missing_2",
test_anon_constructor_error_numeric_arg_missing_2,
devices=devices,
)
add_function_test(
TestVec,
"test_anon_constructor_error_dtype_keyword_extraneous",
test_anon_constructor_error_dtype_keyword_extraneous,
devices=devices,
)
add_function_test(
TestVec,
"test_anon_constructor_error_numeric_args_mismatch",
test_anon_constructor_error_numeric_args_mismatch,
devices=devices,
)
add_function_test(
TestVec,
"test_tpl_constructor_error_incompatible_sizes",
test_tpl_constructor_error_incompatible_sizes,
devices=devices,
)
add_function_test(
TestVec,
"test_tpl_constructor_error_numeric_args_mismatch",
test_tpl_constructor_error_numeric_args_mismatch,
devices=devices,
)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=True)
| 42,423 | Python | 32.589865 | 155 | 0.582443 |
NVIDIA/warp/warp/tests/test_fast_math.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def test_pow(e: float, result: float):
tid = wp.tid()
y = wp.pow(-2.0, e)
wp.expect_eq(y, result)
def test_fast_math_disabled(test, device):
# on all systems pow() should handle negative base correctly with fast math off
wp.set_module_options({"fast_math": False})
wp.launch(test_pow, dim=1, inputs=[2.0, 4.0], device=device)
@unittest.expectedFailure
def test_fast_math_cuda(test, device):
    # on CUDA with --fast-math enabled, taking the pow() of a negative base
    # results in a NaN, so this test is expected to fail
wp.set_module_options({"fast_math": True})
try:
wp.launch(test_pow, dim=1, inputs=[2.0, 4.0], device=device)
finally:
# Turn fast math back off
wp.set_module_options({"fast_math": False})
class TestFastMath(unittest.TestCase):
def test_fast_math_cpu(self):
        # on the CPU, pow() should handle a negative base correctly even with fast math enabled
wp.set_module_options({"fast_math": True})
try:
wp.launch(test_pow, dim=1, inputs=[2.0, 4.0], device="cpu")
finally:
wp.set_module_options({"fast_math": False})
devices = get_test_devices()
add_function_test(TestFastMath, "test_fast_math_cuda", test_fast_math_cuda, devices=get_cuda_test_devices())
add_function_test(TestFastMath, "test_fast_math_disabled", test_fast_math_disabled, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 1,951 | Python | 30.999999 | 108 | 0.69144 |
NVIDIA/warp/warp/tests/aux_test_class_kernel.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Dummy class used in test_reload.py"""
import warp as wp
class ClassKernelTest:
def __init__(self, device):
# 3x3 frames in the rest pose:
self.identities = wp.zeros(shape=10, dtype=wp.mat33, device=device)
wp.launch(kernel=self.gen_identities_kernel, dim=10, inputs=[self.identities], device=device)
@wp.func
def return_identity(e: int):
return wp.mat33(1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0)
@wp.kernel
def gen_identities_kernel(s: wp.array(dtype=wp.mat33)):
tid = wp.tid()
s[tid] = ClassKernelTest.return_identity(tid)
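# Usage sketch (hypothetical, mirroring how test_reload.py exercises the class):
#
#   tester = ClassKernelTest(device="cpu")
#   assert tester.identities.numpy().shape == (10, 3, 3)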
| 1,027 | Python | 37.074073 | 101 | 0.70594 |
NVIDIA/warp/warp/tests/unittest_suites.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Warp Test Suites
This file is intended to define functions that return TestSuite objects, which
can be used in parallel or serial unit tests (with optional code coverage)
"""
import os
import sys
import unittest
START_DIRECTORY = os.path.realpath(os.path.dirname(__file__))
TOP_LEVEL_DIRECTORY = os.path.realpath(os.path.join(START_DIRECTORY, "..", ".."))
def _create_suite_from_test_classes(test_loader, test_classes):
suite = unittest.TestSuite()
for test in test_classes:
sub_suite = unittest.TestSuite()
# Note that the test_loader might have testNamePatterns set
sub_suite.addTest(test_loader.loadTestsFromTestCase(test))
suite.addTest(sub_suite)
return suite
def auto_discover_suite(loader=unittest.defaultTestLoader, pattern="test*.py"):
"""Uses unittest auto-discovery to build a test suite (test_*.py pattern)"""
return loader.discover(start_dir=START_DIRECTORY, pattern=pattern, top_level_dir=TOP_LEVEL_DIRECTORY)
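# Minimal usage sketch (illustrative, not part of the public API):
#
#   suite = auto_discover_suite()
#   unittest.TextTestRunner(verbosity=2).run(suite)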
def _iter_class_suites(test_suite):
"""Iterate class-level test suites - test suites that contains test cases
From unittest_parallel.py
"""
has_cases = any(isinstance(suite, unittest.TestCase) for suite in test_suite)
if has_cases:
yield test_suite
else:
for suite in test_suite:
yield from _iter_class_suites(suite)
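# Illustrative sketch of the generator above: list each class-level suite and
# its test-case count (assumes tests have already been discovered).
#
#   for class_suite in _iter_class_suites(auto_discover_suite()):
#       print(type(class_suite._tests[0]).__name__, class_suite.countTestCases())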
def compare_unittest_suites(
    test_loader: unittest.TestLoader, test_suite_name: str, reference_suite: unittest.TestSuite
) -> unittest.TestSuite:
    """Print the tests in `reference_suite` that are missing from the suite named by `test_suite_name`, and return that suite."""
test_suite_fn = getattr(sys.modules[__name__], test_suite_name + "_suite")
test_suite = test_suite_fn(test_loader)
    test_suite_classes_str = {
        type(class_suite._tests[0]).__name__
        for class_suite in _iter_class_suites(test_suite)
        if class_suite.countTestCases() > 0
    }
    reference_suite_classes_str = {
        type(class_suite._tests[0]).__name__
        for class_suite in _iter_class_suites(reference_suite)
        if class_suite.countTestCases() > 0
    }
set_difference = reference_suite_classes_str - test_suite_classes_str
print(f"Selected test suite '{test_suite_name}'")
if len(set_difference) > 0:
print(f"Test suite '{test_suite_name}' omits the following test classes:")
for test_entry in set_difference:
print(f" {test_entry}")
return test_suite
def default_suite(test_loader: unittest.TestLoader = unittest.defaultTestLoader):
"""Example of a manually constructed test suite.
Intended to be modified to create additional test suites
"""
from warp.tests.test_adam import TestAdam
from warp.tests.test_arithmetic import TestArithmetic
from warp.tests.test_array import TestArray
from warp.tests.test_array_reduce import TestArrayReduce
from warp.tests.test_async import TestAsync
from warp.tests.test_atomic import TestAtomic
from warp.tests.test_bool import TestBool
from warp.tests.test_builtins_resolution import TestBuiltinsResolution
from warp.tests.test_bvh import TestBvh
from warp.tests.test_closest_point_edge_edge import TestClosestPointEdgeEdgeMethods
from warp.tests.test_codegen import TestCodeGen
from warp.tests.test_compile_consts import TestConstants
from warp.tests.test_conditional import TestConditional
from warp.tests.test_copy import TestCopy
from warp.tests.test_ctypes import TestCTypes
from warp.tests.test_dense import TestDense
from warp.tests.test_devices import TestDevices
from warp.tests.test_dlpack import TestDLPack
from warp.tests.test_examples import (
TestCoreExamples,
TestFemDiffusionExamples,
TestFemExamples,
TestOptimExamples,
TestSimExamples,
)
from warp.tests.test_fabricarray import TestFabricArray
from warp.tests.test_fast_math import TestFastMath
from warp.tests.test_fem import TestFem, TestFemShapeFunctions
from warp.tests.test_fp16 import TestFp16
from warp.tests.test_func import TestFunc
from warp.tests.test_generics import TestGenerics
from warp.tests.test_grad import TestGrad
from warp.tests.test_grad_customs import TestGradCustoms
from warp.tests.test_hash_grid import TestHashGrid
from warp.tests.test_import import TestImport
from warp.tests.test_indexedarray import TestIndexedArray
from warp.tests.test_intersect import TestIntersect
from warp.tests.test_jax import TestJax
from warp.tests.test_large import TestLarge
from warp.tests.test_launch import TestLaunch
from warp.tests.test_lerp import TestLerp
from warp.tests.test_linear_solvers import TestLinearSolvers
from warp.tests.test_lvalue import TestLValue
from warp.tests.test_marching_cubes import TestMarchingCubes
from warp.tests.test_mat import TestMat
from warp.tests.test_mat_lite import TestMatLite
from warp.tests.test_mat_scalar_ops import TestMatScalarOps
from warp.tests.test_math import TestMath
from warp.tests.test_matmul import TestMatmul
from warp.tests.test_matmul_lite import TestMatmulLite
from warp.tests.test_mempool import TestMempool
from warp.tests.test_mesh import TestMesh
from warp.tests.test_mesh_query_aabb import TestMeshQueryAABBMethods
from warp.tests.test_mesh_query_point import TestMeshQueryPoint
from warp.tests.test_mesh_query_ray import TestMeshQueryRay
from warp.tests.test_mlp import TestMLP
from warp.tests.test_model import TestModel
from warp.tests.test_modules_lite import TestModuleLite
from warp.tests.test_multigpu import TestMultiGPU
from warp.tests.test_noise import TestNoise
from warp.tests.test_operators import TestOperators
from warp.tests.test_options import TestOptions
from warp.tests.test_peer import TestPeer
from warp.tests.test_pinned import TestPinned
from warp.tests.test_print import TestPrint
from warp.tests.test_quat import TestQuat
from warp.tests.test_rand import TestRand
from warp.tests.test_reload import TestReload
from warp.tests.test_rounding import TestRounding
from warp.tests.test_runlength_encode import TestRunlengthEncode
from warp.tests.test_sim_grad import TestSimGradients
from warp.tests.test_sim_kinematics import TestSimKinematics
from warp.tests.test_smoothstep import TestSmoothstep
from warp.tests.test_snippet import TestSnippets
from warp.tests.test_sparse import TestSparse
from warp.tests.test_spatial import TestSpatial
from warp.tests.test_streams import TestStreams
from warp.tests.test_struct import TestStruct
from warp.tests.test_tape import TestTape
from warp.tests.test_torch import TestTorch
from warp.tests.test_transient_module import TestTransientModule
from warp.tests.test_types import TestTypes
from warp.tests.test_utils import TestUtils
from warp.tests.test_vec import TestVec
from warp.tests.test_vec_lite import TestVecLite
from warp.tests.test_vec_scalar_ops import TestVecScalarOps
from warp.tests.test_verify_fp import TestVerifyFP
from warp.tests.test_volume import TestVolume
from warp.tests.test_volume_write import TestVolumeWrite
test_classes = [
TestAdam,
TestArithmetic,
TestArray,
TestArrayReduce,
TestAsync,
TestAtomic,
TestBool,
TestBuiltinsResolution,
TestBvh,
TestClosestPointEdgeEdgeMethods,
TestCodeGen,
TestConstants,
TestConditional,
TestCopy,
TestCTypes,
TestDense,
TestDevices,
TestDLPack,
TestCoreExamples,
TestFemDiffusionExamples,
TestFemExamples,
TestOptimExamples,
TestSimExamples,
TestFabricArray,
TestFastMath,
TestFem,
TestFemShapeFunctions,
TestFp16,
TestFunc,
TestGenerics,
TestGrad,
TestGradCustoms,
TestHashGrid,
TestImport,
TestIndexedArray,
TestIntersect,
TestJax,
TestLarge,
TestLaunch,
TestLerp,
TestLinearSolvers,
TestLValue,
TestMarchingCubes,
TestMat,
TestMatLite,
TestMatScalarOps,
TestMath,
TestMatmul,
TestMatmulLite,
TestMempool,
TestMesh,
TestMeshQueryAABBMethods,
TestMeshQueryPoint,
TestMeshQueryRay,
TestMLP,
TestModel,
TestModuleLite,
TestMultiGPU,
TestNoise,
TestOperators,
TestOptions,
TestPeer,
TestPinned,
TestPrint,
TestQuat,
TestRand,
TestReload,
TestRounding,
TestRunlengthEncode,
TestSimGradients,
TestSimKinematics,
TestSmoothstep,
TestSparse,
TestSnippets,
TestSpatial,
TestStreams,
TestStruct,
TestTape,
TestTorch,
TestTransientModule,
TestTypes,
TestUtils,
TestVec,
TestVecLite,
TestVecScalarOps,
TestVerifyFP,
TestVolume,
TestVolumeWrite,
]
return _create_suite_from_test_classes(test_loader, test_classes)
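# Example of running the suite above (a sketch; assumes this module is imported
# as warp.tests.unittest_suites):
#
#   runner = unittest.TextTestRunner(verbosity=2)
#   runner.run(default_suite(unittest.defaultTestLoader))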
def kit_suite(test_loader: unittest.TestLoader = unittest.defaultTestLoader):
"""Tries to mimic the test suite used for testing omni.warp.core in Kit
Requires manual updates with test_ext.py for now.
"""
from warp.tests.test_array import TestArray
from warp.tests.test_array_reduce import TestArrayReduce
from warp.tests.test_bvh import TestBvh
from warp.tests.test_codegen import TestCodeGen
from warp.tests.test_compile_consts import TestConstants
from warp.tests.test_conditional import TestConditional
from warp.tests.test_ctypes import TestCTypes
from warp.tests.test_devices import TestDevices
from warp.tests.test_dlpack import TestDLPack
from warp.tests.test_fabricarray import TestFabricArray
from warp.tests.test_func import TestFunc
from warp.tests.test_generics import TestGenerics
from warp.tests.test_grad_customs import TestGradCustoms
from warp.tests.test_hash_grid import TestHashGrid
from warp.tests.test_indexedarray import TestIndexedArray
from warp.tests.test_launch import TestLaunch
from warp.tests.test_marching_cubes import TestMarchingCubes
from warp.tests.test_mat_lite import TestMatLite
from warp.tests.test_math import TestMath
from warp.tests.test_matmul_lite import TestMatmulLite
from warp.tests.test_mesh import TestMesh
from warp.tests.test_mesh_query_aabb import TestMeshQueryAABBMethods
from warp.tests.test_mesh_query_point import TestMeshQueryPoint
from warp.tests.test_mesh_query_ray import TestMeshQueryRay
from warp.tests.test_modules_lite import TestModuleLite
from warp.tests.test_noise import TestNoise
from warp.tests.test_operators import TestOperators
from warp.tests.test_quat import TestQuat
from warp.tests.test_rand import TestRand
from warp.tests.test_rounding import TestRounding
from warp.tests.test_runlength_encode import TestRunlengthEncode
from warp.tests.test_sparse import TestSparse
from warp.tests.test_streams import TestStreams
from warp.tests.test_tape import TestTape
from warp.tests.test_transient_module import TestTransientModule
from warp.tests.test_types import TestTypes
from warp.tests.test_utils import TestUtils
from warp.tests.test_vec_lite import TestVecLite
from warp.tests.test_volume import TestVolume
from warp.tests.test_volume_write import TestVolumeWrite
test_classes = [
TestArray,
TestArrayReduce,
TestBvh,
TestCodeGen,
TestConstants,
TestConditional,
TestCTypes,
TestDevices,
TestDLPack,
TestFabricArray,
TestFunc,
TestGenerics,
TestGradCustoms,
TestHashGrid,
TestIndexedArray,
TestLaunch,
TestMarchingCubes,
TestMatLite,
TestMath,
TestMatmulLite,
TestMesh,
TestMeshQueryAABBMethods,
TestMeshQueryPoint,
TestMeshQueryRay,
TestModuleLite,
TestNoise,
TestOperators,
TestQuat,
TestRand,
TestRounding,
TestRunlengthEncode,
TestSparse,
TestStreams,
TestTape,
TestTransientModule,
TestTypes,
TestUtils,
TestVecLite,
TestVolume,
TestVolumeWrite,
]
return _create_suite_from_test_classes(test_loader, test_classes)
| 13,130 | Python | 35.074176 | 105 | 0.711653 |
NVIDIA/warp/warp/tests/test_mat.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
np_signed_int_types = [
np.int8,
np.int16,
np.int32,
np.int64,
np.byte,
]
np_float_types = [np.float16, np.float32, np.float64]
def randvals(rng, shape, dtype):
if dtype in np_float_types:
return rng.standard_normal(size=shape).astype(dtype)
elif dtype in [np.int8, np.uint8, np.byte, np.ubyte]:
return rng.integers(1, high=3, size=shape, dtype=dtype)
return rng.integers(1, high=5, size=shape, dtype=dtype)
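# Note: the integer ranges above are kept small on purpose - several tests below
# multiply a handful of entries together, and small values keep those products
# representable even for 8-bit integer types.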
kernel_cache = {}
def getkernel(func, suffix=""):
key = func.__name__ + "_" + suffix
if key not in kernel_cache:
kernel_cache[key] = wp.Kernel(func=func, key=key)
return kernel_cache[key]
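# Caching sketch for getkernel() (names here are illustrative): the suffix keys
# one compiled wp.Kernel per dtype, so repeated requests reuse the cached
# instance instead of recompiling.
#
#   k_a = getkernel(check_mat_det, suffix="float32")
#   k_b = getkernel(check_mat_det, suffix="float32")
#   assert k_a is k_b  # cache hit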
def get_select_kernel(dtype):
def output_select_kernel_fn(
input: wp.array(dtype=dtype),
index: int,
out: wp.array(dtype=dtype),
):
out[0] = input[index]
return getkernel(output_select_kernel_fn, suffix=dtype.__name__)
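# get_select_kernel() builds a tiny per-dtype kernel that copies input[index]
# into out[0]; the tests below use it to isolate one output component so that
# tape.backward() propagates a unit gradient through just that component.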
def test_anon_constructor_error_shape_keyword_missing(test, device):
@wp.kernel
def kernel():
wp.matrix(1.0, 2.0, 3.0)
with test.assertRaisesRegex(
RuntimeError,
r"shape keyword must be specified when calling matrix\(\) function$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_anon_constructor_error_dtype_keyword_missing(test, device):
@wp.kernel
def kernel():
wp.matrix(shape=(3, 3))
with test.assertRaisesRegex(
RuntimeError,
r"matrix\(\) must have dtype as a keyword argument if it has no " r"positional arguments$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_anon_constructor_error_shape_mismatch(test, device):
@wp.kernel
def kernel():
wp.matrix(
wp.matrix(shape=(1, 2), dtype=float),
shape=(3, 4),
dtype=float,
)
with test.assertRaisesRegex(
RuntimeError,
r"Incompatible matrix sizes for casting copy constructor, " r"\(3, 4\) vs \(1, 2\)$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_anon_constructor_error_invalid_arg_count(test, device):
@wp.kernel
def kernel():
wp.matrix(1.0, 2.0, 3.0, shape=(2, 2), dtype=float)
with test.assertRaisesRegex(
RuntimeError,
r"Wrong number of arguments for matrix\(\) function, must initialize "
r"with either a scalar value, or m\*n values$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_tpl_constructor_error_incompatible_sizes(test, device):
@wp.kernel
def kernel():
wp.mat33(wp.mat22(1.0, 2.0, 3.0, 4.0))
with test.assertRaisesRegex(
RuntimeError,
r"Incompatible matrix sizes for casting copy constructor, " r"\(3, 3\) vs \(2, 2\)$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_tpl_constructor_error_invalid_scalar_type(test, device):
@wp.kernel
def kernel():
wp.mat22(1, 2, 3, 4)
with test.assertRaisesRegex(
RuntimeError,
r"Wrong scalar type for mat 2,2,<class 'warp.types.float32'> constructor$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_tpl_constructor_error_invalid_vector_count(test, device):
@wp.kernel
def kernel():
wp.mat22(wp.vec3(1.0, 2.0, 3.0))
with test.assertRaisesRegex(
RuntimeError,
r"Wrong number of vectors when attempting to construct a matrix " r"with column vectors$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_tpl_constructor_error_invalid_vector_shape(test, device):
@wp.kernel
def kernel():
wp.mat22(wp.vec3(1.0, 2.0, 3.0), wp.vec3(4.0, 5.0, 6.0))
with test.assertRaisesRegex(
RuntimeError,
r"Wrong vector row count when attempting to construct a matrix " r"with column vectors$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_tpl_constructor_error_invalid_arg_count(test, device):
@wp.kernel
def kernel():
wp.mat22(1.0, 2.0, 3.0)
with test.assertRaisesRegex(
RuntimeError,
r"Wrong number of scalars when attempting to construct a matrix " r"from a list of components$",
):
wp.launch(
kernel,
dim=1,
inputs=[],
device=device,
)
def test_py_arithmetic_ops(test, device, dtype):
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
def make_mat(*args):
if wptype in wp.types.int_types:
# Cast to the correct integer type to simulate wrapping.
return tuple(tuple(wptype._type_(x).value for x in row) for row in args)
return args
def make_vec(*args):
if wptype in wp.types.int_types:
# Cast to the correct integer type to simulate wrapping.
return tuple(wptype._type_(x).value for x in args)
return args
mat_cls = wp.mat((3, 3), wptype)
vec_cls = wp.vec(3, wptype)
m = mat_cls(((-1, 2, 3), (4, -5, 6), (7, 8, -9)))
test.assertSequenceEqual(+m, make_mat((-1, 2, 3), (4, -5, 6), (7, 8, -9)))
test.assertSequenceEqual(-m, make_mat((1, -2, -3), (-4, 5, -6), (-7, -8, 9)))
test.assertSequenceEqual(m + mat_cls((5, 5, 5) * 3), make_mat((4, 7, 8), (9, 0, 11), (12, 13, -4)))
test.assertSequenceEqual(m - mat_cls((5, 5, 5) * 3), make_mat((-6, -3, -2), (-1, -10, 1), (2, 3, -14)))
test.assertSequenceEqual(m * vec_cls(5, 5, 5), make_vec(20, 25, 30))
test.assertSequenceEqual(m @ vec_cls(5, 5, 5), make_vec(20, 25, 30))
test.assertSequenceEqual(vec_cls(5, 5, 5) * m, make_vec(50, 25, 0))
test.assertSequenceEqual(vec_cls(5, 5, 5) @ m, make_vec(50, 25, 0))
m = mat_cls(((2, 4, 6), (8, 10, 12), (14, 16, 18)))
test.assertSequenceEqual(m * wptype(2), make_mat((4, 8, 12), (16, 20, 24), (28, 32, 36)))
test.assertSequenceEqual(wptype(2) * m, make_mat((4, 8, 12), (16, 20, 24), (28, 32, 36)))
test.assertSequenceEqual(m / wptype(2), make_mat((1, 2, 3), (4, 5, 6), (7, 8, 9)))
test.assertSequenceEqual(wptype(5040) / m, make_mat((2520, 1260, 840), (630, 504, 420), (360, 315, 280)))
test.assertSequenceEqual(m * vec_cls(5, 5, 5), make_vec(60, 150, 240))
test.assertSequenceEqual(m @ vec_cls(5, 5, 5), make_vec(60, 150, 240))
test.assertSequenceEqual(vec_cls(5, 5, 5) * m, make_vec(120, 150, 180))
test.assertSequenceEqual(vec_cls(5, 5, 5) @ m, make_vec(120, 150, 180))
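# The assertions above run entirely in Python scope: wp.mat()/wp.vec() values
# support arithmetic on the host as well as in kernels. A standalone sketch:
#
#   m = wp.mat22f(1.0, 2.0, 3.0, 4.0)
#   v = wp.vec2f(1.0, 1.0)
#   print(m @ v)  # host-side matrix-vector product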
def test_quat_constructor(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
vec4 = wp.types.vector(length=4, dtype=wptype)
vec3 = wp.types.vector(length=3, dtype=wptype)
quat = wp.types.quaternion(dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_quat_constructor(
p: wp.array(dtype=vec3),
r: wp.array(dtype=quat),
s: wp.array(dtype=vec3),
outcomponents: wp.array(dtype=wptype),
outcomponents_alt: wp.array(dtype=wptype),
):
m = mat44(p[0], r[0], s[0])
R = wp.transpose(wp.quat_to_matrix(r[0]))
c0 = s[0][0] * R[0]
c1 = s[0][1] * R[1]
c2 = s[0][2] * R[2]
m_alt = mat44(
vec4(c0[0], c0[1], c0[2], wptype(0.0)),
vec4(c1[0], c1[1], c1[2], wptype(0.0)),
vec4(c2[0], c2[1], c2[2], wptype(0.0)),
vec4(p[0][0], p[0][1], p[0][2], wptype(1.0)),
)
idx = 0
for i in range(4):
for j in range(4):
outcomponents[idx] = m[i, j]
outcomponents_alt[idx] = m_alt[i, j]
idx = idx + 1
kernel = getkernel(check_mat_quat_constructor, suffix=dtype.__name__)
if register_kernels:
return
# translation:
p = wp.array(rng.standard_normal(size=(1, 3)).astype(dtype), dtype=vec3, requires_grad=True, device=device)
# generate a normalized quaternion for the rotation:
r = rng.standard_normal(size=(1, 4))
r /= np.linalg.norm(r)
r = wp.array(r.astype(dtype), dtype=quat, requires_grad=True, device=device)
# scale:
s = wp.array(rng.standard_normal(size=(1, 3)).astype(dtype), dtype=vec3, requires_grad=True, device=device)
# just going to generate the matrix using the constructor, then
# more manually, and make sure the values/gradients are the same:
outcomponents = wp.zeros(4 * 4, dtype=wptype, requires_grad=True, device=device)
outcomponents_alt = wp.zeros(4 * 4, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[p, r, s], outputs=[outcomponents, outcomponents_alt], device=device)
assert_np_equal(outcomponents.numpy(), outcomponents_alt.numpy(), tol=1.0e-6)
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
out_alt = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for _i in range(4):
for _j in range(4):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[p, r, s], outputs=[outcomponents, outcomponents_alt], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
wp.launch(
output_select_kernel, dim=1, inputs=[outcomponents_alt, idx], outputs=[out_alt], device=device
)
tape.backward(loss=out)
p_grad = 1.0 * tape.gradients[p].numpy()[0]
r_grad = 1.0 * tape.gradients[r].numpy()[0]
s_grad = 1.0 * tape.gradients[s].numpy()[0]
tape.zero()
tape.backward(loss=out_alt)
p_grad_alt = 1.0 * tape.gradients[p].numpy()[0]
r_grad_alt = 1.0 * tape.gradients[r].numpy()[0]
s_grad_alt = 1.0 * tape.gradients[s].numpy()[0]
tape.zero()
assert_np_equal(p_grad, p_grad_alt, tol=tol)
assert_np_equal(r_grad, r_grad_alt, tol=tol)
assert_np_equal(s_grad, s_grad_alt, tol=tol)
idx = idx + 1
def test_negation(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-2,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
mat55 = wp.types.matrix(shape=(5, 5), dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_negation(
m2: wp.array(dtype=mat22),
m3: wp.array(dtype=mat33),
m4: wp.array(dtype=mat44),
m5: wp.array(dtype=mat55),
outcomponents: wp.array(dtype=wptype),
):
mat2 = -m2[0]
mat3 = -m3[0]
mat4 = -m4[0]
mat5 = -m5[0]
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(2):
for j in range(2):
outcomponents[idx] = wptype(2) * mat2[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * mat3[i, j]
idx = idx + 1
for i in range(4):
for j in range(4):
outcomponents[idx] = wptype(2) * mat4[i, j]
idx = idx + 1
for i in range(5):
for j in range(5):
outcomponents[idx] = wptype(2) * mat5[i, j]
idx = idx + 1
kernel = getkernel(check_mat_negation, suffix=dtype.__name__)
if register_kernels:
return
m2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
m3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
m4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
m5 = wp.array(randvals(rng, [1, 5, 5], dtype), dtype=mat55, requires_grad=True, device=device)
outcomponents = wp.zeros(2 * 2 + 3 * 3 + 4 * 4 + 5 * 5, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[m2, m3, m4, m5], outputs=[outcomponents], device=device)
assert_np_equal(outcomponents.numpy()[:4], -2 * m2.numpy().reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[4:13], -2 * m3.numpy().reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[13:29], -2 * m4.numpy().reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[29:54], -2 * m5.numpy().reshape(-1), tol=tol)
if dtype in np_float_types:
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for dim, input in [(2, m2), (3, m3), (4, m4), (5, m5)]:
for i in range(dim):
for j in range(dim):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[m2, m3, m4, m5], outputs=[outcomponents], device=device)
wp.launch(
output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device
)
tape.backward(loss=out)
expectedresult = np.zeros((dim, dim), dtype=dtype)
expectedresult[i, j] = -2
assert_np_equal(tape.gradients[input].numpy()[0], expectedresult)
tape.zero()
idx = idx + 1
def test_subtraction(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
mat55 = wp.types.matrix(shape=(5, 5), dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_sub(
s2: wp.array(dtype=mat22),
s3: wp.array(dtype=mat33),
s4: wp.array(dtype=mat44),
s5: wp.array(dtype=mat55),
v2: wp.array(dtype=mat22),
v3: wp.array(dtype=mat33),
v4: wp.array(dtype=mat44),
v5: wp.array(dtype=mat55),
outcomponents: wp.array(dtype=wptype),
):
v2result = v2[0] - s2[0]
v3result = v3[0] - s3[0]
v4result = v4[0] - s4[0]
v5result = v5[0] - s5[0]
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(2):
for j in range(2):
outcomponents[idx] = wptype(2) * v2result[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * v3result[i, j]
idx = idx + 1
for i in range(4):
for j in range(4):
outcomponents[idx] = wptype(2) * v4result[i, j]
idx = idx + 1
for i in range(5):
for j in range(5):
outcomponents[idx] = wptype(2) * v5result[i, j]
idx = idx + 1
kernel = getkernel(check_mat_sub, suffix=dtype.__name__)
if register_kernels:
return
s2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
s3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
s4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
s5 = wp.array(randvals(rng, [1, 5, 5], dtype), dtype=mat55, requires_grad=True, device=device)
v2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
v5 = wp.array(randvals(rng, [1, 5, 5], dtype), dtype=mat55, requires_grad=True, device=device)
outcomponents = wp.zeros(2 * 2 + 3 * 3 + 4 * 4 + 5 * 5, dtype=wptype, requires_grad=True, device=device)
wp.launch(
kernel,
dim=1,
inputs=[
s2,
s3,
s4,
s5,
v2,
v3,
v4,
v5,
],
outputs=[outcomponents],
device=device,
)
assert_np_equal(outcomponents.numpy()[:4], 2 * (v2.numpy() - s2.numpy()).reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[4:13], 2 * (v3.numpy() - s3.numpy()).reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[13:29], 2 * (v4.numpy() - s4.numpy()).reshape(-1), tol=tol)
assert_np_equal(outcomponents.numpy()[29:54], 2 * (v5.numpy() - s5.numpy()).reshape(-1), tol=10 * tol)
if dtype in np_float_types:
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for dim, in1, in2 in [(2, s2, v2), (3, s3, v3), (4, s4, v4), (5, s5, v5)]:
for i in range(dim):
for j in range(dim):
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
s2,
s3,
s4,
s5,
v2,
v3,
v4,
v5,
],
outputs=[outcomponents],
device=device,
)
wp.launch(
output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device
)
tape.backward(loss=out)
expectedresult = np.zeros((dim, dim), dtype=dtype)
expectedresult[i, j] = 2
assert_np_equal(tape.gradients[in2].numpy()[0], expectedresult, tol=10 * tol)
expectedresult[i, j] = -2
assert_np_equal(tape.gradients[in1].numpy()[0], expectedresult, tol=10 * tol)
tape.zero()
idx = idx + 1
def test_determinant(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
def check_mat_det(
v2: wp.array(dtype=mat22),
v3: wp.array(dtype=mat33),
v4: wp.array(dtype=mat44),
det2: wp.array(dtype=wptype),
det3: wp.array(dtype=wptype),
det4: wp.array(dtype=wptype),
):
# multiply outputs by 2 so we've got something to backpropagate:
det2[0] = wptype(2) * wp.determinant(v2[0])
det3[0] = wptype(2) * wp.determinant(v3[0])
det4[0] = wptype(2) * wp.determinant(v4[0])
kernel = getkernel(check_mat_det, suffix=dtype.__name__)
if register_kernels:
return
v2 = wp.array(randvals(rng, [1, 2, 2], dtype), dtype=mat22, requires_grad=True, device=device)
v3 = wp.array(randvals(rng, [1, 3, 3], dtype), dtype=mat33, requires_grad=True, device=device)
v4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
det2 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
det3 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
det4 = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
tape = wp.Tape()
with tape:
wp.launch(
kernel,
dim=1,
inputs=[
v2,
v3,
v4,
],
outputs=[
det2,
det3,
det4,
],
device=device,
)
if dtype in np_float_types:
assert_np_equal(det2.numpy()[0], 2 * np.linalg.det(v2.numpy()[0].astype(np.float64)), tol=100 * tol)
assert_np_equal(det3.numpy()[0], 2 * np.linalg.det(v3.numpy()[0].astype(np.float64)), tol=100 * tol)
assert_np_equal(det4.numpy()[0], 2 * np.linalg.det(v4.numpy()[0].astype(np.float64)), tol=420 * tol)
else:
assert_np_equal(det2.numpy()[0], 2 * np.around(np.linalg.det(v2.numpy()[0])).astype(int))
assert_np_equal(det3.numpy()[0], 2 * np.around(np.linalg.det(v3.numpy()[0])).astype(int))
assert_np_equal(det4.numpy()[0], 2 * np.around(np.linalg.det(v4.numpy()[0])).astype(int))
if dtype in np_float_types:
        # the analytic determinant derivative is fiddly, so validate the gradients with finite differences:
tape.backward(loss=det2)
v2grads = 1.0 * tape.gradients[v2].numpy()[0]
tape.zero()
tape.backward(loss=det3)
v3grads = 1.0 * tape.gradients[v3].numpy()[0]
tape.zero()
tape.backward(loss=det4)
v4grads = 1.0 * tape.gradients[v4].numpy()[0]
tape.zero()
        # finite differences are noisy, hence the large tolerance below;
        # float16 is especially rough, so it gets a bigger step and tolerance:
dx = 0.01 if dtype == np.float16 else 0.0001
fdtol = 2.0e-1 if dtype == np.float16 else 2.0e-3
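        # central differences: d(det)/dm[i, j] ~ (det(m + dx*E_ij) - det(m - dx*E_ij)) / (2*dx),
        # where E_ij is the single-entry matrix; both sides of each check are
        # divided by dplus to keep the comparison scale-independent.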
for i in range(2):
for j in range(2):
v2test = v2.numpy()
v2test[0, i, j] += dx
wp.launch(
kernel,
dim=1,
inputs=[
wp.array(v2test, dtype=v2.dtype, requires_grad=True, device=device),
v3,
v4,
],
outputs=[
det2,
det3,
det4,
],
device=device,
)
dplus = det2.numpy()[0]
v2test[0, i, j] -= 2.0 * dx
wp.launch(
kernel,
dim=1,
inputs=[
wp.array(v2test, dtype=v2.dtype, requires_grad=True, device=device),
v3,
v4,
],
outputs=[
det2,
det3,
det4,
],
device=device,
)
dminus = det2.numpy()[0]
assert_np_equal((dplus - dminus) / (2.0 * dx * dplus), v2grads[i, j] / dplus, tol=fdtol)
for i in range(3):
for j in range(3):
v3test = v3.numpy()
v3test[0, i, j] += dx
wp.launch(
kernel,
dim=1,
inputs=[
v2,
wp.array(v3test, dtype=v3.dtype, requires_grad=True, device=device),
v4,
],
outputs=[
det2,
det3,
det4,
],
device=device,
)
dplus = det3.numpy()[0]
v3test[0, i, j] -= 2.0 * dx
wp.launch(
kernel,
dim=1,
inputs=[
v2,
wp.array(v3test, dtype=v3.dtype, requires_grad=True, device=device),
v4,
],
outputs=[
det2,
det3,
det4,
],
device=device,
)
dminus = det3.numpy()[0]
assert_np_equal((dplus - dminus) / (2.0 * dx * dplus), v3grads[i, j] / dplus, tol=fdtol)
for i in range(4):
for j in range(4):
v4test = v4.numpy()
v4test[0, i, j] += dx
wp.launch(
kernel,
dim=1,
inputs=[
v2,
v3,
wp.array(v4test, dtype=v4.dtype, requires_grad=True, device=device),
],
outputs=[
det2,
det3,
det4,
],
device=device,
)
dplus = det4.numpy()[0]
v4test[0, i, j] -= 2.0 * dx
wp.launch(
kernel,
dim=1,
inputs=[
v2,
v3,
wp.array(v4test, dtype=v4.dtype, requires_grad=True, device=device),
],
outputs=[
det2,
det3,
det4,
],
device=device,
)
dminus = det4.numpy()[0]
assert_np_equal((dplus - dminus) / (2.0 * dx * dplus), v4grads[i, j] / dplus, tol=fdtol)
# Unused. Why?
# def test_get_diag(test, device, dtype, register_kernels=False):
# tol = {
# np.float16: 1.0e-3,
# np.float32: 1.0e-6,
# np.float64: 1.0e-8,
# }.get(dtype, 0)
#
# wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
# mat55 = wp.types.matrix(shape=(5, 5), dtype=wptype)
#
# output_select_kernel = get_select_kernel(wptype)
#
# def check_mat_diag(
# m55: wp.array(dtype=mat55),
# outcomponents: wp.array(dtype=wptype),
# ):
# # multiply outputs by 2 so we've got something to backpropagate:
# vec5result = wptype(2) * wp.get_diag(m55[0])
#
# idx = 0
# for i in range(5):
# outcomponents[idx] = vec5result[i]
# idx = idx + 1
#
# kernel = getkernel(check_mat_diag, suffix=dtype.__name__)
#
# if register_kernels:
# return
#
# m55 = wp.array(randvals((1, 5, 5), dtype), dtype=mat55, requires_grad=True, device=device)
# outcomponents = wp.zeros(5, dtype=wptype, requires_grad=True, device=device)
# out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
#
# wp.launch(kernel, dim=1, inputs=[m55], outputs=[outcomponents], device=device)
#
# assert_np_equal(outcomponents.numpy(), 2 * np.diag(m55.numpy()[0]), tol=tol)
#
# if dtype in np_float_types:
# idx = 0
# for i in range(5):
# tape = wp.Tape()
# with tape:
# wp.launch(kernel, dim=1, inputs=[m55], outputs=[outcomponents], device=device)
# wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
# tape.backward(loss=out)
# expectedresult = np.zeros((5, 5), dtype=dtype)
# expectedresult[i, i] = 2
# assert_np_equal(tape.gradients[m55].numpy()[0], expectedresult, tol=10 * tol)
# tape.zero()
#
# idx = idx + 1
def test_inverse(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-2,
np.float32: 1.0e-5,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat22 = wp.types.matrix(shape=(2, 2), dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_inverse(
m2: wp.array(dtype=mat22),
m3: wp.array(dtype=mat33),
m4: wp.array(dtype=mat44),
outcomponents: wp.array(dtype=wptype),
):
m2result = wp.inverse(m2[0])
m3result = wp.inverse(m3[0])
m4result = wp.inverse(m4[0])
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(2):
for j in range(2):
outcomponents[idx] = wptype(2) * m2result[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * m3result[i, j]
idx = idx + 1
for i in range(4):
for j in range(4):
outcomponents[idx] = wptype(2) * m4result[i, j]
idx = idx + 1
kernel = getkernel(check_mat_inverse, suffix=dtype.__name__)
if register_kernels:
return
m2 = wp.array(
2 * (randvals(rng, [1, 2, 2], dtype) + 0.2 * np.eye(2)), dtype=mat22, requires_grad=True, device=device
)
m3 = wp.array(
2 * (randvals(rng, [1, 3, 3], dtype) + 0.2 * np.eye(3)), dtype=mat33, requires_grad=True, device=device
)
m4 = wp.array(
2 * (randvals(rng, [1, 4, 4], dtype) + 0.2 * np.eye(4)), dtype=mat44, requires_grad=True, device=device
)
outcomponents = wp.zeros(2 * 2 + 3 * 3 + 4 * 4, dtype=wptype, requires_grad=True, device=device)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[m2, m3, m4], outputs=[outcomponents], device=device)
assert_np_equal(outcomponents.numpy()[:4], 2 * np.linalg.inv(m2.numpy()[0].astype(np.float64)), tol=tol)
assert_np_equal(outcomponents.numpy()[4:13], 2 * np.linalg.inv(m3.numpy()[0].astype(np.float64)), tol=5 * tol)
assert_np_equal(outcomponents.numpy()[13:], 2 * np.linalg.inv(m4.numpy()[0].astype(np.float64)), tol=5 * tol)
if dtype in np_float_types:
# check gradients:
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for dim, input in [(2, m2), (3, m3), (4, m4)]:
minv = np.linalg.inv(input.numpy()[0].astype(np.float64))
for i in range(dim):
for j in range(dim):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[m2, m3, m4], outputs=[outcomponents], device=device)
wp.launch(
output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device
)
tape.backward(loss=out)
d = np.zeros((dim, dim))
d[j, i] = 2
assert_np_equal(
tape.gradients[input].numpy()[0], -np.matmul(minv, np.matmul(d, minv)).T, tol=10 * tol
)
tape.zero()
idx = idx + 1
# let's check 2x2 using different formulae just for (in)sanity's sake:
m = m2.numpy()[0]
det = m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]
expected = 2 * np.array([[m[1, 1], -m[0, 1]], [-m[1, 0], m[0, 0]]], dtype=dtype) / det
assert_np_equal(expected, outcomponents.numpy()[:4], tol=tol)
# 0,0 component is this:
# 2 * m[1,1] / (m[0,0]*m[1,1] - m[1,0] * m[0,1])
assert_np_equal(2 * m[1, 1] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]), outcomponents.numpy()[0], tol=tol)
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[m2, m3, m4], outputs=[outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, 0], outputs=[out], device=device)
if dtype in np_float_types:
tape.backward(loss=out)
g = tape.gradients[m2].numpy()[0]
assert_np_equal(-2 * m[1, 1] * m[1, 1] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[0, 0], tol=tol)
assert_np_equal(2 * m[1, 1] * m[0, 1] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[1, 0], tol=tol)
assert_np_equal(-2 * m[0, 1] * m[1, 0] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[1, 1], tol=tol)
assert_np_equal(2 * m[1, 1] * m[1, 0] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[0, 1], tol=tol)
tape.zero()
# 0,1 component is this:
# -2 * m[0,1] / (m[0,0]*m[1,1] - m[1,0] * m[0,1])
assert_np_equal(-2 * m[0, 1] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]), outcomponents.numpy()[1], tol=tol)
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[m2, m3, m4], outputs=[outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, 1], outputs=[out], device=device)
if dtype in np_float_types:
tape.backward(loss=out)
g = tape.gradients[m2].numpy()[0]
assert_np_equal(2 * m[0, 1] * m[1, 1] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[0, 0], tol=tol)
assert_np_equal(-2 * m[0, 1] * m[0, 1] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[1, 0], tol=tol)
assert_np_equal(2 * m[0, 0] * m[0, 1] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[1, 1], tol=tol)
assert_np_equal(-2 * m[1, 1] * m[0, 0] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[0, 1], tol=tol)
tape.zero()
# 1,0 component is this:
# -2 * m[1,0] / (m[0,0]*m[1,1] - m[1,0] * m[0,1])
assert_np_equal(-2 * m[1, 0] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]), outcomponents.numpy()[2], tol=tol)
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[m2, m3, m4], outputs=[outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, 2], outputs=[out], device=device)
if dtype in np_float_types:
tape.backward(loss=out)
g = tape.gradients[m2].numpy()[0]
assert_np_equal(2 * m[1, 1] * m[1, 0] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[0, 0], tol=tol)
assert_np_equal(-2 * m[0, 0] * m[1, 1] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[1, 0], tol=tol)
assert_np_equal(2 * m[0, 0] * m[1, 0] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[1, 1], tol=tol)
assert_np_equal(-2 * m[1, 0] * m[1, 0] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[0, 1], tol=tol)
tape.zero()
# 1,1 component is this:
# 2 * m[0,0] / (m[0,0]*m[1,1] - m[1,0] * m[0,1])
assert_np_equal(2 * m[0, 0] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]), outcomponents.numpy()[3], tol=tol)
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[m2, m3, m4], outputs=[outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, 3], outputs=[out], device=device)
if dtype in np_float_types:
tape.backward(loss=out)
g = tape.gradients[m2].numpy()[0]
assert_np_equal(-2 * m[0, 1] * m[1, 0] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[0, 0], tol=tol)
assert_np_equal(2 * m[0, 0] * m[0, 1] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[1, 0], tol=tol)
assert_np_equal(2 * m[0, 0] * m[1, 0] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[0, 1], tol=tol)
assert_np_equal(-2 * m[0, 0] * m[0, 0] / (m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1]) ** 2, g[1, 1], tol=tol)
tape.zero()
def test_svd(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-6,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec3 = wp.types.vector(length=3, dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
def check_mat_svd(
m3: wp.array(dtype=mat33),
Uout: wp.array(dtype=mat33),
sigmaout: wp.array(dtype=vec3),
Vout: wp.array(dtype=mat33),
outcomponents: wp.array(dtype=wptype),
):
U = mat33()
sigma = vec3()
V = mat33()
wp.svd3(m3[0], U, sigma, V)
Uout[0] = U
sigmaout[0] = sigma
Vout[0] = V
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * U[i, j]
idx = idx + 1
for i in range(3):
outcomponents[idx] = wptype(2) * sigma[i]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * V[i, j]
idx = idx + 1
kernel = getkernel(check_mat_svd, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
m3 = wp.array(randvals(rng, [1, 3, 3], dtype) + np.eye(3), dtype=mat33, requires_grad=True, device=device)
outcomponents = wp.zeros(2 * 3 * 3 + 3, dtype=wptype, requires_grad=True, device=device)
Uout = wp.zeros(1, dtype=mat33, requires_grad=True, device=device)
sigmaout = wp.zeros(1, dtype=vec3, requires_grad=True, device=device)
Vout = wp.zeros(1, dtype=mat33, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[m3], outputs=[Uout, sigmaout, Vout, outcomponents], device=device)
Uout_np = Uout.numpy()[0].astype(np.float64)
sigmaout_np = np.diag(sigmaout.numpy()[0].astype(np.float64))
Vout_np = Vout.numpy()[0].astype(np.float64)
assert_np_equal(
np.matmul(Uout_np, np.matmul(sigmaout_np, Vout_np.T)), m3.numpy()[0].astype(np.float64), tol=30 * tol
)
if dtype == np.float16:
# I'm not even going to bother testing the gradients for float16
# because the rounding errors are terrible...
return
# check gradients:
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
idx = 0
for idx in range(3 * 3 + 3 + 3 * 3):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[m3], outputs=[Uout, sigmaout, Vout, outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
tape.backward(out)
m3grads = 1.0 * tape.gradients[m3].numpy()[0]
tape.zero()
dx = 0.0001
fdtol = 5.0e-4 if dtype == np.float64 else 2.0e-2
for ii in range(3):
for jj in range(3):
m3test = 1.0 * m3.numpy()
m3test[0, ii, jj] += dx
wp.launch(
kernel,
dim=1,
inputs=[wp.array(m3test, dtype=mat33, device=device)],
outputs=[Uout, sigmaout, Vout, outcomponents],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
plusval = out.numpy()[0]
m3test = 1.0 * m3.numpy()
m3test[0, ii, jj] -= dx
wp.launch(
kernel,
dim=1,
inputs=[wp.array(m3test, dtype=mat33, device=device)],
outputs=[Uout, sigmaout, Vout, outcomponents],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
minusval = out.numpy()[0]
assert_np_equal((plusval - minusval) / (2 * dx), m3grads[ii, jj], tol=fdtol)
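# test_qr and test_eig below follow the same recipe as test_svd above: run the
# factorization inside a kernel, verify the algebraic identities on the host,
# then spot-check tape gradients against central finite differences.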
def test_qr(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 2.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-6,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
def check_mat_qr(
m3: wp.array(dtype=mat33),
Qout: wp.array(dtype=mat33),
Rout: wp.array(dtype=mat33),
outcomponents: wp.array(dtype=wptype),
):
Q = mat33()
R = mat33()
wp.qr3(m3[0], Q, R)
Qout[0] = Q
Rout[0] = R
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * Q[i, j]
idx = idx + 1
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * R[i, j]
idx = idx + 1
kernel = getkernel(check_mat_qr, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
m3 = wp.array(0.5 * (randvals(rng, [1, 3, 3], dtype) + np.eye(3)), dtype=mat33, requires_grad=True, device=device)
outcomponents = wp.zeros(2 * 3 * 3, dtype=wptype, requires_grad=True, device=device)
Qout = wp.zeros(1, dtype=mat33, requires_grad=True, device=device)
Rout = wp.zeros(1, dtype=mat33, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[m3], outputs=[Qout, Rout, outcomponents], device=device)
Qout_np = Qout.numpy()[0].astype(np.float64)
Rout_np = Rout.numpy()[0].astype(np.float64)
# check it's actually a q and an r:
assert_np_equal(np.matmul(Qout_np.T, Qout_np), np.eye(3, dtype=np.float64), tol=tol)
assert_np_equal(Rout_np[1, [0]], np.zeros(1, dtype=np.float64), tol=tol)
assert_np_equal(Rout_np[2, [0, 1]], np.zeros(2, dtype=np.float64), tol=tol)
# check it's a factorization:
assert_np_equal(np.matmul(Qout_np, Rout_np), m3.numpy()[0].astype(np.float64), tol=30 * tol)
if dtype == np.float16:
# I'm not even going to bother testing the gradients for float16
# because the rounding errors are terrible...
return
# check gradients:
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
idx = 0
for idx in range(len(outcomponents)):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[m3], outputs=[Qout, Rout, outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
tape.backward(out)
m3grads = 1.0 * tape.gradients[m3].numpy()[0]
tape.zero()
dx = 0.0001
fdtol = 5.0e-4 if dtype == np.float64 else 2.0e-2
for ii in range(3):
for jj in range(3):
m3test = 1.0 * m3.numpy()
m3test[0, ii, jj] += dx
wp.launch(
kernel,
dim=1,
inputs=[wp.array(m3test, dtype=mat33, device=device)],
outputs=[Qout, Rout, outcomponents],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
plusval = out.numpy()[0]
m3test = 1.0 * m3.numpy()
m3test[0, ii, jj] -= dx
wp.launch(
kernel,
dim=1,
inputs=[wp.array(m3test, dtype=mat33, device=device)],
outputs=[Qout, Rout, outcomponents],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
minusval = out.numpy()[0]
assert_np_equal((plusval - minusval) / (2 * dx), m3grads[ii, jj], tol=fdtol)
def test_eig(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 4.0e-2,
np.float32: 1.0e-5,
np.float64: 1.0e-5,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec3 = wp.types.vector(length=3, dtype=wptype)
mat33 = wp.types.matrix(shape=(3, 3), dtype=wptype)
def check_mat_eig(
m3: wp.array(dtype=mat33),
Qout: wp.array(dtype=mat33),
dout: wp.array(dtype=vec3),
outcomponents: wp.array(dtype=wptype),
):
Q = mat33()
d = vec3()
wp.eig3(m3[0] + wp.transpose(m3[0]), Q, d)
Qout[0] = Q
dout[0] = d
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * Q[i, j]
idx = idx + 1
for i in range(3):
outcomponents[idx] = wptype(2) * d[i]
idx = idx + 1
kernel = getkernel(check_mat_eig, suffix=dtype.__name__)
output_select_kernel = get_select_kernel(wptype)
if register_kernels:
return
m3_np = randvals(rng, [1, 3, 3], dtype) + np.eye(3, dtype=dtype)
m3 = wp.array(m3_np, dtype=mat33, requires_grad=True, device=device)
outcomponents = wp.zeros(3 * 3 + 3, dtype=wptype, requires_grad=True, device=device)
Qout = wp.zeros(1, dtype=mat33, requires_grad=True, device=device)
dout = wp.zeros(1, dtype=vec3, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[m3], outputs=[Qout, dout, outcomponents], device=device)
Qout_np = Qout.numpy()[0].astype(np.float64)
dout_np = dout.numpy()[0].astype(np.float64)
Dout_np = np.diag(dout_np)
# check Q is orthogonal:
assert_np_equal(np.matmul(Qout_np.T, Qout_np), np.eye(3), tol=tol)
# check Q contains eigenvectors:
assert_np_equal(np.matmul(Qout_np, np.matmul(Dout_np, Qout_np.T)), (m3_np[0] + m3_np[0].transpose()), tol=tol)
if dtype == np.float16:
# I'm not even going to bother testing the gradients for float16
# because the rounding errors are terrible...
return
# check gradients:
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
idx = 0
for idx in range(len(outcomponents)):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[m3], outputs=[Qout, dout, outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
tape.backward(out)
m3grads = 1.0 * tape.gradients[m3].numpy()[0]
tape.zero()
dx = 0.0001
fdtol = 5.0e-4 if dtype == np.float64 else 2.0e-2
for ii in range(3):
for jj in range(3):
m3test = 1.0 * m3.numpy()
m3test[0, ii, jj] += dx
wp.launch(
kernel,
dim=1,
inputs=[wp.array(m3test, dtype=mat33, device=device)],
outputs=[Qout, dout, outcomponents],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
plusval = out.numpy()[0]
m3test = 1.0 * m3.numpy()
m3test[0, ii, jj] -= dx
wp.launch(
kernel,
dim=1,
inputs=[wp.array(m3test, dtype=mat33, device=device)],
outputs=[Qout, dout, outcomponents],
device=device,
)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
minusval = out.numpy()[0]
assert_np_equal((plusval - minusval) / (2 * dx), m3grads[ii, jj], tol=fdtol)
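# Note on test_eig above: eig3 is applied to m + m^T, which is symmetric by
# construction, so the decomposition Q diag(d) Q^T is real and Q is orthogonal,
# which is exactly what the two host-side assertions on Qout check.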
def test_skew(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 1.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec3 = wp.types.vector(length=3, dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_skew(
v3: wp.array(dtype=vec3),
outcomponents: wp.array(dtype=wptype),
):
m3result = wp.skew(v3[0])
# multiply outputs by 2 so we've got something to backpropagate:
idx = 0
for i in range(3):
for j in range(3):
outcomponents[idx] = wptype(2) * m3result[i, j]
idx = idx + 1
kernel = getkernel(check_mat_skew, suffix=dtype.__name__)
if register_kernels:
return
v3 = wp.array(randvals(rng, [1, 3], dtype), dtype=vec3, requires_grad=True, device=device)
outcomponents = wp.zeros(3 * 3, dtype=wptype, requires_grad=True, device=device)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[v3], outputs=[outcomponents], device=device)
# make sure it gives you a cross product matrix:
crossprodmat = outcomponents.numpy().reshape(3, 3)
    v = np.array([1, 0, 0])
    assert_np_equal(
        np.matmul(crossprodmat, v).reshape(-1),
        2 * np.cross(v3.numpy()[0], v),
        tol=tol,
    )
assert_np_equal(
np.matmul(crossprodmat, np.array([0, 1, 0])).reshape(-1),
2 * np.cross(v3.numpy()[0], np.array([0, 1, 0])),
tol=tol,
)
assert_np_equal(
np.matmul(crossprodmat, np.array([0, 0, 1])).reshape(-1),
2 * np.cross(v3.numpy()[0], np.array([0, 0, 1])),
tol=tol,
)
# check it another way:
x0 = v3.numpy()[0, 0]
x1 = v3.numpy()[0, 1]
x2 = v3.numpy()[0, 2]
crossprodmat_expected = np.array(
[
[0, -x2, x1],
[x2, 0, -x0],
[-x1, x0, 0],
],
dtype=dtype,
)
assert_np_equal(crossprodmat, 2 * crossprodmat_expected, tol=tol)
if dtype in np_float_types:
idx = 0
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
for i in range(3):
for j in range(3):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[v3], outputs=[outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, idx], outputs=[out], device=device)
tape.backward(loss=out)
if i == j:
assert_np_equal(tape.gradients[v3].numpy()[0], np.zeros(3))
elif [i, j] == [0, 1]:
assert_np_equal(tape.gradients[v3].numpy()[0], np.array([0, 0, -2]))
elif [i, j] == [1, 0]:
assert_np_equal(tape.gradients[v3].numpy()[0], np.array([0, 0, 2]))
elif [i, j] == [0, 2]:
assert_np_equal(tape.gradients[v3].numpy()[0], np.array([0, 2, 0]))
elif [i, j] == [2, 0]:
assert_np_equal(tape.gradients[v3].numpy()[0], np.array([0, -2, 0]))
elif [i, j] == [1, 2]:
assert_np_equal(tape.gradients[v3].numpy()[0], np.array([-2, 0, 0]))
elif [i, j] == [2, 1]:
assert_np_equal(tape.gradients[v3].numpy()[0], np.array([2, 0, 0]))
tape.zero()
idx = idx + 1
def test_transform_point(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec3 = wp.types.vector(length=3, dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_transform_point(
v3: wp.array(dtype=vec3),
m4: wp.array(dtype=mat44),
outcomponents: wp.array(dtype=wptype),
):
# multiply outputs by 2 so we've got something to backpropagate:
presult = wptype(2) * wp.transform_point(m4[0], v3[0])
outcomponents[0] = presult[0]
outcomponents[1] = presult[1]
outcomponents[2] = presult[2]
kernel = getkernel(check_mat_transform_point, suffix=dtype.__name__)
if register_kernels:
return
v3 = wp.array(randvals(rng, [1, 3], dtype), dtype=vec3, requires_grad=True, device=device)
m4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
outcomponents = wp.zeros(3, dtype=wptype, requires_grad=True, device=device)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[v3, m4], outputs=[outcomponents], device=device)
v3homog = np.ones(4, dtype=dtype)
v3homog[:3] = v3.numpy()[0]
assert_np_equal(outcomponents.numpy(), 2 * np.matmul(m4.numpy()[0], v3homog)[:3], tol=10 * tol)
if dtype in np_float_types:
for j in range(3):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[v3, m4], outputs=[outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, j], outputs=[out], device=device)
tape.backward(loss=out)
assert_np_equal(2 * m4.numpy()[0, j, :3], tape.gradients[v3].numpy(), tol=tol)
expected = np.zeros((4, 4), dtype=dtype)
expected[j, :3] = 2 * v3.numpy()
expected[j, 3] = 2
assert_np_equal(tape.gradients[m4].numpy(), expected, tol=tol)
tape.zero()
def test_transform_vector(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
tol = {
np.float16: 5.0e-3,
np.float32: 1.0e-6,
np.float64: 1.0e-8,
}.get(dtype, 0)
wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
vec3 = wp.types.vector(length=3, dtype=wptype)
mat44 = wp.types.matrix(shape=(4, 4), dtype=wptype)
output_select_kernel = get_select_kernel(wptype)
def check_mat_transform_vector(
v3: wp.array(dtype=vec3),
m4: wp.array(dtype=mat44),
outcomponents: wp.array(dtype=wptype),
):
# multiply outputs by 2 so we've got something to backpropagate:
presult = wptype(2) * wp.transform_vector(m4[0], v3[0])
outcomponents[0] = presult[0]
outcomponents[1] = presult[1]
outcomponents[2] = presult[2]
kernel = getkernel(check_mat_transform_vector, suffix=dtype.__name__)
if register_kernels:
return
v3 = wp.array(randvals(rng, [1, 3], dtype), dtype=vec3, requires_grad=True, device=device)
m4 = wp.array(randvals(rng, [1, 4, 4], dtype), dtype=mat44, requires_grad=True, device=device)
outcomponents = wp.zeros(3, dtype=wptype, requires_grad=True, device=device)
out = wp.zeros(1, dtype=wptype, requires_grad=True, device=device)
wp.launch(kernel, dim=1, inputs=[v3, m4], outputs=[outcomponents], device=device)
v3homog = np.zeros(4, dtype=dtype)
v3homog[:3] = v3.numpy()[0]
assert_np_equal(outcomponents.numpy(), 2 * np.matmul(m4.numpy()[0], v3homog)[:3], tol=10 * tol)
if dtype in np_float_types:
for j in range(3):
tape = wp.Tape()
with tape:
wp.launch(kernel, dim=1, inputs=[v3, m4], outputs=[outcomponents], device=device)
wp.launch(output_select_kernel, dim=1, inputs=[outcomponents, j], outputs=[out], device=device)
tape.backward(loss=out)
assert_np_equal(2 * m4.numpy()[0, j, :3], tape.gradients[v3].numpy(), tol=tol)
expected = np.zeros((4, 4), dtype=dtype)
expected[j, :3] = 2 * v3.numpy()
assert_np_equal(tape.gradients[m4].numpy(), expected, tol=tol)
tape.zero()
# Test matrix constructors using an explicit type (float16).
# Note that these tests deliberately avoid using generics / closure args to
# create kernels dynamically (unlike the rest of this file), as those take
# different code paths to resolve argument types, which have led to regressions.
@wp.kernel
def test_constructors_explicit_precision():
# construction for custom matrix types
eye = wp.identity(dtype=wp.float16, n=2)
zeros = wp.matrix(shape=(2, 2), dtype=wp.float16)
custom = wp.matrix(wp.float16(0.0), wp.float16(1.0), wp.float16(2.0), wp.float16(3.0), shape=(2, 2))
for i in range(2):
for j in range(2):
if i == j:
wp.expect_eq(eye[i, j], wp.float16(1.0))
else:
wp.expect_eq(eye[i, j], wp.float16(0.0))
wp.expect_eq(zeros[i, j], wp.float16(0.0))
wp.expect_eq(custom[i, j], wp.float16(i) * wp.float16(2.0) + wp.float16(j))
mat32d = wp.mat(shape=(3, 2), dtype=wp.float64)
@wp.kernel
def test_matrix_constructor_value_func():
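    # each construction below exercises a different value_func resolution path:
    # default-typed zero init (a), copy via generic wp.matrix() (b), custom-typed
    # zero init (c), copy with an explicit shape (d), per-component scalars (e),
    # and construction from vectors (f)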
a = wp.mat22()
b = wp.matrix(a, shape=(2, 2))
c = mat32d()
d = mat32d(c, shape=(3, 2))
e = mat32d(wp.float64(1.0), wp.float64(2.0), wp.float64(1.0), wp.float64(2.0), wp.float64(1.0), wp.float64(2.0))
f = mat32d(
wp.vec3d(wp.float64(1.0), wp.float64(2.0), wp.float64(3.0)),
wp.vec3d(wp.float64(1.0), wp.float64(2.0), wp.float64(3.0)),
)
# Same as above, but with default (float/int) types, which exercises some
# different code paths that need to ensure types are correctly canonicalized
# during codegen
@wp.kernel
def test_constructors_default_precision():
# construction for default (float) matrix types
eye = wp.identity(dtype=float, n=2)
zeros = wp.matrix(shape=(2, 2), dtype=float)
custom = wp.matrix(0.0, 1.0, 2.0, 3.0, shape=(2, 2))
for i in range(2):
for j in range(2):
if i == j:
wp.expect_eq(eye[i, j], 1.0)
else:
wp.expect_eq(eye[i, j], 0.0)
wp.expect_eq(zeros[i, j], 0.0)
wp.expect_eq(custom[i, j], float(i) * 2.0 + float(j))
@wp.kernel
def test_matrix_mutation(expected: wp.types.matrix(shape=(10, 3), dtype=float)):
m = wp.matrix(shape=(10, 3), dtype=float)
# test direct element indexing
m[0, 0] = 1.0
m[0, 1] = 2.0
m[0, 2] = 3.0
    # The nested indexing (matrix->vector->scalar) below does not currently
    # modify m because m[0] returns the row vector by value rather than by
    # reference. This differs from NumPy, which returns views. It is not clear
    # how we could support write-through here while also supporting auto-diff
    # (see the sketch after this kernel).
    # m[0][1] = 2.0
    # m[0][2] = 3.0
# test setting rows
for i in range(1, 10):
m[i] = m[i - 1] + wp.vec3(1.0, 2.0, 3.0)
wp.expect_eq(m, expected)
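# A minimal sketch (hypothetical kernel, not registered with the test suite) of
# the by-value row semantics described in test_matrix_mutation above: writing
# through a row view leaves the matrix unchanged, while direct element indexing
# writes through.
@wp.kernel
def matrix_row_value_semantics_sketch():
    m = wp.matrix(shape=(2, 2), dtype=float)
    row = m[0]  # copies row 0 into a local vector
    row[0] = 1.0  # modifies only the local copy
    wp.expect_eq(m[0, 0], 0.0)  # the matrix itself is unchanged
    m[0, 0] = 1.0  # direct element indexing does write through
    wp.expect_eq(m[0, 0], 1.0)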
CONSTANT_SHAPE_ROWS = wp.constant(10)
CONSTANT_SHAPE_COLS = wp.constant(10)
# tests that global constants can be used in the shape keyword argument
# of the matrix constructor
@wp.kernel
def test_constructors_constant_shape():
m = wp.matrix(shape=(CONSTANT_SHAPE_ROWS, CONSTANT_SHAPE_COLS), dtype=float)
for i in range(CONSTANT_SHAPE_ROWS):
for j in range(CONSTANT_SHAPE_COLS):
m[i, j] = float(i * j)
devices = get_test_devices()
class TestMat(unittest.TestCase):
def test_tpl_ops_with_anon(self):
mat22f = wp.mat((2, 2), dtype=float)
m = wp.mat22f(1.0, 2.0, 3.0, 4.0)
m += mat22f(2.0, 3.0, 4.0, 5.0)
m -= mat22f(3.0, 4.0, 5.0, 6.0)
self.assertSequenceEqual(m, ((0.0, 1.0), (2.0, 3.0)))
m = mat22f(1.0, 2.0, 3.0, 4.0)
m += wp.mat22f(2.0, 3.0, 4.0, 5.0)
m -= wp.mat22f(3.0, 4.0, 5.0, 6.0)
self.assertSequenceEqual(m, ((0.0, 1.0), (2.0, 3.0)))
add_kernel_test(TestMat, test_constructors_explicit_precision, dim=1, devices=devices)
add_kernel_test(TestMat, test_constructors_default_precision, dim=1, devices=devices)
add_kernel_test(TestMat, test_constructors_constant_shape, dim=1, devices=devices)
add_kernel_test(TestMat, test_matrix_constructor_value_func, dim=1, devices=devices)
mat103 = wp.types.matrix(shape=(10, 3), dtype=float)
add_kernel_test(
TestMat,
test_matrix_mutation,
dim=1,
inputs=[
mat103(
1.0, 2.0, 3.0,
2.0, 4.0, 6.0,
3.0, 6.0, 9.0,
4.0, 8.0, 12.0,
5.0, 10.0, 15.0,
6.0, 12.0, 18.0,
7.0, 14.0, 21.0,
8.0, 16.0, 24.0,
9.0, 18.0, 27.0,
10.0, 20.0, 30.0,
)
],
devices=devices,
) # fmt: skip
for dtype in np_signed_int_types + np_float_types:
add_function_test_register_kernel(
TestMat, f"test_negation_{dtype.__name__}", test_negation, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestMat, f"test_subtraction_{dtype.__name__}", test_subtraction, devices=devices, dtype=dtype
)
add_function_test(
TestMat,
"test_anon_constructor_error_shape_keyword_missing",
test_anon_constructor_error_shape_keyword_missing,
devices=devices,
)
add_function_test(
TestMat,
"test_anon_constructor_error_dtype_keyword_missing",
test_anon_constructor_error_dtype_keyword_missing,
devices=devices,
)
add_function_test(
TestMat,
"test_anon_constructor_error_shape_mismatch",
test_anon_constructor_error_shape_mismatch,
devices=devices,
)
add_function_test(
TestMat,
"test_anon_constructor_error_invalid_arg_count",
test_anon_constructor_error_invalid_arg_count,
devices=devices,
)
add_function_test(
TestMat,
"test_tpl_constructor_error_incompatible_sizes",
test_tpl_constructor_error_incompatible_sizes,
devices=devices,
)
add_function_test(
TestMat,
"test_tpl_constructor_error_invalid_scalar_type",
test_tpl_constructor_error_invalid_scalar_type,
devices=devices,
)
add_function_test(
TestMat,
"test_tpl_constructor_error_invalid_vector_count",
test_tpl_constructor_error_invalid_vector_count,
devices=devices,
)
add_function_test(
TestMat,
"test_tpl_constructor_error_invalid_vector_shape",
test_tpl_constructor_error_invalid_vector_shape,
devices=devices,
)
add_function_test(
TestMat,
"test_tpl_constructor_error_invalid_arg_count",
test_tpl_constructor_error_invalid_arg_count,
devices=devices,
)
for dtype in np_float_types:
add_function_test(
TestMat, f"test_py_arithmetic_ops_{dtype.__name__}", test_py_arithmetic_ops, devices=None, dtype=dtype
)
add_function_test_register_kernel(
TestMat, f"test_quat_constructor_{dtype.__name__}", test_quat_constructor, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestMat, f"test_inverse_{dtype.__name__}", test_inverse, devices=devices, dtype=dtype
)
add_function_test_register_kernel(TestMat, f"test_svd_{dtype.__name__}", test_svd, devices=devices, dtype=dtype)
add_function_test_register_kernel(TestMat, f"test_qr_{dtype.__name__}", test_qr, devices=devices, dtype=dtype)
add_function_test_register_kernel(TestMat, f"test_eig_{dtype.__name__}", test_eig, devices=devices, dtype=dtype)
add_function_test_register_kernel(
TestMat, f"test_transform_point_{dtype.__name__}", test_transform_point, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestMat, f"test_transform_vector_{dtype.__name__}", test_transform_vector, devices=devices, dtype=dtype
)
add_function_test_register_kernel(
TestMat, f"test_determinant_{dtype.__name__}", test_determinant, devices=devices, dtype=dtype
)
add_function_test_register_kernel(TestMat, f"test_skew_{dtype.__name__}", test_skew, devices=devices, dtype=dtype)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=True)
| 65,047 | Python | 35.137778 | 118 | 0.539917 |
NVIDIA/warp/warp/tests/aux_test_unresolved_func.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import warp as wp
@wp.kernel
def unresolved_func_kernel():
# this should trigger an exception due to unresolved function
x = wp.missing_func(42)
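# A minimal sketch (hypothetical, not part of this module) of how a test could
# assert the failure; the exact exception type is an assumption:
#
#     with self.assertRaises(Exception):
#         import warp.tests.aux_test_unresolved_func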
| 579 | Python | 37.666664 | 76 | 0.791019 |
NVIDIA/warp/warp/tests/test_bool.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
TRUE_CONSTANT = wp.constant(True)
@wp.func
def identity_function(input_bool: wp.bool, plain_bool: bool):
return input_bool and plain_bool
@wp.kernel
def identity_test(data: wp.array(dtype=wp.bool)):
i = wp.tid()
data[i] = data[i] and True
data[i] = data[i] and wp.bool(True)
data[i] = data[i] and not False
data[i] = data[i] and not wp.bool(False)
data[i] = identity_function(data[i], True)
if data[i]:
data[i] = True
else:
data[i] = False
if not data[i]:
data[i] = False
else:
data[i] = True
if data[i] and True:
data[i] = True
else:
data[i] = False
if data[i] or False:
data[i] = True
else:
data[i] = False
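    # wp.select(cond, a, b) returns a when cond is False and b otherwise,
    # so the call below is yet another identity transform on data[i]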
data[i] = wp.select(data[i], False, True)
def test_bool_identity_ops(test, device):
rng = np.random.default_rng(123)
dim_x = 10
rand_np = rng.random(dim_x) > 0.5
data_array = wp.array(data=rand_np, device=device)
test.assertEqual(data_array.dtype, wp.bool)
wp.launch(identity_test, dim=data_array.shape, inputs=[data_array], device=device)
assert_np_equal(data_array.numpy(), rand_np)
@wp.kernel
def check_compile_constant(result: wp.array(dtype=wp.bool)):
if TRUE_CONSTANT:
result[0] = TRUE_CONSTANT
else:
result[0] = False
def test_bool_constant(test, device):
compile_constant_value = wp.zeros(1, dtype=wp.bool, device=device)
wp.launch(check_compile_constant, 1, inputs=[compile_constant_value], device=device)
test.assertTrue(compile_constant_value.numpy()[0])
# Repeat the comparison with dtype=bool for the array
compile_constant_value = wp.zeros(1, dtype=bool, device=device)
wp.launch(check_compile_constant, 1, inputs=[compile_constant_value], device=device)
test.assertTrue(compile_constant_value.numpy()[0])
vec3bool = wp.vec(length=3, dtype=wp.bool)
bool_selector_vec = wp.constant(vec3bool([True, False, True]))
@wp.kernel
def sum_from_bool_vec(sum_array: wp.array(dtype=wp.int32)):
i = wp.tid()
if bool_selector_vec[0]:
sum_array[i] = sum_array[i] + 1
if bool_selector_vec[1]:
sum_array[i] = sum_array[i] + 2
if bool_selector_vec[2]:
sum_array[i] = sum_array[i] + 4
def test_bool_constant_vec(test, device):
result_array = wp.zeros(10, dtype=wp.int32, device=device)
wp.launch(sum_from_bool_vec, result_array.shape, inputs=[result_array], device=device)
assert_np_equal(result_array.numpy(), np.full(result_array.shape, 5))
mat22bool = wp.mat((2, 2), dtype=wp.bool)
bool_selector_mat = wp.constant(mat22bool([True, False, False, True]))
@wp.kernel
def sum_from_bool_mat(sum_array: wp.array(dtype=wp.int32)):
i = wp.tid()
if bool_selector_mat[0, 0]:
sum_array[i] = sum_array[i] + 1
if bool_selector_mat[0, 1]:
sum_array[i] = sum_array[i] + 2
if bool_selector_mat[1, 0]:
sum_array[i] = sum_array[i] + 4
if bool_selector_mat[1, 1]:
sum_array[i] = sum_array[i] + 8
def test_bool_constant_mat(test, device):
result_array = wp.zeros(10, dtype=wp.int32, device=device)
wp.launch(sum_from_bool_mat, result_array.shape, inputs=[result_array], device=device)
assert_np_equal(result_array.numpy(), np.full(result_array.shape, 9))
vec3bool_type = wp.types.vector(length=3, dtype=bool)
@wp.kernel
def test_bool_vec_anonymous_typing():
# Zero initialize
wp.expect_eq(vec3bool_type(), wp.vector(False, False, False))
# Scalar initialize
wp.expect_eq(vec3bool_type(True), wp.vector(True, True, True))
# Component-wise initialize
wp.expect_eq(vec3bool_type(True, False, True), wp.vector(True, False, True))
def test_bool_vec_typing(test, device):
# Zero initialize
vec3bool_z = vec3bool_type()
test.assertEqual(tuple(vec3bool_z), (False, False, False))
# Scalar initialize
vec3bool_s = vec3bool_type(True)
test.assertEqual(tuple(vec3bool_s), (True, True, True))
# Component-wise initialize
vec3bool_c = vec3bool_type(True, False, True)
test.assertEqual(tuple(vec3bool_c), (True, False, True))
wp.launch(test_bool_vec_anonymous_typing, (1,), inputs=[], device=device)
mat22bool_type = wp.types.matrix((2, 2), dtype=bool)
@wp.kernel
def test_bool_mat_anonymous_typing():
# Zero initialize
wp.expect_eq(mat22bool_type(), wp.matrix(False, False, False, False, shape=(2, 2)))
# Scalar initialize
wp.expect_eq(mat22bool_type(True), wp.matrix(True, True, True, True, shape=(2, 2)))
# Component-wise initialize
wp.expect_eq(mat22bool_type(True, False, True, False), wp.matrix(True, False, True, False, shape=(2, 2)))
def test_bool_mat_typing(test, device):
# Zero initialize
mat22bool_z = mat22bool_type()
test.assertEqual(tuple(mat22bool_z), ((False, False), (False, False)))
# Scalar initialize
mat22bool_s = mat22bool_type(True)
test.assertEqual(tuple(mat22bool_s), ((True, True), (True, True)))
# Component-wise initialize
mat22bool_c = mat22bool_type(True, False, True, False)
test.assertEqual(tuple(mat22bool_c), ((True, False), (True, False)))
wp.launch(test_bool_mat_anonymous_typing, (1,), inputs=[], device=device)
devices = get_test_devices()
class TestBool(unittest.TestCase):
pass
add_function_test(TestBool, "test_bool_identity_ops", test_bool_identity_ops, devices=devices)
add_function_test(TestBool, "test_bool_constant", test_bool_constant, devices=devices)
add_function_test(TestBool, "test_bool_constant_vec", test_bool_constant_vec, devices=devices)
add_function_test(TestBool, "test_bool_constant_mat", test_bool_constant_mat, devices=devices)
add_function_test(TestBool, "test_bool_vec_typing", test_bool_vec_typing, devices=devices)
add_function_test(TestBool, "test_bool_mat_typing", test_bool_mat_typing, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 6,478 | Python | 29.41784 | 109 | 0.676443 |
NVIDIA/warp/warp/tests/test_mesh_query_aabb.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
@wp.func
def min_vec3(a: wp.vec3, b: wp.vec3):
return wp.vec3(wp.min(a[0], b[0]), wp.min(a[1], b[1]), wp.min(a[2], b[2]))
@wp.func
def max_vec3(a: wp.vec3, b: wp.vec3):
return wp.vec3(wp.max(a[0], b[0]), wp.max(a[1], b[1]), wp.max(a[2], b[2]))
@wp.kernel
def compute_bounds(
indices: wp.array(dtype=int),
positions: wp.array(dtype=wp.vec3),
lowers: wp.array(dtype=wp.vec3),
uppers: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
i = indices[tid * 3 + 0]
j = indices[tid * 3 + 1]
k = indices[tid * 3 + 2]
x0 = positions[i] # point zero
x1 = positions[j] # point one
x2 = positions[k] # point two
lower = min_vec3(min_vec3(x0, x1), x2)
upper = max_vec3(max_vec3(x0, x1), x2)
lowers[tid] = lower
uppers[tid] = upper
@wp.kernel
def compute_num_contacts(
lowers: wp.array(dtype=wp.vec3), uppers: wp.array(dtype=wp.vec3), mesh_id: wp.uint64, counts: wp.array(dtype=int)
):
face_index = int(0)
face_u = float(0.0)
face_v = float(0.0)
sign = float(0.0)
tid = wp.tid()
upper = uppers[tid]
lower = lowers[tid]
query = wp.mesh_query_aabb(mesh_id, lower, upper)
count = int(0)
# index = int(-1)
# while wp.mesh_query_aabb_next(query, index):
for _index in query:
count = count + 1
counts[tid] = count
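# A minimal sketch (hypothetical kernel, not launched by the tests below) of the
# explicit iteration form hinted at by the commented-out lines above:
# wp.mesh_query_aabb_next() writes the next overlapping face into `index` and
# returns False once the query is exhausted.
@wp.kernel
def compute_num_contacts_explicit(
    lowers: wp.array(dtype=wp.vec3), uppers: wp.array(dtype=wp.vec3), mesh_id: wp.uint64, counts: wp.array(dtype=int)
):
    tid = wp.tid()
    query = wp.mesh_query_aabb(mesh_id, lowers[tid], uppers[tid])
    index = int(-1)
    count = int(0)
    while wp.mesh_query_aabb_next(query, index):
        count = count + 1
    counts[tid] = count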
def test_compute_bounds(test, device):
# create two touching triangles.
points = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [-1, -1, 1]])
indices = np.array([0, 1, 2, 1, 2, 3])
m = wp.Mesh(
points=wp.array(points, dtype=wp.vec3, device=device),
indices=wp.array(indices, dtype=int, device=device),
)
num_tris = int(len(indices) / 3)
# First compute bounds of each of the triangles.
lowers = wp.empty(n=num_tris, dtype=wp.vec3, device=device)
uppers = wp.empty_like(lowers)
wp.launch(
kernel=compute_bounds,
dim=num_tris,
inputs=[m.indices, m.points],
outputs=[lowers, uppers],
device=device,
)
lower_view = lowers.numpy()
upper_view = uppers.numpy()
# Confirm the bounds of each triangle are correct.
test.assertTrue(lower_view[0][0] == 0)
test.assertTrue(lower_view[0][1] == 0)
test.assertTrue(lower_view[0][2] == 0)
test.assertTrue(upper_view[0][0] == 1)
test.assertTrue(upper_view[0][1] == 1)
test.assertTrue(upper_view[0][2] == 0)
test.assertTrue(lower_view[1][0] == -1)
test.assertTrue(lower_view[1][1] == -1)
test.assertTrue(lower_view[1][2] == 0)
test.assertTrue(upper_view[1][0] == 1)
test.assertTrue(upper_view[1][1] == 1)
test.assertTrue(upper_view[1][2] == 1)
def test_mesh_query_aabb_count_overlap(test, device):
# create two touching triangles.
points = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [-1, -1, 1]])
indices = np.array([0, 1, 2, 1, 2, 3])
m = wp.Mesh(
points=wp.array(points, dtype=wp.vec3, device=device),
indices=wp.array(indices, dtype=int, device=device),
)
num_tris = int(len(indices) / 3)
# Compute AABB of each of the triangles.
lowers = wp.empty(n=num_tris, dtype=wp.vec3, device=device)
uppers = wp.empty_like(lowers)
wp.launch(
kernel=compute_bounds,
dim=num_tris,
inputs=[m.indices, m.points],
outputs=[lowers, uppers],
device=device,
)
counts = wp.empty(n=num_tris, dtype=int, device=device)
wp.launch(
kernel=compute_num_contacts,
dim=num_tris,
inputs=[lowers, uppers, m.id],
outputs=[counts],
device=device,
)
view = counts.numpy()
    # the two triangles share an edge, so their AABBs overlap and each query returns both
for c in view:
test.assertTrue(c == 2)
def test_mesh_query_aabb_count_nonoverlap(test, device):
# create two separate triangles.
points = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [10, 0, 0], [10, 1, 0], [10, 0, 1]])
indices = np.array([0, 1, 2, 3, 4, 5])
m = wp.Mesh(
points=wp.array(points, dtype=wp.vec3, device=device),
indices=wp.array(indices, dtype=int, device=device),
)
num_tris = int(len(indices) / 3)
lowers = wp.empty(n=num_tris, dtype=wp.vec3, device=device)
uppers = wp.empty_like(lowers)
wp.launch(
kernel=compute_bounds,
dim=num_tris,
inputs=[m.indices, m.points],
outputs=[lowers, uppers],
device=device,
)
counts = wp.empty(n=num_tris, dtype=int, device=device)
wp.launch(
kernel=compute_num_contacts,
dim=num_tris,
inputs=[lowers, uppers, m.id],
outputs=[counts],
device=device,
)
view = counts.numpy()
    # each AABB query returns only its own triangle; the triangles are too far apart for their bounds to overlap
for c in view:
test.assertTrue(c == 1)
devices = get_test_devices()
class TestMeshQueryAABBMethods(unittest.TestCase):
def test_mesh_query_aabb_codegen_adjoints_with_select(self):
def kernel_fn(
mesh: wp.uint64,
):
v = wp.vec3(0.0, 0.0, 0.0)
if True:
query = wp.mesh_query_aabb(mesh, v, v)
else:
query = wp.mesh_query_aabb(mesh, v, v)
wp.Kernel(func=kernel_fn)
add_function_test(TestMeshQueryAABBMethods, "test_compute_bounds", test_compute_bounds, devices=devices)
add_function_test(
TestMeshQueryAABBMethods, "test_mesh_query_aabb_count_overlap", test_mesh_query_aabb_count_overlap, devices=devices
)
add_function_test(
TestMeshQueryAABBMethods,
"test_mesh_query_aabb_count_nonoverlap",
test_mesh_query_aabb_count_nonoverlap,
devices=devices,
)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 6,307 | Python | 26.788546 | 119 | 0.611701 |
NVIDIA/warp/warp/tests/test_grad_customs.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
import numpy as np
import warp as wp
from warp.tests.unittest_utils import *
# atomic add function that records the counter value each thread received
# so that the same per-thread value can be reused in the replay phase of
# the backward pass
@wp.func
def reversible_increment(
counter: wp.array(dtype=int), counter_index: int, value: int, thread_values: wp.array(dtype=int), tid: int
):
"""This is a docstring"""
next_index = wp.atomic_add(counter, counter_index, value)
thread_values[tid] = next_index
return next_index
@wp.func_replay(reversible_increment)
def replay_reversible_increment(
counter: wp.array(dtype=int), counter_index: int, value: int, thread_values: wp.array(dtype=int), tid: int
):
"""This is a docstring"""
return thread_values[tid]
def test_custom_replay_grad(test, device):
num_threads = 128
counter = wp.zeros(1, dtype=wp.int32, device=device)
thread_ids = wp.zeros(num_threads, dtype=wp.int32, device=device)
inputs = wp.array(np.arange(num_threads, dtype=np.float32), device=device, requires_grad=True)
outputs = wp.zeros_like(inputs)
@wp.kernel
def run_atomic_add(
input: wp.array(dtype=float),
counter: wp.array(dtype=int),
thread_values: wp.array(dtype=int),
output: wp.array(dtype=float),
):
tid = wp.tid()
idx = reversible_increment(counter, 0, 1, thread_values, tid)
output[idx] = input[idx] ** 2.0
tape = wp.Tape()
with tape:
wp.launch(
run_atomic_add, dim=num_threads, inputs=[inputs, counter, thread_ids], outputs=[outputs], device=device
)
tape.backward(grads={outputs: wp.ones(num_threads, dtype=wp.float32, device=device)})
assert_np_equal(inputs.grad.numpy(), 2.0 * inputs.numpy(), tol=1e-4)
@wp.func
def overload_fn(x: float, y: float):
"""This is a docstring"""
return x * 3.0 + y / 3.0, y**2.5
@wp.func_grad(overload_fn)
def overload_fn_grad(x: float, y: float, adj_ret0: float, adj_ret1: float):
"""This is a docstring"""
wp.adjoint[x] += x * adj_ret0 * 42.0 + y * adj_ret1 * 10.0
wp.adjoint[y] += y * adj_ret1 * 3.0
@wp.struct
class MyStruct:
"""This is a docstring"""
scalar: float
vec: wp.vec3
@wp.func
def overload_fn(x: MyStruct):
"""This is a docstring"""
return x.vec[0] * x.vec[1] * x.vec[2] * 4.0, wp.length(x.vec), x.scalar**0.5
@wp.func_grad(overload_fn)
def overload_fn_grad(x: MyStruct, adj_ret0: float, adj_ret1: float, adj_ret2: float):
"""This is a docstring"""
wp.adjoint[x.scalar] += x.scalar * adj_ret0 * 10.0
wp.adjoint[x.vec][0] += adj_ret0 * x.vec[1] * x.vec[2] * 20.0
wp.adjoint[x.vec][1] += adj_ret1 * x.vec[0] * x.vec[2] * 30.0
wp.adjoint[x.vec][2] += adj_ret2 * x.vec[0] * x.vec[1] * 40.0
@wp.kernel
def run_overload_float_fn(
xs: wp.array(dtype=float), ys: wp.array(dtype=float), output0: wp.array(dtype=float), output1: wp.array(dtype=float)
):
"""This is a docstring"""
i = wp.tid()
out0, out1 = overload_fn(xs[i], ys[i])
output0[i] = out0
output1[i] = out1
@wp.kernel
def run_overload_struct_fn(xs: wp.array(dtype=MyStruct), output: wp.array(dtype=float)):
i = wp.tid()
out0, out1, out2 = overload_fn(xs[i])
output[i] = out0 + out1 + out2
def test_custom_overload_grad(test, device):
dim = 3
xs_float = wp.array(np.arange(1.0, dim + 1.0), dtype=wp.float32, requires_grad=True, device=device)
ys_float = wp.array(np.arange(10.0, dim + 10.0), dtype=wp.float32, requires_grad=True, device=device)
out0_float = wp.zeros(dim, device=device)
out1_float = wp.zeros(dim, device=device)
tape = wp.Tape()
with tape:
wp.launch(
run_overload_float_fn, dim=dim, inputs=[xs_float, ys_float], outputs=[out0_float, out1_float], device=device
)
tape.backward(
grads={
out0_float: wp.ones(dim, dtype=wp.float32, device=device),
out1_float: wp.ones(dim, dtype=wp.float32, device=device),
}
)
assert_np_equal(xs_float.grad.numpy(), xs_float.numpy() * 42.0 + ys_float.numpy() * 10.0)
assert_np_equal(ys_float.grad.numpy(), ys_float.numpy() * 3.0)
x0 = MyStruct()
x0.vec = wp.vec3(1.0, 2.0, 3.0)
x0.scalar = 4.0
x1 = MyStruct()
x1.vec = wp.vec3(5.0, 6.0, 7.0)
x1.scalar = -1.0
x2 = MyStruct()
x2.vec = wp.vec3(8.0, 9.0, 10.0)
x2.scalar = 19.0
xs_struct = wp.array([x0, x1, x2], dtype=MyStruct, requires_grad=True, device=device)
out_struct = wp.zeros(dim, device=device)
tape = wp.Tape()
with tape:
wp.launch(run_overload_struct_fn, dim=dim, inputs=[xs_struct], outputs=[out_struct], device=device)
tape.backward(grads={out_struct: wp.ones(dim, dtype=wp.float32, device=device)})
xs_struct_np = xs_struct.numpy()
struct_grads = xs_struct.grad.numpy()
# fmt: off
assert_np_equal(
np.array([g[0] for g in struct_grads]),
np.array([g[0] * 10.0 for g in xs_struct_np]))
assert_np_equal(
np.array([g[1][0] for g in struct_grads]),
np.array([g[1][1] * g[1][2] * 20.0 for g in xs_struct_np]))
assert_np_equal(
np.array([g[1][1] for g in struct_grads]),
np.array([g[1][0] * g[1][2] * 30.0 for g in xs_struct_np]))
assert_np_equal(
np.array([g[1][2] for g in struct_grads]),
np.array([g[1][0] * g[1][1] * 40.0 for g in xs_struct_np]))
# fmt: on
def test_custom_import_grad(test, device):
from warp.tests.aux_test_grad_customs import aux_custom_fn
@wp.kernel
def run_defined_float_fn(
xs: wp.array(dtype=float),
ys: wp.array(dtype=float),
output0: wp.array(dtype=float),
output1: wp.array(dtype=float),
):
i = wp.tid()
out0, out1 = aux_custom_fn(xs[i], ys[i])
output0[i] = out0
output1[i] = out1
dim = 3
xs_float = wp.array(np.arange(1.0, dim + 1.0), dtype=wp.float32, requires_grad=True, device=device)
ys_float = wp.array(np.arange(10.0, dim + 10.0), dtype=wp.float32, requires_grad=True, device=device)
out0_float = wp.zeros(dim, device=device)
out1_float = wp.zeros(dim, device=device)
tape = wp.Tape()
with tape:
wp.launch(
run_defined_float_fn, dim=dim, inputs=[xs_float, ys_float], outputs=[out0_float, out1_float], device=device
)
tape.backward(
grads={
out0_float: wp.ones(dim, dtype=wp.float32, device=device),
out1_float: wp.ones(dim, dtype=wp.float32, device=device),
}
)
assert_np_equal(xs_float.grad.numpy(), xs_float.numpy() * 42.0 + ys_float.numpy() * 10.0)
assert_np_equal(ys_float.grad.numpy(), ys_float.numpy() * 3.0)
@wp.func
def sigmoid(x: float):
return 1.0 / (1.0 + wp.exp(-x))
@wp.func_grad(sigmoid)
def adj_sigmoid(x: float, adj: float):
    # this gradient function is not invoked by the tests in this file; it checks
    # that calling the forward function from within its own gradient function
    # does not cause infinite recursion
wp.adjoint[x] += adj * sigmoid(x) * (1.0 - sigmoid(x))
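# A minimal sketch (hypothetical kernel, unused by the tests below) of how the
# custom gradient above would be exercised by calling sigmoid() directly:
@wp.kernel
def eval_sigmoid_direct(xs: wp.array(dtype=float), ys: wp.array(dtype=float)):
    i = wp.tid()
    ys[i] = sigmoid(xs[i])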
@wp.func
def sigmoid_no_return(i: int, xs: wp.array(dtype=float), ys: wp.array(dtype=float)):
# test function that does not return anything
ys[i] = sigmoid(xs[i])
@wp.func_grad(sigmoid_no_return)
def adj_sigmoid_no_return(i: int, xs: wp.array(dtype=float), ys: wp.array(dtype=float)):
wp.adjoint[xs][i] += ys[i] * (1.0 - ys[i])
@wp.kernel
def eval_sigmoid(xs: wp.array(dtype=float), ys: wp.array(dtype=float)):
i = wp.tid()
sigmoid_no_return(i, xs, ys)
def test_custom_grad_no_return(test, device):
xs = wp.array([1.0, 2.0, 3.0, 4.0], dtype=wp.float32, requires_grad=True, device=device)
ys = wp.zeros_like(xs, device=device)
ys.grad.fill_(1.0)
tape = wp.Tape()
with tape:
wp.launch(eval_sigmoid, dim=len(xs), inputs=[xs], outputs=[ys], device=device)
tape.backward()
sigmoids = ys.numpy()
grad = xs.grad.numpy()
assert_np_equal(grad, sigmoids * (1.0 - sigmoids))
@wp.func
def dense_gemm(
m: int,
n: int,
p: int,
transpose_A: bool,
transpose_B: bool,
add_to_C: bool,
A: wp.array(dtype=float),
B: wp.array(dtype=float),
# outputs
C: wp.array(dtype=float),
):
    # this function doesn't get called, but it is an important test for code generation
    # multiply an `m x p` matrix A by a `p x n` matrix B to produce an `m x n` matrix C
for i in range(m):
for j in range(n):
sum = float(0.0)
for k in range(p):
if transpose_A:
a_i = k * m + i
else:
a_i = i * p + k
if transpose_B:
b_j = j * p + k
else:
b_j = k * n + j
sum += A[a_i] * B[b_j]
if add_to_C:
C[i * n + j] += sum
else:
C[i * n + j] = sum
@wp.func_grad(dense_gemm)
def adj_dense_gemm(
m: int,
n: int,
p: int,
transpose_A: bool,
transpose_B: bool,
add_to_C: bool,
A: wp.array(dtype=float),
B: wp.array(dtype=float),
# outputs
C: wp.array(dtype=float),
):
# code generation would break here if we didn't defer building the custom grad
# function until after the forward functions + kernels of the module have been built
add_to_C = True
if transpose_A:
dense_gemm(p, m, n, False, True, add_to_C, B, wp.adjoint[C], wp.adjoint[A])
dense_gemm(p, n, m, False, False, add_to_C, A, wp.adjoint[C], wp.adjoint[B])
else:
dense_gemm(m, p, n, False, not transpose_B, add_to_C, wp.adjoint[C], B, wp.adjoint[A])
dense_gemm(p, n, m, True, False, add_to_C, A, wp.adjoint[C], wp.adjoint[B])
devices = get_test_devices()
class TestGradCustoms(unittest.TestCase):
def test_wrapped_docstring(self):
self.assertTrue("This is a docstring" in reversible_increment.__doc__)
self.assertTrue("This is a docstring" in replay_reversible_increment.__doc__)
self.assertTrue("This is a docstring" in overload_fn.__doc__)
self.assertTrue("This is a docstring" in overload_fn_grad.__doc__)
self.assertTrue("This is a docstring" in run_overload_float_fn.__doc__)
self.assertTrue("This is a docstring" in MyStruct.__doc__)
add_function_test(TestGradCustoms, "test_custom_replay_grad", test_custom_replay_grad, devices=devices)
add_function_test(TestGradCustoms, "test_custom_overload_grad", test_custom_overload_grad, devices=devices)
add_function_test(TestGradCustoms, "test_custom_import_grad", test_custom_import_grad, devices=devices)
add_function_test(TestGradCustoms, "test_custom_grad_no_return", test_custom_grad_no_return, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2, failfast=False)
| 11,323 | Python | 33.108434 | 120 | 0.619271 |
NVIDIA/warp/warp/tests/test_options.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import contextlib
import io
import unittest
import warp as wp
from warp.tests.unittest_utils import *
@wp.kernel
def scale(
x: wp.array(dtype=float),
y: wp.array(dtype=float),
):
y[0] = x[0] ** 2.0
@wp.kernel(enable_backward=True)
def scale_1(
x: wp.array(dtype=float),
y: wp.array(dtype=float),
):
y[0] = x[0] ** 2.0
@wp.kernel(enable_backward=False)
def scale_2(
x: wp.array(dtype=float),
y: wp.array(dtype=float),
):
y[0] = x[0] ** 2.0
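# The tests below exercise the precedence rules implied by the kernels above: a
# kernel-level enable_backward setting (scale_1, scale_2) takes priority over
# the module-level option set via wp.set_module_options().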
def test_options_1(test, device):
x = wp.array([3.0], dtype=float, requires_grad=True, device=device)
y = wp.zeros_like(x)
wp.set_module_options({"enable_backward": False})
tape = wp.Tape()
with tape:
wp.launch(scale, dim=1, inputs=[x, y], device=device)
with contextlib.redirect_stdout(io.StringIO()) as f:
tape.backward(y)
expected = f"Warp UserWarning: Running the tape backwards may produce incorrect gradients because recorded kernel {scale.key} is defined in a module with the option 'enable_backward=False' set.\n"
assert f.getvalue() == expected
assert_np_equal(tape.gradients[x].numpy(), np.array(0.0))
def test_options_2(test, device):
x = wp.array([3.0], dtype=float, requires_grad=True, device=device)
y = wp.zeros_like(x)
wp.set_module_options({"enable_backward": True})
tape = wp.Tape()
with tape:
wp.launch(scale, dim=1, inputs=[x, y], device=device)
tape.backward(y)
assert_np_equal(tape.gradients[x].numpy(), np.array(6.0))
def test_options_3(test, device):
x = wp.array([3.0], dtype=float, requires_grad=True, device=device)
y = wp.zeros_like(x)
wp.set_module_options({"enable_backward": False})
tape = wp.Tape()
with tape:
wp.launch(scale_1, dim=1, inputs=[x, y], device=device)
tape.backward(y)
assert_np_equal(tape.gradients[x].numpy(), np.array(6.0))
def test_options_4(test, device):
x = wp.array([3.0], dtype=float, requires_grad=True, device=device)
y = wp.zeros_like(x)
wp.set_module_options({"enable_backward": True})
tape = wp.Tape()
with tape:
wp.launch(scale_2, dim=1, inputs=[x, y], device=device)
with contextlib.redirect_stdout(io.StringIO()) as f:
tape.backward(y)
expected = f"Warp UserWarning: Running the tape backwards may produce incorrect gradients because recorded kernel {scale_2.key} is configured with the option 'enable_backward=False'.\n"
assert f.getvalue() == expected
assert_np_equal(tape.gradients[x].numpy(), np.array(0.0))
devices = get_test_devices()
class TestOptions(unittest.TestCase):
pass
add_function_test(TestOptions, "test_options_1", test_options_1, devices=devices)
add_function_test(TestOptions, "test_options_2", test_options_2, devices=devices)
add_function_test(TestOptions, "test_options_3", test_options_3, devices=devices)
add_function_test(TestOptions, "test_options_4", test_options_4, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)
| 3,485 | Python | 27.57377 | 200 | 0.681492 |
NVIDIA/warp/warp/tests/test_indexedarray.py | # Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import unittest
from typing import Any
import numpy as np
import warp as wp
from warp.tests.test_array import FillStruct
from warp.tests.unittest_utils import *
@wp.kernel
def kernel_1d(a: wp.indexedarray(dtype=float), expected: wp.array(dtype=float)):
i = wp.tid()
wp.expect_eq(a[i], expected[i])
a[i] = 2.0 * a[i]
wp.atomic_add(a, i, 1.0)
wp.expect_eq(a[i], 2.0 * expected[i] + 1.0)
def test_indexedarray_1d(test, device):
values = np.arange(10, dtype=np.float32)
arr = wp.array(data=values, device=device)
indices = wp.array([1, 3, 5, 7, 9], dtype=int, device=device)
iarr = wp.indexedarray1d(arr, [indices])
test.assertEqual(iarr.dtype, arr.dtype)
test.assertEqual(iarr.ndim, 1)
test.assertEqual(iarr.shape, (5,))
test.assertEqual(iarr.size, 5)
expected_arr = wp.array(data=[1, 3, 5, 7, 9], dtype=float, device=device)
wp.launch(kernel_1d, dim=iarr.size, inputs=[iarr, expected_arr], device=device)
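# For reference, a minimal numpy sketch (illustrative helper, not registered as
# a test) of the gather performed by the indexed view built above:
def _indexedarray_1d_numpy_sketch():
    values = np.arange(10, dtype=np.float32)
    idx = np.array([1, 3, 5, 7, 9])
    # wp.indexedarray1d(arr, [indices]) reads the same elements as numpy fancy indexing
    assert np.array_equal(values[idx], np.array([1, 3, 5, 7, 9], dtype=np.float32))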
@wp.kernel
def kernel_2d(a: wp.indexedarray2d(dtype=float), expected: wp.array2d(dtype=float)):
i, j = wp.tid()
# check expected values
wp.expect_eq(a[i, j], expected[i, j])
# test wp.view()
wp.expect_eq(a[i][j], a[i, j])
a[i, j] = 2.0 * a[i, j]
wp.atomic_add(a, i, j, 1.0)
wp.expect_eq(a[i, j], 2.0 * expected[i, j] + 1.0)
def test_indexedarray_2d(test, device):
values = np.arange(100, dtype=np.float32).reshape((10, 10))
arr = wp.array(data=values, device=device)
indices0 = wp.array([1, 3], dtype=int, device=device)
indices1 = wp.array([2, 4, 8], dtype=int, device=device)
iarr = wp.indexedarray2d(arr, [indices0, indices1])
test.assertEqual(iarr.dtype, arr.dtype)
test.assertEqual(iarr.ndim, 2)
test.assertEqual(iarr.shape, (2, 3))
test.assertEqual(iarr.size, 6)
expected_values = [[12, 14, 18], [32, 34, 38]]
expected_arr = wp.array(data=expected_values, dtype=float, device=device)
wp.launch(kernel_2d, dim=iarr.shape, inputs=[iarr, expected_arr], device=device)
@wp.kernel
def kernel_3d(a: wp.indexedarray3d(dtype=float), expected: wp.array3d(dtype=float)):
i, j, k = wp.tid()
# check expected values
wp.expect_eq(a[i, j, k], expected[i, j, k])
# test wp.view()
wp.expect_eq(a[i][j][k], a[i, j, k])
wp.expect_eq(a[i, j][k], a[i, j, k])
wp.expect_eq(a[i][j, k], a[i, j, k])
a[i, j, k] = 2.0 * a[i, j, k]
wp.atomic_add(a, i, j, k, 1.0)
wp.expect_eq(a[i, j, k], 2.0 * expected[i, j, k] + 1.0)
def test_indexedarray_3d(test, device):
values = np.arange(1000, dtype=np.float32).reshape((10, 10, 10))
arr = wp.array(data=values, device=device)
indices0 = wp.array([1, 3], dtype=int, device=device)
indices1 = wp.array([2, 4, 8], dtype=int, device=device)
indices2 = wp.array([0, 5], dtype=int, device=device)
iarr = wp.indexedarray3d(arr, [indices0, indices1, indices2])
test.assertEqual(iarr.dtype, arr.dtype)
test.assertEqual(iarr.ndim, 3)
test.assertEqual(iarr.shape, (2, 3, 2))
test.assertEqual(iarr.size, 12)
expected_values = [
[[120, 125], [140, 145], [180, 185]],
[[320, 325], [340, 345], [380, 385]],
]
expected_arr = wp.array(data=expected_values, dtype=float, device=device)
wp.launch(kernel_3d, dim=iarr.shape, inputs=[iarr, expected_arr], device=device)
@wp.kernel
def kernel_4d(a: wp.indexedarray4d(dtype=float), expected: wp.array4d(dtype=float)):
i, j, k, l = wp.tid()
# check expected values
wp.expect_eq(a[i, j, k, l], expected[i, j, k, l])
# test wp.view()
wp.expect_eq(a[i][j][k][l], a[i, j, k, l])
wp.expect_eq(a[i][j, k, l], a[i, j, k, l])
wp.expect_eq(a[i, j][k, l], a[i, j, k, l])
wp.expect_eq(a[i, j, k][l], a[i, j, k, l])
a[i, j, k, l] = 2.0 * a[i, j, k, l]
wp.atomic_add(a, i, j, k, l, 1.0)
wp.expect_eq(a[i, j, k, l], 2.0 * expected[i, j, k, l] + 1.0)
def test_indexedarray_4d(test, device):
values = np.arange(10000, dtype=np.float32).reshape((10, 10, 10, 10))
arr = wp.array(data=values, device=device)
indices0 = wp.array([1, 3], dtype=int, device=device)
indices1 = wp.array([2, 4, 8], dtype=int, device=device)
indices2 = wp.array([0, 5], dtype=int, device=device)
indices3 = wp.array([6, 7, 9], dtype=int, device=device)
iarr = wp.indexedarray4d(arr, [indices0, indices1, indices2, indices3])
test.assertEqual(iarr.dtype, arr.dtype)
test.assertEqual(iarr.ndim, 4)
test.assertEqual(iarr.shape, (2, 3, 2, 3))
test.assertEqual(iarr.size, 36)
expected_values = [
[
[[1206, 1207, 1209], [1256, 1257, 1259]],
[[1406, 1407, 1409], [1456, 1457, 1459]],
[[1806, 1807, 1809], [1856, 1857, 1859]],
],
[
[[3206, 3207, 3209], [3256, 3257, 3259]],
[[3406, 3407, 3409], [3456, 3457, 3459]],
[[3806, 3807, 3809], [3856, 3857, 3859]],
],
]
expected_arr = wp.array(data=expected_values, dtype=float, device=device)
wp.launch(kernel_4d, dim=iarr.shape, inputs=[iarr, expected_arr], device=device)
def test_indexedarray_mixed(test, device):
    # [[[ 0,  1,  2,  3],
    #   [ 4,  5,  6,  7],
    #   [ 8,  9, 10, 11],
    #   [12, 13, 14, 15]],
    #  [[16, 17, 18, 19],
    #   [20, 21, 22, 23],
    #   [24, 25, 26, 27],
    #   [28, 29, 30, 31]],
    #  [[32, 33, 34, 35],
    #   [36, 37, 38, 39],
    #   [40, 41, 42, 43],
    #   [44, 45, 46, 47]],
    #  [[48, 49, 50, 51],
    #   [52, 53, 54, 55],
    #   [56, 57, 58, 59],
    #   [60, 61, 62, 63]]]
values = np.arange(64, dtype=np.float32).reshape((4, 4, 4))
indices = wp.array([0, 3], dtype=int, device=device)
# -----
arr = wp.array(data=values, device=device)
iarr = wp.indexedarray(arr, [indices, None, None])
test.assertEqual(iarr.dtype, arr.dtype)
test.assertEqual(iarr.ndim, 3)
test.assertEqual(iarr.shape, (2, 4, 4))
test.assertEqual(iarr.size, 32)
expected_values = [
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]],
[[48, 49, 50, 51], [52, 53, 54, 55], [56, 57, 58, 59], [60, 61, 62, 63]],
]
expected_arr = wp.array(data=expected_values, dtype=float, device=device)
wp.launch(kernel_3d, dim=iarr.shape, inputs=[iarr, expected_arr], device=device)
# -----
arr = wp.array(data=values, device=device)
iarr = wp.indexedarray(arr, [indices, indices, None])
test.assertEqual(iarr.dtype, arr.dtype)
test.assertEqual(iarr.ndim, 3)
test.assertEqual(iarr.shape, (2, 2, 4))
test.assertEqual(iarr.size, 16)
expected_values = [[[0, 1, 2, 3], [12, 13, 14, 15]], [[48, 49, 50, 51], [60, 61, 62, 63]]]
expected_arr = wp.array(data=expected_values, dtype=float, device=device)
wp.launch(kernel_3d, dim=iarr.shape, inputs=[iarr, expected_arr], device=device)
# -----
arr = wp.array(data=values, device=device)
iarr = wp.indexedarray(arr, [indices, None, indices])
test.assertEqual(iarr.dtype, arr.dtype)
test.assertEqual(iarr.ndim, 3)
test.assertEqual(iarr.shape, (2, 4, 2))
test.assertEqual(iarr.size, 16)
expected_values = [[[0, 3], [4, 7], [8, 11], [12, 15]], [[48, 51], [52, 55], [56, 59], [60, 63]]]
expected_arr = wp.array(data=expected_values, dtype=float, device=device)
wp.launch(kernel_3d, dim=iarr.shape, inputs=[iarr, expected_arr], device=device)
# -----
arr = wp.array(data=values, device=device)
iarr = wp.indexedarray(arr, [None, indices, indices])
test.assertEqual(iarr.dtype, arr.dtype)
test.assertEqual(iarr.ndim, 3)
test.assertEqual(iarr.shape, (4, 2, 2))
test.assertEqual(iarr.size, 16)
expected_values = [[[0, 3], [12, 15]], [[16, 19], [28, 31]], [[32, 35], [44, 47]], [[48, 51], [60, 63]]]
expected_arr = wp.array(data=expected_values, dtype=float, device=device)
wp.launch(kernel_3d, dim=iarr.shape, inputs=[iarr, expected_arr], device=device)
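# Note: in the mixed cases above, a None entry in the index list leaves that
# dimension un-indexed, analogous to a full slice (:) in numpy.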
vec2i = wp.types.vector(length=2, dtype=wp.int32)
vec3i = wp.types.vector(length=3, dtype=wp.int32)
vec4i = wp.types.vector(length=4, dtype=wp.int32)
@wp.kernel
def shape_kernel_1d(arr: wp.indexedarray1d(dtype=float), expected: int):
wp.expect_eq(arr.shape[0], expected)
@wp.kernel
def shape_kernel_2d(arr: wp.indexedarray2d(dtype=float), expected: vec2i):
wp.expect_eq(arr.shape[0], expected[0])
wp.expect_eq(arr.shape[1], expected[1])
# 1d slice
view = arr[0]
wp.expect_eq(view.shape[0], expected[1])
@wp.kernel
def shape_kernel_3d(arr: wp.indexedarray3d(dtype=float), expected: vec3i):
wp.expect_eq(arr.shape[0], expected[0])
wp.expect_eq(arr.shape[1], expected[1])
wp.expect_eq(arr.shape[2], expected[2])
# 2d slice
view2 = arr[0]
wp.expect_eq(view2.shape[0], expected[1])
wp.expect_eq(view2.shape[1], expected[2])
# 1d slice
view1 = arr[0, 0]
wp.expect_eq(view1.shape[0], expected[2])
@wp.kernel
def shape_kernel_4d(arr: wp.indexedarray4d(dtype=float), expected: vec4i):
wp.expect_eq(arr.shape[0], expected[0])
wp.expect_eq(arr.shape[1], expected[1])
wp.expect_eq(arr.shape[2], expected[2])
wp.expect_eq(arr.shape[3], expected[3])
# 3d slice
view3 = arr[0]
wp.expect_eq(view3.shape[0], expected[1])
wp.expect_eq(view3.shape[1], expected[2])
wp.expect_eq(view3.shape[2], expected[3])
# 2d slice
view2 = arr[0, 0]
wp.expect_eq(view2.shape[0], expected[2])
wp.expect_eq(view2.shape[1], expected[3])
# 1d slice
view1 = arr[0, 0, 0]
wp.expect_eq(view1.shape[0], expected[3])
def test_indexedarray_shape(test, device):
with wp.ScopedDevice(device):
data1 = wp.zeros(10, dtype=float)
data2 = wp.zeros((10, 20), dtype=float)
data3 = wp.zeros((10, 20, 30), dtype=float)
data4 = wp.zeros((10, 20, 30, 40), dtype=float)
indices1 = wp.array(data=[2, 7], dtype=int)
indices2 = wp.array(data=[2, 7, 12, 17], dtype=int)
indices3 = wp.array(data=[2, 7, 12, 17, 22, 27], dtype=int)
indices4 = wp.array(data=[2, 7, 12, 17, 22, 27, 32, 37], dtype=int)
ia1 = wp.indexedarray(data1, [indices1])
wp.launch(shape_kernel_1d, dim=1, inputs=[ia1, 2])
ia2_1 = wp.indexedarray(data2, [indices1, None])
ia2_2 = wp.indexedarray(data2, [None, indices2])
ia2_3 = wp.indexedarray(data2, [indices1, indices2])
wp.launch(shape_kernel_2d, dim=1, inputs=[ia2_1, vec2i(2, 20)])
wp.launch(shape_kernel_2d, dim=1, inputs=[ia2_2, vec2i(10, 4)])
wp.launch(shape_kernel_2d, dim=1, inputs=[ia2_3, vec2i(2, 4)])
ia3_1 = wp.indexedarray(data3, [indices1, None, None])
ia3_2 = wp.indexedarray(data3, [None, indices2, None])
ia3_3 = wp.indexedarray(data3, [None, None, indices3])
ia3_4 = wp.indexedarray(data3, [indices1, indices2, None])
ia3_5 = wp.indexedarray(data3, [indices1, None, indices3])
ia3_6 = wp.indexedarray(data3, [None, indices2, indices3])
ia3_7 = wp.indexedarray(data3, [indices1, indices2, indices3])
wp.launch(shape_kernel_3d, dim=1, inputs=[ia3_1, vec3i(2, 20, 30)])
wp.launch(shape_kernel_3d, dim=1, inputs=[ia3_2, vec3i(10, 4, 30)])
wp.launch(shape_kernel_3d, dim=1, inputs=[ia3_3, vec3i(10, 20, 6)])
wp.launch(shape_kernel_3d, dim=1, inputs=[ia3_4, vec3i(2, 4, 30)])
wp.launch(shape_kernel_3d, dim=1, inputs=[ia3_5, vec3i(2, 20, 6)])
wp.launch(shape_kernel_3d, dim=1, inputs=[ia3_6, vec3i(10, 4, 6)])
wp.launch(shape_kernel_3d, dim=1, inputs=[ia3_7, vec3i(2, 4, 6)])
ia4_1 = wp.indexedarray(data4, [indices1, None, None, None])
ia4_2 = wp.indexedarray(data4, [indices1, None, None, indices4])
ia4_3 = wp.indexedarray(data4, [None, indices2, indices3, None])
ia4_4 = wp.indexedarray(data4, [indices1, indices2, indices3, indices4])
wp.launch(shape_kernel_4d, dim=1, inputs=[ia4_1, vec4i(2, 20, 30, 40)])
wp.launch(shape_kernel_4d, dim=1, inputs=[ia4_2, vec4i(2, 20, 30, 8)])
wp.launch(shape_kernel_4d, dim=1, inputs=[ia4_3, vec4i(10, 4, 6, 40)])
wp.launch(shape_kernel_4d, dim=1, inputs=[ia4_4, vec4i(2, 4, 6, 8)])
wp.synchronize_device(device)
def test_indexedarray_getitem(test, device):
with wp.ScopedDevice(device):
data = wp.array(data=np.arange(1000, dtype=np.int32).reshape((10, 10, 10)))
I = wp.array(data=[0, 1, 2], dtype=int)
# use constructor
a1 = wp.indexedarray(data, [None, None, I])
a2 = wp.indexedarray(data, [None, I])
a3 = wp.indexedarray(data, [None, I, I])
a4 = wp.indexedarray(data, [I])
a5 = wp.indexedarray(data, [I, None, I])
a6 = wp.indexedarray(data, [I, I])
a7 = wp.indexedarray(data, [I, I, I])
# use array.__getitem__()
b1 = data[:, :, I]
b2 = data[:, I]
b3 = data[:, I, I]
b4 = data[I]
b5 = data[I, :, I]
b6 = data[I, I]
b7 = data[I, I, I]
test.assertEqual(type(a1), type(b1))
test.assertEqual(type(a2), type(b2))
test.assertEqual(type(a3), type(b3))
test.assertEqual(type(a4), type(b4))
test.assertEqual(type(a5), type(b5))
test.assertEqual(type(a6), type(b6))
test.assertEqual(type(a7), type(b7))
assert_np_equal(a1.numpy(), b1.numpy())
assert_np_equal(a2.numpy(), b2.numpy())
assert_np_equal(a3.numpy(), b3.numpy())
assert_np_equal(a4.numpy(), b4.numpy())
assert_np_equal(a5.numpy(), b5.numpy())
assert_np_equal(a6.numpy(), b6.numpy())
assert_np_equal(a7.numpy(), b7.numpy())
def test_indexedarray_slicing(test, device):
with wp.ScopedDevice(device):
data = wp.array(data=np.arange(1000, dtype=np.int32).reshape((10, 10, 10)))
# test equivalence of slicing and indexing the same range
s = slice(0, 3)
I = wp.array(data=[0, 1, 2], dtype=int)
a0 = data[s, s, s]
test.assertEqual(type(a0), wp.array)
a1 = data[s, s, I]
test.assertEqual(type(a1), wp.indexedarray)
a2 = data[s, I, s]
test.assertEqual(type(a2), wp.indexedarray)
a3 = data[s, I, I]
test.assertEqual(type(a3), wp.indexedarray)
a4 = data[I, s, s]
test.assertEqual(type(a4), wp.indexedarray)
a5 = data[I, s, I]
test.assertEqual(type(a5), wp.indexedarray)
a6 = data[I, I, s]
test.assertEqual(type(a6), wp.indexedarray)
a7 = data[I, I, I]
test.assertEqual(type(a7), wp.indexedarray)
expected = a0.numpy()
assert_np_equal(a1.numpy(), expected)
assert_np_equal(a2.numpy(), expected)
assert_np_equal(a3.numpy(), expected)
assert_np_equal(a4.numpy(), expected)
assert_np_equal(a5.numpy(), expected)
assert_np_equal(a6.numpy(), expected)
assert_np_equal(a7.numpy(), expected)
# generic increment kernels that work with any array (regular or indexed)
@wp.kernel
def inc_1d(a: Any):
i = wp.tid()
a[i] = a[i] + 1
@wp.kernel
def inc_2d(a: Any):
i, j = wp.tid()
a[i, j] = a[i, j] + 1
@wp.kernel
def inc_3d(a: Any):
i, j, k = wp.tid()
a[i, j, k] = a[i, j, k] + 1
@wp.kernel
def inc_4d(a: Any):
i, j, k, l = wp.tid()
a[i, j, k, l] = a[i, j, k, l] + 1
# explicit overloads so the concrete array / indexed-array instantiations are
# compiled up front, avoiding module reloads when the generic kernels are launched
wp.overload(inc_1d, [wp.array1d(dtype=int)])
wp.overload(inc_2d, [wp.array2d(dtype=int)])
wp.overload(inc_3d, [wp.array3d(dtype=int)])
wp.overload(inc_4d, [wp.array4d(dtype=int)])
wp.overload(inc_1d, [wp.indexedarray1d(dtype=int)])
wp.overload(inc_2d, [wp.indexedarray2d(dtype=int)])
wp.overload(inc_3d, [wp.indexedarray3d(dtype=int)])
wp.overload(inc_4d, [wp.indexedarray4d(dtype=int)])
def test_indexedarray_generics(test, device):
with wp.ScopedDevice(device):
data1 = wp.zeros((5,), dtype=int)
data2 = wp.zeros((5, 5), dtype=int)
data3 = wp.zeros((5, 5, 5), dtype=int)
data4 = wp.zeros((5, 5, 5, 5), dtype=int)
indices = wp.array(data=[0, 4], dtype=int)
ia1 = wp.indexedarray(data1, [indices])
ia2 = wp.indexedarray(data2, [indices, indices])
ia3 = wp.indexedarray(data3, [indices, indices, indices])
ia4 = wp.indexedarray(data4, [indices, indices, indices, indices])
wp.launch(inc_1d, dim=data1.shape, inputs=[data1])
wp.launch(inc_2d, dim=data2.shape, inputs=[data2])
wp.launch(inc_3d, dim=data3.shape, inputs=[data3])
wp.launch(inc_4d, dim=data4.shape, inputs=[data4])
wp.launch(inc_1d, dim=ia1.shape, inputs=[ia1])
wp.launch(inc_2d, dim=ia2.shape, inputs=[ia2])
wp.launch(inc_3d, dim=ia3.shape, inputs=[ia3])
wp.launch(inc_4d, dim=ia4.shape, inputs=[ia4])
expected1 = np.ones(5, dtype=np.int32)
expected1[0] = 2
expected1[4] = 2
expected2 = np.ones((5, 5), dtype=np.int32)
expected2[0, 0] = 2
expected2[0, 4] = 2
expected2[4, 0] = 2
expected2[4, 4] = 2
expected3 = np.ones((5, 5, 5), dtype=np.int32)
expected3[0, 0, 0] = 2
expected3[0, 0, 4] = 2
expected3[0, 4, 0] = 2
expected3[0, 4, 4] = 2
expected3[4, 0, 0] = 2
expected3[4, 0, 4] = 2
expected3[4, 4, 0] = 2
expected3[4, 4, 4] = 2
expected4 = np.ones((5, 5, 5, 5), dtype=np.int32)
expected4[0, 0, 0, 0] = 2
expected4[0, 0, 0, 4] = 2
expected4[0, 0, 4, 0] = 2
expected4[0, 0, 4, 4] = 2
expected4[0, 4, 0, 0] = 2
expected4[0, 4, 0, 4] = 2
expected4[0, 4, 4, 0] = 2
expected4[0, 4, 4, 4] = 2
expected4[4, 0, 0, 0] = 2
expected4[4, 0, 0, 4] = 2
expected4[4, 0, 4, 0] = 2
expected4[4, 0, 4, 4] = 2
expected4[4, 4, 0, 0] = 2
expected4[4, 4, 0, 4] = 2
expected4[4, 4, 4, 0] = 2
expected4[4, 4, 4, 4] = 2
assert_np_equal(data1.numpy(), expected1)
assert_np_equal(data2.numpy(), expected2)
assert_np_equal(data3.numpy(), expected3)
assert_np_equal(data4.numpy(), expected4)
assert_np_equal(ia1.numpy(), np.full((2,), 2, dtype=np.int32))
assert_np_equal(ia2.numpy(), np.full((2, 2), 2, dtype=np.int32))
assert_np_equal(ia3.numpy(), np.full((2, 2, 2), 2, dtype=np.int32))
assert_np_equal(ia4.numpy(), np.full((2, 2, 2, 2), 2, dtype=np.int32))
def test_indexedarray_empty(test, device):
# Test whether common operations work with empty (zero-sized) indexed arrays
# without throwing exceptions.
def test_empty_ops(ndim, nrows, ncols, wptype, nptype):
data_shape = (1,) * ndim
dtype_shape = ()
if wptype in wp.types.scalar_types:
# scalar, vector, or matrix
if ncols > 0:
if nrows > 0:
wptype = wp.types.matrix((nrows, ncols), wptype)
else:
wptype = wp.types.vector(ncols, wptype)
dtype_shape = wptype._shape_
fill_value = wptype(42)
else:
# struct
fill_value = wptype()
# create a data array
data = wp.empty(data_shape, dtype=wptype, device=device, requires_grad=True)
# create a zero-sized array of indices
indices = wp.empty(0, dtype=int, device=device)
a = data[indices]
        # we expect the indexed (first) dimension to have size zero for the
        # empty indexed array; the remaining dimensions are unchanged
expected_shape = (0, *data_shape[1:])
test.assertEqual(a.size, 0)
test.assertEqual(a.shape, expected_shape)
# all of these methods should succeed with zero-sized arrays
a.zero_()
a.fill_(fill_value)
b = a.contiguous()
b = wp.empty_like(a)
b = wp.zeros_like(a)
b = wp.full_like(a, fill_value)
b = wp.clone(a)
wp.copy(a, b)
a.assign(b)
na = a.numpy()
test.assertEqual(na.size, 0)
test.assertEqual(na.shape, (*expected_shape, *dtype_shape))
test.assertEqual(na.dtype, nptype)
test.assertEqual(a.list(), [])
for ndim in range(1, 5):
# test with scalars, vectors, and matrices
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
# scalars
test_empty_ops(ndim, 0, 0, wptype, nptype)
for ncols in [2, 3, 4, 5]:
# vectors
test_empty_ops(ndim, 0, ncols, wptype, nptype)
# square matrices
test_empty_ops(ndim, ncols, ncols, wptype, nptype)
# non-square matrices
test_empty_ops(ndim, 2, 3, wptype, nptype)
test_empty_ops(ndim, 3, 2, wptype, nptype)
test_empty_ops(ndim, 3, 4, wptype, nptype)
test_empty_ops(ndim, 4, 3, wptype, nptype)
# test with structs
test_empty_ops(ndim, 0, 0, FillStruct, FillStruct.numpy_dtype())
def test_indexedarray_fill_scalar(test, device):
dim_x = 4
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
data1 = wp.zeros(dim_x, dtype=wptype, device=device)
data2 = wp.zeros((dim_x, dim_x), dtype=wptype, device=device)
data3 = wp.zeros((dim_x, dim_x, dim_x), dtype=wptype, device=device)
data4 = wp.zeros((dim_x, dim_x, dim_x, dim_x), dtype=wptype, device=device)
indices = wp.array(np.arange(0, dim_x, 2, dtype=np.int32), device=device)
a1 = data1[indices]
a2 = data2[indices]
a3 = data3[indices]
a4 = data4[indices]
assert_np_equal(a1.numpy(), np.zeros(a1.shape, dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros(a2.shape, dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros(a3.shape, dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros(a4.shape, dtype=nptype))
# fill with int value
fill_value = 42
a1.fill_(fill_value)
a2.fill_(fill_value)
a3.fill_(fill_value)
a4.fill_(fill_value)
assert_np_equal(a1.numpy(), np.full(a1.shape, fill_value, dtype=nptype))
assert_np_equal(a2.numpy(), np.full(a2.shape, fill_value, dtype=nptype))
assert_np_equal(a3.numpy(), np.full(a3.shape, fill_value, dtype=nptype))
assert_np_equal(a4.numpy(), np.full(a4.shape, fill_value, dtype=nptype))
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
assert_np_equal(a1.numpy(), np.zeros(a1.shape, dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros(a2.shape, dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros(a3.shape, dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros(a4.shape, dtype=nptype))
if wptype in wp.types.float_types:
# fill with float value
fill_value = 13.37
a1.fill_(fill_value)
a2.fill_(fill_value)
a3.fill_(fill_value)
a4.fill_(fill_value)
assert_np_equal(a1.numpy(), np.full(a1.shape, fill_value, dtype=nptype))
assert_np_equal(a2.numpy(), np.full(a2.shape, fill_value, dtype=nptype))
assert_np_equal(a3.numpy(), np.full(a3.shape, fill_value, dtype=nptype))
assert_np_equal(a4.numpy(), np.full(a4.shape, fill_value, dtype=nptype))
# fill with Warp scalar value
fill_value = wptype(17)
a1.fill_(fill_value)
a2.fill_(fill_value)
a3.fill_(fill_value)
a4.fill_(fill_value)
assert_np_equal(a1.numpy(), np.full(a1.shape, fill_value.value, dtype=nptype))
assert_np_equal(a2.numpy(), np.full(a2.shape, fill_value.value, dtype=nptype))
assert_np_equal(a3.numpy(), np.full(a3.shape, fill_value.value, dtype=nptype))
assert_np_equal(a4.numpy(), np.full(a4.shape, fill_value.value, dtype=nptype))
def test_indexedarray_fill_vector(test, device):
# test filling a vector array with scalar or vector values (vec_type, list, or numpy array)
dim_x = 4
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
# vector types
vector_types = [
wp.types.vector(2, wptype),
wp.types.vector(3, wptype),
wp.types.vector(4, wptype),
wp.types.vector(5, wptype),
]
for vec_type in vector_types:
vec_len = vec_type._length_
data1 = wp.zeros(dim_x, dtype=vec_type, device=device)
data2 = wp.zeros((dim_x, dim_x), dtype=vec_type, device=device)
data3 = wp.zeros((dim_x, dim_x, dim_x), dtype=vec_type, device=device)
data4 = wp.zeros((dim_x, dim_x, dim_x, dim_x), dtype=vec_type, device=device)
indices = wp.array(np.arange(0, dim_x, 2, dtype=np.int32), device=device)
a1 = data1[indices]
a2 = data2[indices]
a3 = data3[indices]
a4 = data4[indices]
assert_np_equal(a1.numpy(), np.zeros((*a1.shape, vec_len), dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros((*a2.shape, vec_len), dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros((*a3.shape, vec_len), dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros((*a4.shape, vec_len), dtype=nptype))
# fill with int scalar
fill_value = 42
a1.fill_(fill_value)
a2.fill_(fill_value)
a3.fill_(fill_value)
a4.fill_(fill_value)
assert_np_equal(a1.numpy(), np.full((*a1.shape, vec_len), fill_value, dtype=nptype))
assert_np_equal(a2.numpy(), np.full((*a2.shape, vec_len), fill_value, dtype=nptype))
assert_np_equal(a3.numpy(), np.full((*a3.shape, vec_len), fill_value, dtype=nptype))
assert_np_equal(a4.numpy(), np.full((*a4.shape, vec_len), fill_value, dtype=nptype))
# test zeroing
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
assert_np_equal(a1.numpy(), np.zeros((*a1.shape, vec_len), dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros((*a2.shape, vec_len), dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros((*a3.shape, vec_len), dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros((*a4.shape, vec_len), dtype=nptype))
# vector values can be passed as a list, numpy array, or Warp vector instance
fill_list = [17, 42, 99, 101, 127][:vec_len]
fill_arr = np.array(fill_list, dtype=nptype)
fill_vec = vec_type(fill_list)
expected1 = np.tile(fill_arr, a1.size).reshape((*a1.shape, vec_len))
expected2 = np.tile(fill_arr, a2.size).reshape((*a2.shape, vec_len))
expected3 = np.tile(fill_arr, a3.size).reshape((*a3.shape, vec_len))
expected4 = np.tile(fill_arr, a4.size).reshape((*a4.shape, vec_len))
# fill with list of vector length
a1.fill_(fill_list)
a2.fill_(fill_list)
a3.fill_(fill_list)
a4.fill_(fill_list)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
# clear
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
# fill with numpy array of vector length
a1.fill_(fill_arr)
a2.fill_(fill_arr)
a3.fill_(fill_arr)
a4.fill_(fill_arr)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
# clear
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
# fill with vec instance
a1.fill_(fill_vec)
a2.fill_(fill_vec)
a3.fill_(fill_vec)
a4.fill_(fill_vec)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
if wptype in wp.types.float_types:
# fill with float scalar
fill_value = 13.37
a1.fill_(fill_value)
a2.fill_(fill_value)
a3.fill_(fill_value)
a4.fill_(fill_value)
assert_np_equal(a1.numpy(), np.full((*a1.shape, vec_len), fill_value, dtype=nptype))
assert_np_equal(a2.numpy(), np.full((*a2.shape, vec_len), fill_value, dtype=nptype))
assert_np_equal(a3.numpy(), np.full((*a3.shape, vec_len), fill_value, dtype=nptype))
assert_np_equal(a4.numpy(), np.full((*a4.shape, vec_len), fill_value, dtype=nptype))
# fill with float list of vector length
fill_list = [-2.5, -1.25, 1.25, 2.5, 5.0][:vec_len]
a1.fill_(fill_list)
a2.fill_(fill_list)
a3.fill_(fill_list)
a4.fill_(fill_list)
expected1 = np.tile(np.array(fill_list, dtype=nptype), a1.size).reshape((*a1.shape, vec_len))
expected2 = np.tile(np.array(fill_list, dtype=nptype), a2.size).reshape((*a2.shape, vec_len))
expected3 = np.tile(np.array(fill_list, dtype=nptype), a3.size).reshape((*a3.shape, vec_len))
expected4 = np.tile(np.array(fill_list, dtype=nptype), a4.size).reshape((*a4.shape, vec_len))
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
                assert_np_equal(a4.numpy(), expected4)


def test_indexedarray_fill_matrix(test, device):
# test filling a matrix array with scalar or matrix values (mat_type, nested list, or 2d numpy array)
dim_x = 4
for nptype, wptype in wp.types.np_dtype_to_warp_type.items():
# matrix types
matrix_types = [
# square matrices
wp.types.matrix((2, 2), wptype),
wp.types.matrix((3, 3), wptype),
wp.types.matrix((4, 4), wptype),
wp.types.matrix((5, 5), wptype),
# non-square matrices
wp.types.matrix((2, 3), wptype),
wp.types.matrix((3, 2), wptype),
wp.types.matrix((3, 4), wptype),
wp.types.matrix((4, 3), wptype),
]
for mat_type in matrix_types:
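            # _length_ is the flattened element count, _shape_ the (rows, cols) pair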
mat_len = mat_type._length_
mat_shape = mat_type._shape_
data1 = wp.zeros(dim_x, dtype=mat_type, device=device)
data2 = wp.zeros((dim_x, dim_x), dtype=mat_type, device=device)
data3 = wp.zeros((dim_x, dim_x, dim_x), dtype=mat_type, device=device)
data4 = wp.zeros((dim_x, dim_x, dim_x, dim_x), dtype=mat_type, device=device)
indices = wp.array(np.arange(0, dim_x, 2, dtype=np.int32), device=device)
a1 = data1[indices]
a2 = data2[indices]
a3 = data3[indices]
a4 = data4[indices]
assert_np_equal(a1.numpy(), np.zeros((*a1.shape, *mat_shape), dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros((*a2.shape, *mat_shape), dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros((*a3.shape, *mat_shape), dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros((*a4.shape, *mat_shape), dtype=nptype))
            # fill with an int scalar (broadcast across all matrix elements)
fill_value = 42
a1.fill_(fill_value)
a2.fill_(fill_value)
a3.fill_(fill_value)
a4.fill_(fill_value)
assert_np_equal(a1.numpy(), np.full((*a1.shape, *mat_shape), fill_value, dtype=nptype))
assert_np_equal(a2.numpy(), np.full((*a2.shape, *mat_shape), fill_value, dtype=nptype))
assert_np_equal(a3.numpy(), np.full((*a3.shape, *mat_shape), fill_value, dtype=nptype))
assert_np_equal(a4.numpy(), np.full((*a4.shape, *mat_shape), fill_value, dtype=nptype))
# test zeroing
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
assert_np_equal(a1.numpy(), np.zeros((*a1.shape, *mat_shape), dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros((*a2.shape, *mat_shape), dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros((*a3.shape, *mat_shape), dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros((*a4.shape, *mat_shape), dtype=nptype))
# matrix values can be passed as a 1d numpy array, 2d numpy array, flat list, nested list, or Warp matrix instance
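            # bool needs special handling here, presumably because np.arange() does not
            # support bool dtypes beyond trivial lengths, so all-ones fill data is used instead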
if wptype != wp.bool:
fill_arr1 = np.arange(mat_len, dtype=nptype)
else:
fill_arr1 = np.ones(mat_len, dtype=nptype)
fill_arr2 = fill_arr1.reshape(mat_shape)
fill_list1 = list(fill_arr1)
fill_list2 = [list(row) for row in fill_arr2]
fill_mat = mat_type(fill_arr1)
expected1 = np.tile(fill_arr1, a1.size).reshape((*a1.shape, *mat_shape))
expected2 = np.tile(fill_arr1, a2.size).reshape((*a2.shape, *mat_shape))
expected3 = np.tile(fill_arr1, a3.size).reshape((*a3.shape, *mat_shape))
expected4 = np.tile(fill_arr1, a4.size).reshape((*a4.shape, *mat_shape))
# fill with 1d numpy array
a1.fill_(fill_arr1)
a2.fill_(fill_arr1)
a3.fill_(fill_arr1)
a4.fill_(fill_arr1)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
# clear
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
# fill with 2d numpy array
a1.fill_(fill_arr2)
a2.fill_(fill_arr2)
a3.fill_(fill_arr2)
a4.fill_(fill_arr2)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
# clear
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
# fill with flat list
a1.fill_(fill_list1)
a2.fill_(fill_list1)
a3.fill_(fill_list1)
a4.fill_(fill_list1)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
# clear
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
# fill with nested list
a1.fill_(fill_list2)
a2.fill_(fill_list2)
a3.fill_(fill_list2)
a4.fill_(fill_list2)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
# clear
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
# fill with mat instance
a1.fill_(fill_mat)
a2.fill_(fill_mat)
a3.fill_(fill_mat)
a4.fill_(fill_mat)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
            assert_np_equal(a4.numpy(), expected4)


def test_indexedarray_fill_struct(test, device):
dim_x = 8
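    # FillStruct.numpy_dtype() provides the matching numpy structured dtype,
    # used throughout this test to build expected values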
nptype = FillStruct.numpy_dtype()
data1 = wp.zeros(dim_x, dtype=FillStruct, device=device)
data2 = wp.zeros((dim_x, dim_x), dtype=FillStruct, device=device)
data3 = wp.zeros((dim_x, dim_x, dim_x), dtype=FillStruct, device=device)
data4 = wp.zeros((dim_x, dim_x, dim_x, dim_x), dtype=FillStruct, device=device)
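    # as in the other fill tests, index every other element so that fills only touch the views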
indices = wp.array(np.arange(0, dim_x, 2, dtype=np.int32), device=device)
a1 = data1[indices]
a2 = data2[indices]
a3 = data3[indices]
a4 = data4[indices]
assert_np_equal(a1.numpy(), np.zeros(a1.shape, dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros(a2.shape, dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros(a3.shape, dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros(a4.shape, dtype=nptype))
s = FillStruct()
# fill with default struct value (should be all zeros)
a1.fill_(s)
a2.fill_(s)
a3.fill_(s)
a4.fill_(s)
assert_np_equal(a1.numpy(), np.zeros(a1.shape, dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros(a2.shape, dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros(a3.shape, dtype=nptype))
assert_np_equal(a4.numpy(), np.zeros(a4.shape, dtype=nptype))
# scalars
s.i1 = -17
s.i2 = 42
s.i4 = 99
s.i8 = 101
s.f2 = -1.25
s.f4 = 13.37
s.f8 = 0.125
# vectors
s.v2 = [21, 22]
s.v3 = [31, 32, 33]
s.v4 = [41, 42, 43, 44]
s.v5 = [51, 52, 53, 54, 55]
# matrices
s.m2 = [[61, 62]] * 2
s.m3 = [[71, 72, 73]] * 3
s.m4 = [[81, 82, 83, 84]] * 4
s.m5 = [[91, 92, 93, 94, 95]] * 5
# arrays
s.a1 = wp.zeros((2,) * 1, dtype=float, device=device)
s.a2 = wp.zeros((2,) * 2, dtype=float, device=device)
s.a3 = wp.zeros((2,) * 3, dtype=float, device=device)
s.a4 = wp.zeros((2,) * 4, dtype=float, device=device)
# fill with custom struct value
a1.fill_(s)
a2.fill_(s)
a3.fill_(s)
a4.fill_(s)
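    # numpy_value() mirrors the struct contents as a structured scalar, which ndarray.fill() accepts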
ns = s.numpy_value()
expected1 = np.empty(a1.shape, dtype=nptype)
expected2 = np.empty(a2.shape, dtype=nptype)
expected3 = np.empty(a3.shape, dtype=nptype)
expected4 = np.empty(a4.shape, dtype=nptype)
expected1.fill(ns)
expected2.fill(ns)
expected3.fill(ns)
expected4.fill(ns)
assert_np_equal(a1.numpy(), expected1)
assert_np_equal(a2.numpy(), expected2)
assert_np_equal(a3.numpy(), expected3)
assert_np_equal(a4.numpy(), expected4)
# test clearing
a1.zero_()
a2.zero_()
a3.zero_()
a4.zero_()
assert_np_equal(a1.numpy(), np.zeros(a1.shape, dtype=nptype))
assert_np_equal(a2.numpy(), np.zeros(a2.shape, dtype=nptype))
assert_np_equal(a3.numpy(), np.zeros(a3.shape, dtype=nptype))
    assert_np_equal(a4.numpy(), np.zeros(a4.shape, dtype=nptype))


devices = get_test_devices()


class TestIndexedArray(unittest.TestCase):
    pass

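# register each test function above against all available devices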
add_function_test(TestIndexedArray, "test_indexedarray_1d", test_indexedarray_1d, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_2d", test_indexedarray_2d, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_3d", test_indexedarray_3d, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_4d", test_indexedarray_4d, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_mixed", test_indexedarray_mixed, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_shape", test_indexedarray_shape, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_getitem", test_indexedarray_getitem, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_slicing", test_indexedarray_slicing, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_generics", test_indexedarray_generics, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_empty", test_indexedarray_empty, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_fill_scalar", test_indexedarray_fill_scalar, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_fill_vector", test_indexedarray_fill_vector, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_fill_matrix", test_indexedarray_fill_matrix, devices=devices)
add_function_test(TestIndexedArray, "test_indexedarray_fill_struct", test_indexedarray_fill_struct, devices=devices)
if __name__ == "__main__":
wp.build.clear_kernel_cache()
unittest.main(verbosity=2)